Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Daniel Borkmann says:

====================
pull-request: bpf-next 2017-12-28

The following pull-request contains BPF updates for your *net-next* tree.

The main changes are:

1) Fix incorrect state pruning related to recognition of zero initialized
   stack slots, where stacksafe exploration would mistakenly return a
   positive pruning verdict too early ignoring other slots, from Gianluca.

2) Various BPF to BPF calls related follow-up fixes. Fix an off-by-one
   in maximum call depth check, and rework maximum stack depth tracking
   logic to fix a bypass of the total stack size check reported by Jann.
   Also fix a bug in arm64 JIT where prog->jited_len was uninitialized.
   Addition of various test cases to BPF selftests, from Alexei.

3) Addition of a BPF selftest to test_verifier that is related to BPF to
   BPF calls which demonstrates a late caller stack size increase and
   thus out of bounds access. Fixed above in 2). Test case from Jann.

4) Addition of correlating BPF helper calls, BPF to BPF calls as well
   as BPF maps to bpftool xlated dump in order to allow for better
   BPF program introspection and debugging, from Daniel.

5) Fixing several bugs in BPF to BPF calls kallsyms handling in order
   to get it actually to work for subprogs, from Daniel.

6) Extending sparc64 JIT support for BPF to BPF calls and fix a couple
   of build errors for libbpf on sparc64, from David.

7) Allow narrower context access for BPF dev cgroup typed programs in
   order to adapt to LLVM code generation. Also adjust memlock rlimit
   in the test_dev_cgroup BPF selftest, from Yonghong.

8) Add netdevsim Kconfig entry to BPF selftests since test_offload.py
   relies on netdevsim device being available, from Jakub.

9) Reduce scope of xdp_do_generic_redirect_map() to being static,
   from Xiongwei.

10) Minor cleanups and spelling fixes in BPF verifier, from Colin.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit fcffe2edbd
17 changed files with 764 additions and 68 deletions
arch/arm64/net/bpf_jit_comp.c

@@ -897,6 +897,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 		image_ptr = jit_data->image;
 		header = jit_data->header;
 		extra_pass = true;
+		image_size = sizeof(u32) * ctx.idx;
 		goto skip_init_ctx;
 	}
 	memset(&ctx, 0, sizeof(ctx));
arch/sparc/net/bpf_jit_comp_64.c

@@ -1509,11 +1509,19 @@ static void jit_fill_hole(void *area, unsigned int size)
 		*ptr++ = 0x91d02005; /* ta 5 */
 }
 
+struct sparc64_jit_data {
+	struct bpf_binary_header *header;
+	u8 *image;
+	struct jit_ctx ctx;
+};
+
 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 {
 	struct bpf_prog *tmp, *orig_prog = prog;
+	struct sparc64_jit_data *jit_data;
 	struct bpf_binary_header *header;
 	bool tmp_blinded = false;
+	bool extra_pass = false;
 	struct jit_ctx ctx;
 	u32 image_size;
 	u8 *image_ptr;

@@ -1533,13 +1541,31 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 		prog = tmp;
 	}
 
+	jit_data = prog->aux->jit_data;
+	if (!jit_data) {
+		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
+		if (!jit_data) {
+			prog = orig_prog;
+			goto out;
+		}
+		prog->aux->jit_data = jit_data;
+	}
+	if (jit_data->ctx.offset) {
+		ctx = jit_data->ctx;
+		image_ptr = jit_data->image;
+		header = jit_data->header;
+		extra_pass = true;
+		image_size = sizeof(u32) * ctx.idx;
+		goto skip_init_ctx;
+	}
+
 	memset(&ctx, 0, sizeof(ctx));
 	ctx.prog = prog;
 
 	ctx.offset = kcalloc(prog->len, sizeof(unsigned int), GFP_KERNEL);
 	if (ctx.offset == NULL) {
 		prog = orig_prog;
-		goto out;
+		goto out_off;
 	}
 
 	/* Fake pass to detect features used, and get an accurate assessment

@@ -1562,7 +1588,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	}
 
 	ctx.image = (u32 *)image_ptr;
-
+skip_init_ctx:
 	for (pass = 1; pass < 3; pass++) {
 		ctx.idx = 0;

@@ -1593,14 +1619,24 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 
 	bpf_flush_icache(header, (u8 *)header + (header->pages * PAGE_SIZE));
 
-	bpf_jit_binary_lock_ro(header);
+	if (!prog->is_func || extra_pass) {
+		bpf_jit_binary_lock_ro(header);
+	} else {
+		jit_data->ctx = ctx;
+		jit_data->image = image_ptr;
+		jit_data->header = header;
+	}
 
 	prog->bpf_func = (void *)ctx.image;
 	prog->jited = 1;
+	prog->jited_len = image_size;
 
+	if (!prog->is_func || extra_pass) {
 out_off:
-	kfree(ctx.offset);
+		kfree(ctx.offset);
+		kfree(jit_data);
+		prog->aux->jit_data = NULL;
+	}
 out:
 	if (tmp_blinded)
 		bpf_jit_prog_release_other(prog, prog == orig_prog ?
include/linux/bpf_verifier.h

@@ -194,6 +194,7 @@ struct bpf_verifier_env {
 	struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
 	struct bpf_verifer_log log;
 	u32 subprog_starts[BPF_MAX_SUBPROGS];
+	/* computes the stack depth of each bpf function */
 	u16 subprog_stack_depth[BPF_MAX_SUBPROGS + 1];
 	u32 subprog_cnt;
 };
include/linux/filter.h

@@ -18,6 +18,7 @@
 #include <linux/capability.h>
 #include <linux/cryptohash.h>
 #include <linux/set_memory.h>
+#include <linux/kallsyms.h>
 
 #include <net/sch_generic.h>

@@ -724,6 +725,14 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
 void bpf_jit_compile(struct bpf_prog *prog);
 bool bpf_helper_changes_pkt_data(void *func);
 
+static inline bool bpf_dump_raw_ok(void)
+{
+	/* Reconstruction of call-sites is dependent on kallsyms,
+	 * thus make dump the same restriction.
+	 */
+	return kallsyms_show_value() == 1;
+}
+
 struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
 				       const struct bpf_insn *patch, u32 len);
include/uapi/linux/bpf.h

@@ -1012,7 +1012,8 @@ struct bpf_perf_event_value {
 #define BPF_DEVCG_DEV_CHAR	(1ULL << 1)
 
 struct bpf_cgroup_dev_ctx {
-	__u32 access_type; /* (access << 16) | type */
+	/* access_type encoded as (BPF_DEVCG_ACC_* << 16) | BPF_DEVCG_DEV_* */
+	__u32 access_type;
 	__u32 major;
 	__u32 minor;
 };
kernel/bpf/cgroup.c

@@ -568,6 +568,8 @@ static bool cgroup_dev_is_valid_access(int off, int size,
 				       enum bpf_access_type type,
 				       struct bpf_insn_access_aux *info)
 {
+	const int size_default = sizeof(__u32);
+
 	if (type == BPF_WRITE)
 		return false;

@@ -576,8 +578,17 @@ static bool cgroup_dev_is_valid_access(int off, int size,
 	/* The verifier guarantees that size > 0. */
 	if (off % size != 0)
 		return false;
-	if (size != sizeof(__u32))
-		return false;
+
+	switch (off) {
+	case bpf_ctx_range(struct bpf_cgroup_dev_ctx, access_type):
+		bpf_ctx_record_field_size(info, size_default);
+		if (!bpf_ctx_narrow_access_ok(off, size, size_default))
+			return false;
+		break;
+	default:
+		if (size != size_default)
+			return false;
+	}
 
 	return true;
 }
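With narrow loads allowed, the partial reads of access_type that LLVM likes to emit now pass the verifier. A minimal sketch of a dev cgroup program relying on this, modelled on the selftest's BPF program (illustrative only, not part of the diff):

#include <linux/bpf.h>

/* With -O2, clang may lower the masked read below into a 2-byte
 * context load at offsetof(struct bpf_cgroup_dev_ctx, access_type),
 * which cgroup_dev_is_valid_access() now accepts via
 * bpf_ctx_narrow_access_ok(); the verifier zero-extends the rest.
 */
int bpf_prog1(struct bpf_cgroup_dev_ctx *ctx)
{
	short type = ctx->access_type & 0xFFFF;	/* BPF_DEVCG_DEV_* */
	short access = ctx->access_type >> 16;	/* BPF_DEVCG_ACC_* */

	/* e.g. permit read access to char device 1:3 only */
	if (type == BPF_DEVCG_DEV_CHAR && ctx->major == 1 && ctx->minor == 3)
		return access & BPF_DEVCG_ACC_READ ? 1 : 0;

	return 0;
}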
kernel/bpf/core.c

@@ -771,7 +771,9 @@ struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
 
 /* Base function for offset calculation. Needs to go into .text section,
  * therefore keeping it non-static as well; will also be used by JITs
- * anyway later on, so do not let the compiler omit it.
+ * anyway later on, so do not let the compiler omit it. This also needs
+ * to go into kallsyms for correlation from e.g. bpftool, so naming
+ * must not change.
  */
 noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 {
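Why the symbol name must not change: a helper call's insn->imm is stored as an offset relative to __bpf_call_base, and bpftool later re-derives the absolute address from the kallsyms value of exactly that symbol. A stand-alone sketch of the round trip with made-up addresses (not part of the diff):

#include <stdio.h>

int main(void)
{
	/* hypothetical kallsyms values, purely for illustration */
	unsigned long base   = 0xffffffff81179ab0UL; /* __bpf_call_base */
	unsigned long helper = 0xffffffff8117d640UL; /* some BPF helper */

	/* kernel side: what ends up in insn->imm for the call */
	int imm = (int)(helper - base);

	/* bpftool side: absolute address recomputed from the dump */
	unsigned long rebuilt = base + imm;

	printf("imm=%d rebuilt=0x%lx ok=%d\n", imm, rebuilt, rebuilt == helper);
	return 0;
}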
kernel/bpf/disasm.c

@@ -21,10 +21,39 @@ static const char * const func_id_str[] = {
 };
 #undef __BPF_FUNC_STR_FN
 
-const char *func_id_name(int id)
+static const char *__func_get_name(const struct bpf_insn_cbs *cbs,
+				   const struct bpf_insn *insn,
+				   char *buff, size_t len)
 {
 	BUILD_BUG_ON(ARRAY_SIZE(func_id_str) != __BPF_FUNC_MAX_ID);
 
+	if (insn->src_reg != BPF_PSEUDO_CALL &&
+	    insn->imm >= 0 && insn->imm < __BPF_FUNC_MAX_ID &&
+	    func_id_str[insn->imm])
+		return func_id_str[insn->imm];
+
+	if (cbs && cbs->cb_call)
+		return cbs->cb_call(cbs->private_data, insn);
+
+	if (insn->src_reg == BPF_PSEUDO_CALL)
+		snprintf(buff, len, "%+d", insn->imm);
+
+	return buff;
+}
+
+static const char *__func_imm_name(const struct bpf_insn_cbs *cbs,
+				   const struct bpf_insn *insn,
+				   u64 full_imm, char *buff, size_t len)
+{
+	if (cbs && cbs->cb_imm)
+		return cbs->cb_imm(cbs->private_data, insn, full_imm);
+
+	snprintf(buff, len, "0x%llx", (unsigned long long)full_imm);
+	return buff;
+}
+
+const char *func_id_name(int id)
+{
 	if (id >= 0 && id < __BPF_FUNC_MAX_ID && func_id_str[id])
 		return func_id_str[id];
 	else

@@ -83,7 +112,7 @@ static const char *const bpf_jmp_string[16] = {
 	[BPF_EXIT >> 4] = "exit",
 };
 
-static void print_bpf_end_insn(bpf_insn_print_cb verbose,
+static void print_bpf_end_insn(bpf_insn_print_t verbose,
 			       struct bpf_verifier_env *env,
 			       const struct bpf_insn *insn)
 {

@@ -92,9 +121,12 @@ static void print_bpf_end_insn(bpf_insn_print_cb verbose,
 		insn->imm, insn->dst_reg);
 }
 
-void print_bpf_insn(bpf_insn_print_cb verbose, struct bpf_verifier_env *env,
-		    const struct bpf_insn *insn, bool allow_ptr_leaks)
+void print_bpf_insn(const struct bpf_insn_cbs *cbs,
+		    struct bpf_verifier_env *env,
+		    const struct bpf_insn *insn,
+		    bool allow_ptr_leaks)
 {
+	const bpf_insn_print_t verbose = cbs->cb_print;
 	u8 class = BPF_CLASS(insn->code);
 
 	if (class == BPF_ALU || class == BPF_ALU64) {

@@ -175,12 +207,15 @@ void print_bpf_insn(bpf_insn_print_cb verbose, struct bpf_verifier_env *env,
 		 */
 		u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
 		bool map_ptr = insn->src_reg == BPF_PSEUDO_MAP_FD;
+		char tmp[64];
 
 		if (map_ptr && !allow_ptr_leaks)
 			imm = 0;
 
-		verbose(env, "(%02x) r%d = 0x%llx\n", insn->code,
-			insn->dst_reg, (unsigned long long)imm);
+		verbose(env, "(%02x) r%d = %s\n",
+			insn->code, insn->dst_reg,
+			__func_imm_name(cbs, insn, imm,
+					tmp, sizeof(tmp)));
 	} else {
 		verbose(env, "BUG_ld_%02x\n", insn->code);
 		return;

@@ -189,12 +224,20 @@ void print_bpf_insn(bpf_insn_print_cb verbose, struct bpf_verifier_env *env,
 		u8 opcode = BPF_OP(insn->code);
 
 		if (opcode == BPF_CALL) {
-			if (insn->src_reg == BPF_PSEUDO_CALL)
-				verbose(env, "(%02x) call pc%+d\n", insn->code,
-					insn->imm);
-			else
+			char tmp[64];
+
+			if (insn->src_reg == BPF_PSEUDO_CALL) {
+				verbose(env, "(%02x) call pc%s\n",
+					insn->code,
+					__func_get_name(cbs, insn,
+							tmp, sizeof(tmp)));
+			} else {
+				strcpy(tmp, "unknown");
 				verbose(env, "(%02x) call %s#%d\n", insn->code,
-					func_id_name(insn->imm), insn->imm);
+					__func_get_name(cbs, insn,
+							tmp, sizeof(tmp)),
+					insn->imm);
+			}
 		} else if (insn->code == (BPF_JMP | BPF_JA)) {
 			verbose(env, "(%02x) goto pc%+d\n",
 				insn->code, insn->off);
kernel/bpf/disasm.h

@@ -17,16 +17,35 @@
 #include <linux/bpf.h>
 #include <linux/kernel.h>
 #include <linux/stringify.h>
+#ifndef __KERNEL__
+#include <stdio.h>
+#include <string.h>
+#endif
+
+struct bpf_verifier_env;
 
 extern const char *const bpf_alu_string[16];
 extern const char *const bpf_class_string[8];
 
 const char *func_id_name(int id);
 
-struct bpf_verifier_env;
-typedef void (*bpf_insn_print_cb)(struct bpf_verifier_env *env,
-				  const char *, ...);
-void print_bpf_insn(bpf_insn_print_cb verbose, struct bpf_verifier_env *env,
-		    const struct bpf_insn *insn, bool allow_ptr_leaks);
+typedef void (*bpf_insn_print_t)(struct bpf_verifier_env *env,
+				 const char *, ...);
+typedef const char *(*bpf_insn_revmap_call_t)(void *private_data,
+					      const struct bpf_insn *insn);
+typedef const char *(*bpf_insn_print_imm_t)(void *private_data,
+					    const struct bpf_insn *insn,
+					    __u64 full_imm);
+
+struct bpf_insn_cbs {
+	bpf_insn_print_t	cb_print;
+	bpf_insn_revmap_call_t	cb_call;
+	bpf_insn_print_imm_t	cb_imm;
+	void			*private_data;
+};
+
+void print_bpf_insn(const struct bpf_insn_cbs *cbs,
+		    struct bpf_verifier_env *env,
+		    const struct bpf_insn *insn,
+		    bool allow_ptr_leaks);
 #endif
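A minimal sketch of how a consumer wires up the new callback struct; the my_print/my_call helpers and the include path are hypothetical, and bpftool's real kallsyms-backed implementation appears further down (not part of the diff):

#include <stdarg.h>
#include <stdio.h>

#include "disasm.h"	/* tools-side copy of this header */

/* printf-style sink, same role the old bpf_insn_print_cb had */
static void my_print(struct bpf_verifier_env *env, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	vprintf(fmt, args);
	va_end(args);
}

/* optional: resolve a call insn to a symbolic name */
static const char *my_call(void *private_data, const struct bpf_insn *insn)
{
	return "my_helper";
}

static const struct bpf_insn_cbs cbs = {
	.cb_print	= my_print,
	.cb_call	= my_call,
	.cb_imm		= NULL,	/* NULL: default "0x%llx" rendering */
	.private_data	= NULL,
};

/* usage: print_bpf_insn(&cbs, NULL, insn, true); */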
kernel/bpf/syscall.c

@@ -937,10 +937,16 @@ static void __bpf_prog_put_rcu(struct rcu_head *rcu)
 static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
 {
 	if (atomic_dec_and_test(&prog->aux->refcnt)) {
+		int i;
+
 		trace_bpf_prog_put_rcu(prog);
 		/* bpf_prog_free_id() must be called first */
 		bpf_prog_free_id(prog, do_idr_lock);
+
+		for (i = 0; i < prog->aux->func_cnt; i++)
+			bpf_prog_kallsyms_del(prog->aux->func[i]);
 		bpf_prog_kallsyms_del(prog);
+
 		call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
 	}
 }

@@ -1552,6 +1558,67 @@ static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
 	return fd;
 }
 
+static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog,
+					      unsigned long addr)
+{
+	int i;
+
+	for (i = 0; i < prog->aux->used_map_cnt; i++)
+		if (prog->aux->used_maps[i] == (void *)addr)
+			return prog->aux->used_maps[i];
+	return NULL;
+}
+
+static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog)
+{
+	const struct bpf_map *map;
+	struct bpf_insn *insns;
+	u64 imm;
+	int i;
+
+	insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog),
+			GFP_USER);
+	if (!insns)
+		return insns;
+
+	for (i = 0; i < prog->len; i++) {
+		if (insns[i].code == (BPF_JMP | BPF_TAIL_CALL)) {
+			insns[i].code = BPF_JMP | BPF_CALL;
+			insns[i].imm = BPF_FUNC_tail_call;
+			/* fall-through */
+		}
+		if (insns[i].code == (BPF_JMP | BPF_CALL) ||
+		    insns[i].code == (BPF_JMP | BPF_CALL_ARGS)) {
+			if (insns[i].code == (BPF_JMP | BPF_CALL_ARGS))
+				insns[i].code = BPF_JMP | BPF_CALL;
+			if (!bpf_dump_raw_ok())
+				insns[i].imm = 0;
+			continue;
+		}
+
+		if (insns[i].code != (BPF_LD | BPF_IMM | BPF_DW))
+			continue;
+
+		imm = ((u64)insns[i + 1].imm << 32) | (u32)insns[i].imm;
+		map = bpf_map_from_imm(prog, imm);
+		if (map) {
+			insns[i].src_reg = BPF_PSEUDO_MAP_FD;
+			insns[i].imm = map->id;
+			insns[i + 1].imm = 0;
+			continue;
+		}
+
+		if (!bpf_dump_raw_ok() &&
+		    imm == (unsigned long)prog->aux) {
+			insns[i].imm = 0;
+			insns[i + 1].imm = 0;
+			continue;
+		}
+	}
+
+	return insns;
+}
+
 static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
 				   const union bpf_attr *attr,
 				   union bpf_attr __user *uattr)

@@ -1602,18 +1669,34 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
 	ulen = info.jited_prog_len;
 	info.jited_prog_len = prog->jited_len;
 	if (info.jited_prog_len && ulen) {
-		uinsns = u64_to_user_ptr(info.jited_prog_insns);
-		ulen = min_t(u32, info.jited_prog_len, ulen);
-		if (copy_to_user(uinsns, prog->bpf_func, ulen))
-			return -EFAULT;
+		if (bpf_dump_raw_ok()) {
+			uinsns = u64_to_user_ptr(info.jited_prog_insns);
+			ulen = min_t(u32, info.jited_prog_len, ulen);
+			if (copy_to_user(uinsns, prog->bpf_func, ulen))
+				return -EFAULT;
+		} else {
+			info.jited_prog_insns = 0;
+		}
 	}
 
 	ulen = info.xlated_prog_len;
 	info.xlated_prog_len = bpf_prog_insn_size(prog);
 	if (info.xlated_prog_len && ulen) {
+		struct bpf_insn *insns_sanitized;
+		bool fault;
+
+		if (prog->blinded && !bpf_dump_raw_ok()) {
+			info.xlated_prog_insns = 0;
+			goto done;
+		}
+		insns_sanitized = bpf_insn_prepare_dump(prog);
+		if (!insns_sanitized)
+			return -ENOMEM;
 		uinsns = u64_to_user_ptr(info.xlated_prog_insns);
 		ulen = min_t(u32, info.xlated_prog_len, ulen);
-		if (copy_to_user(uinsns, prog->insnsi, ulen))
+		fault = copy_to_user(uinsns, insns_sanitized, ulen);
+		kfree(insns_sanitized);
+		if (fault)
 			return -EFAULT;
 	}
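For illustration, what bpf_insn_prepare_dump() does to a single ld_imm64 map load before it is copied to user space; the address and map id below are made up, and the sanitized form is what e.g. bpftool prog dump xlated consumes (stand-alone sketch assuming the uapi linux/bpf.h header, not part of the diff):

#include <stdio.h>
#include <string.h>
#include <linux/bpf.h>

int main(void)
{
	struct bpf_insn insns[2];
	unsigned long long addr = 0xffff880012345678ULL; /* fake map ptr */

	/* in-kernel form: raw pointer split across the insn pair */
	memset(insns, 0, sizeof(insns));
	insns[0].code = BPF_LD | BPF_IMM | BPF_DW;
	insns[0].imm = (int)(addr & 0xffffffff);
	insns[1].imm = (int)(addr >> 32);

	/* sanitized form, once the address matched a map with id 7 */
	insns[0].src_reg = BPF_PSEUDO_MAP_FD;
	insns[0].imm = 7;	/* map->id */
	insns[1].imm = 0;

	printf("r1 = map[id:%d]\n", insns[0].imm);
	return 0;
}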
kernel/bpf/verifier.c

@@ -772,7 +772,7 @@ static int check_subprogs(struct bpf_verifier_env *env)
 			return -EPERM;
 		}
 		if (bpf_prog_is_dev_bound(env->prog->aux)) {
-			verbose(env, "funcation calls in offloaded programs are not supported yet\n");
+			verbose(env, "function calls in offloaded programs are not supported yet\n");
 			return -EINVAL;
 		}
 		ret = add_subprog(env, i + insn[i].imm + 1);

@@ -823,6 +823,7 @@ static int check_subprogs(struct bpf_verifier_env *env)
 	return 0;
 }
 
+static
 struct bpf_verifier_state *skip_callee(struct bpf_verifier_env *env,
 				       const struct bpf_verifier_state *state,
 				       struct bpf_verifier_state *parent,

@@ -867,7 +868,7 @@ struct bpf_verifier_state *skip_callee(struct bpf_verifier_env *env,
 	verbose(env, "verifier bug regno %d tmp %p\n", regno, tmp);
 	verbose(env, "regno %d parent frame %d current frame %d\n",
 		regno, parent->curframe, state->curframe);
-	return 0;
+	return NULL;
 }
 
 static int mark_reg_read(struct bpf_verifier_env *env,

@@ -1434,33 +1435,80 @@ static int update_stack_depth(struct bpf_verifier_env *env,
 			      const struct bpf_func_state *func,
 			      int off)
 {
-	u16 stack = env->subprog_stack_depth[func->subprogno], total = 0;
-	struct bpf_verifier_state *cur = env->cur_state;
-	int i;
+	u16 stack = env->subprog_stack_depth[func->subprogno];
 
 	if (stack >= -off)
 		return 0;
 
 	/* update known max for given subprogram */
 	env->subprog_stack_depth[func->subprogno] = -off;
+	return 0;
+}
 
-	/* compute the total for current call chain */
-	for (i = 0; i <= cur->curframe; i++) {
-		u32 depth = env->subprog_stack_depth[cur->frame[i]->subprogno];
-
-		/* round up to 32-bytes, since this is granularity
-		 * of interpreter stack sizes
-		 */
-		depth = round_up(depth, 32);
-		total += depth;
-	}
-
-	if (total > MAX_BPF_STACK) {
-		verbose(env, "combined stack size of %d calls is %d. Too large\n",
-			cur->curframe, total);
-		return -EACCES;
-	}
-	return 0;
+/* starting from main bpf function walk all instructions of the function
+ * and recursively walk all callees that given function can call.
+ * Ignore jump and exit insns.
+ * Since recursion is prevented by check_cfg() this algorithm
+ * only needs a local stack of MAX_CALL_FRAMES to remember callsites
+ */
+static int check_max_stack_depth(struct bpf_verifier_env *env)
+{
+	int depth = 0, frame = 0, subprog = 0, i = 0, subprog_end;
+	struct bpf_insn *insn = env->prog->insnsi;
+	int insn_cnt = env->prog->len;
+	int ret_insn[MAX_CALL_FRAMES];
+	int ret_prog[MAX_CALL_FRAMES];
+
+process_func:
+	/* round up to 32-bytes, since this is granularity
+	 * of interpreter stack size
+	 */
+	depth += round_up(max_t(u32, env->subprog_stack_depth[subprog], 1), 32);
+	if (depth > MAX_BPF_STACK) {
+		verbose(env, "combined stack size of %d calls is %d. Too large\n",
+			frame + 1, depth);
+		return -EACCES;
+	}
+continue_func:
+	if (env->subprog_cnt == subprog)
+		subprog_end = insn_cnt;
+	else
+		subprog_end = env->subprog_starts[subprog];
+	for (; i < subprog_end; i++) {
+		if (insn[i].code != (BPF_JMP | BPF_CALL))
+			continue;
+		if (insn[i].src_reg != BPF_PSEUDO_CALL)
+			continue;
+		/* remember insn and function to return to */
+		ret_insn[frame] = i + 1;
+		ret_prog[frame] = subprog;
+
+		/* find the callee */
+		i = i + insn[i].imm + 1;
+		subprog = find_subprog(env, i);
+		if (subprog < 0) {
+			WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
+				  i);
+			return -EFAULT;
+		}
+		subprog++;
+		frame++;
+		if (frame >= MAX_CALL_FRAMES) {
+			WARN_ONCE(1, "verifier bug. Call stack is too deep\n");
+			return -EFAULT;
+		}
+		goto process_func;
+	}
+	/* end of for() loop means the last insn of the 'subprog'
+	 * was reached. Doesn't matter whether it was JA or EXIT
+	 */
+	if (frame == 0)
+		return 0;
+	depth -= round_up(max_t(u32, env->subprog_stack_depth[subprog], 1), 32);
+	frame--;
+	i = ret_insn[frame];
+	subprog = ret_prog[frame];
+	goto continue_func;
 }
 
 static int get_callee_stack_depth(struct bpf_verifier_env *env,
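A rough user-space rendering of the new accounting, using the per-function stack sizes from the "three frames" selftests added below. Sketch only: the real code discovers callees by walking instructions, while here the deepest call chain is hard-coded (not part of the diff):

#include <stdio.h>

#define MAX_BPF_STACK	512

static int round_up_32(int v)
{
	return (v + 31) & ~31;	/* interpreter stack granularity */
}

int main(void)
{
	/* deepest chain of selftest test1: main(32) -> B(64) -> A(256) */
	int chain[] = { 32, 64, 256 };
	int depth = 0, frame;

	for (frame = 0; frame < 3; frame++) {
		depth += round_up_32(chain[frame] > 0 ? chain[frame] : 1);
		if (depth > MAX_BPF_STACK) {
			printf("combined stack size of %d calls is %d. Too large\n",
			       frame + 1, depth);
			return 1;
		}
	}
	printf("accepted: worst-case combined depth %d\n", depth);
	return 0;
}

Swapping in test3's sizes (64, 256 and 224 on the main -> B -> A chain) pushes depth to 544 and trips the check, which is exactly the kind of late stack growth the old per-state accounting could miss.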
@@ -2105,9 +2153,9 @@ static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 	struct bpf_func_state *caller, *callee;
 	int i, subprog, target_insn;
 
-	if (state->curframe >= MAX_CALL_FRAMES) {
+	if (state->curframe + 1 >= MAX_CALL_FRAMES) {
 		verbose(env, "the call stack of %d frames is too deep\n",
-			state->curframe);
+			state->curframe + 2);
 		return -E2BIG;
 	}

@@ -4155,7 +4203,7 @@ static bool stacksafe(struct bpf_func_state *old,
 
 		if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ))
 			/* explored state didn't use this */
-			return true;
+			continue;
 
 		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID)
 			continue;

@@ -4475,9 +4523,12 @@ static int do_check(struct bpf_verifier_env *env)
 		}
 
 		if (env->log.level) {
+			const struct bpf_insn_cbs cbs = {
+				.cb_print	= verbose,
+			};
+
 			verbose(env, "%d: ", insn_idx);
-			print_bpf_insn(verbose, env, insn,
-				       env->allow_ptr_leaks);
+			print_bpf_insn(&cbs, env, insn, env->allow_ptr_leaks);
 		}
 
 		err = ext_analyzer_insn_hook(env, insn_idx, prev_insn_idx);

@@ -5065,14 +5116,14 @@ static int jit_subprogs(struct bpf_verifier_env *env)
 {
 	struct bpf_prog *prog = env->prog, **func, *tmp;
 	int i, j, subprog_start, subprog_end = 0, len, subprog;
-	struct bpf_insn *insn = prog->insnsi;
+	struct bpf_insn *insn;
 	void *old_bpf_func;
 	int err = -ENOMEM;
 
 	if (env->subprog_cnt == 0)
 		return 0;
 
-	for (i = 0; i < prog->len; i++, insn++) {
+	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
 		if (insn->code != (BPF_JMP | BPF_CALL) ||
 		    insn->src_reg != BPF_PSEUDO_CALL)
 			continue;

@@ -5111,7 +5162,10 @@ static int jit_subprogs(struct bpf_verifier_env *env)
 			goto out_free;
 		memcpy(func[i]->insnsi, &prog->insnsi[subprog_start],
 		       len * sizeof(struct bpf_insn));
+		func[i]->type = prog->type;
 		func[i]->len = len;
+		if (bpf_prog_calc_tag(func[i]))
+			goto out_free;
 		func[i]->is_func = 1;
 		/* Use bpf_prog_F_tag to indicate functions in stack traces.
 		 * Long term would need debug info to populate names

@@ -5161,6 +5215,25 @@ static int jit_subprogs(struct bpf_verifier_env *env)
 		bpf_prog_lock_ro(func[i]);
 		bpf_prog_kallsyms_add(func[i]);
 	}
+
+	/* Last step: make now unused interpreter insns from main
+	 * prog consistent for later dump requests, so they can
+	 * later look the same as if they were interpreted only.
+	 */
+	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
+		unsigned long addr;
+
+		if (insn->code != (BPF_JMP | BPF_CALL) ||
+		    insn->src_reg != BPF_PSEUDO_CALL)
+			continue;
+		insn->off = env->insn_aux_data[i].call_imm;
+		subprog = find_subprog(env, i + insn->off + 1);
+		addr = (unsigned long)func[subprog + 1]->bpf_func;
+		addr &= PAGE_MASK;
+		insn->imm = (u64 (*)(u64, u64, u64, u64, u64))
+			    addr - __bpf_call_base;
+	}
+
 	prog->jited = 1;
 	prog->bpf_func = func[0]->bpf_func;
 	prog->aux->func = func;

@@ -5426,6 +5499,9 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
 	if (ret == 0)
 		sanitize_dead_code(env);
 
+	if (ret == 0)
+		ret = check_max_stack_depth(env);
+
 	if (ret == 0)
 		/* program is valid, convert *(u32*)(ctx + off) accesses */
 		ret = convert_ctx_accesses(env);
net/core/filter.c

@@ -2684,8 +2684,9 @@ static int __xdp_generic_ok_fwd_dev(struct sk_buff *skb, struct net_device *fwd)
 	return 0;
 }
 
-int xdp_do_generic_redirect_map(struct net_device *dev, struct sk_buff *skb,
-				struct bpf_prog *xdp_prog)
+static int xdp_do_generic_redirect_map(struct net_device *dev,
+				       struct sk_buff *skb,
+				       struct bpf_prog *xdp_prog)
 {
 	struct redirect_info *ri = this_cpu_ptr(&redirect_info);
 	unsigned long map_owner = ri->map_owner;
tools/bpf/bpftool/prog.c

@@ -401,6 +401,88 @@ static int do_show(int argc, char **argv)
 	return err;
 }
 
+#define SYM_MAX_NAME	256
+
+struct kernel_sym {
+	unsigned long address;
+	char name[SYM_MAX_NAME];
+};
+
+struct dump_data {
+	unsigned long address_call_base;
+	struct kernel_sym *sym_mapping;
+	__u32 sym_count;
+	char scratch_buff[SYM_MAX_NAME];
+};
+
+static int kernel_syms_cmp(const void *sym_a, const void *sym_b)
+{
+	return ((struct kernel_sym *)sym_a)->address -
+	       ((struct kernel_sym *)sym_b)->address;
+}
+
+static void kernel_syms_load(struct dump_data *dd)
+{
+	struct kernel_sym *sym;
+	char buff[256];
+	void *tmp, *address;
+	FILE *fp;
+
+	fp = fopen("/proc/kallsyms", "r");
+	if (!fp)
+		return;
+
+	while (!feof(fp)) {
+		if (!fgets(buff, sizeof(buff), fp))
+			break;
+		tmp = realloc(dd->sym_mapping,
+			      (dd->sym_count + 1) *
+			      sizeof(*dd->sym_mapping));
+		if (!tmp) {
+out:
+			free(dd->sym_mapping);
+			dd->sym_mapping = NULL;
+			fclose(fp);
+			return;
+		}
+		dd->sym_mapping = tmp;
+		sym = &dd->sym_mapping[dd->sym_count];
+		if (sscanf(buff, "%p %*c %s", &address, sym->name) != 2)
+			continue;
+		sym->address = (unsigned long)address;
+		if (!strcmp(sym->name, "__bpf_call_base")) {
+			dd->address_call_base = sym->address;
+			/* sysctl kernel.kptr_restrict was set */
+			if (!sym->address)
+				goto out;
+		}
+		if (sym->address)
+			dd->sym_count++;
+	}
+
+	fclose(fp);
+
+	qsort(dd->sym_mapping, dd->sym_count,
+	      sizeof(*dd->sym_mapping), kernel_syms_cmp);
+}
+
+static void kernel_syms_destroy(struct dump_data *dd)
+{
+	free(dd->sym_mapping);
+}
+
+static struct kernel_sym *kernel_syms_search(struct dump_data *dd,
+					     unsigned long key)
+{
+	struct kernel_sym sym = {
+		.address = key,
+	};
+
+	return dd->sym_mapping ?
+	       bsearch(&sym, dd->sym_mapping, dd->sym_count,
+		       sizeof(*dd->sym_mapping), kernel_syms_cmp) : NULL;
+}
+
 static void print_insn(struct bpf_verifier_env *env, const char *fmt, ...)
 {
 	va_list args;
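The "%p %*c %s" scan format above parses /proc/kallsyms lines; a zeroed __bpf_call_base address means kernel.kptr_restrict is hiding pointers, which is why loading is aborted in that case (the do_dump error path below reports the same suspicion). A stand-alone sketch of the parsing, with made-up sample lines (not part of the diff):

#include <stdio.h>

int main(void)
{
	/* illustrative lines; the real input is /proc/kallsyms */
	const char *lines[] = {
		"ffffffffc0a81000 t bpf_prog_5440a9f5e67e9f17_F\t[bpf]",
		"0000000000000000 T __bpf_call_base",	/* kptr_restrict set */
	};
	char name[256];
	void *address;
	int i;

	for (i = 0; i < 2; i++) {
		if (sscanf(lines[i], "%p %*c %s", &address, name) != 2)
			continue;
		printf("%#lx %s\n", (unsigned long)address, name);
	}
	return 0;
}

Once the table is loaded and sorted, bpftool prog dump xlated can render a BPF to BPF call as e.g. "call pc+2#bpf_prog_5440a9f5e67e9f17_F" instead of a bare offset.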
@@ -410,8 +492,71 @@ static void print_insn(struct bpf_verifier_env *env, const char *fmt, ...)
 	va_end(args);
 }
 
+static const char *print_call_pcrel(struct dump_data *dd,
+				    struct kernel_sym *sym,
+				    unsigned long address,
+				    const struct bpf_insn *insn)
+{
+	if (sym)
+		snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+			 "%+d#%s", insn->off, sym->name);
+	else
+		snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+			 "%+d#0x%lx", insn->off, address);
+	return dd->scratch_buff;
+}
+
+static const char *print_call_helper(struct dump_data *dd,
+				     struct kernel_sym *sym,
+				     unsigned long address)
+{
+	if (sym)
+		snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+			 "%s", sym->name);
+	else
+		snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+			 "0x%lx", address);
+	return dd->scratch_buff;
+}
+
+static const char *print_call(void *private_data,
+			      const struct bpf_insn *insn)
+{
+	struct dump_data *dd = private_data;
+	unsigned long address = dd->address_call_base + insn->imm;
+	struct kernel_sym *sym;
+
+	sym = kernel_syms_search(dd, address);
+	if (insn->src_reg == BPF_PSEUDO_CALL)
+		return print_call_pcrel(dd, sym, address, insn);
+	else
+		return print_call_helper(dd, sym, address);
+}
+
+static const char *print_imm(void *private_data,
+			     const struct bpf_insn *insn,
+			     __u64 full_imm)
+{
+	struct dump_data *dd = private_data;
+
+	if (insn->src_reg == BPF_PSEUDO_MAP_FD)
+		snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+			 "map[id:%u]", insn->imm);
+	else
+		snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+			 "0x%llx", (unsigned long long)full_imm);
+	return dd->scratch_buff;
+}
+
-static void dump_xlated_plain(void *buf, unsigned int len, bool opcodes)
+static void dump_xlated_plain(struct dump_data *dd, void *buf,
+			      unsigned int len, bool opcodes)
 {
+	const struct bpf_insn_cbs cbs = {
+		.cb_print	= print_insn,
+		.cb_call	= print_call,
+		.cb_imm		= print_imm,
+		.private_data	= dd,
+	};
 	struct bpf_insn *insn = buf;
 	bool double_insn = false;
 	unsigned int i;

@@ -425,7 +570,7 @@ static void dump_xlated_plain(void *buf, unsigned int len, bool opcodes)
 		double_insn = insn[i].code == (BPF_LD | BPF_IMM | BPF_DW);
 
 		printf("% 4d: ", i);
-		print_bpf_insn(print_insn, NULL, insn + i, true);
+		print_bpf_insn(&cbs, NULL, insn + i, true);
 
 		if (opcodes) {
 			printf("       ");

@@ -454,8 +599,15 @@ static void print_insn_json(struct bpf_verifier_env *env, const char *fmt, ...)
 	va_end(args);
 }
 
-static void dump_xlated_json(void *buf, unsigned int len, bool opcodes)
+static void dump_xlated_json(struct dump_data *dd, void *buf,
+			     unsigned int len, bool opcodes)
 {
+	const struct bpf_insn_cbs cbs = {
+		.cb_print	= print_insn_json,
+		.cb_call	= print_call,
+		.cb_imm		= print_imm,
+		.private_data	= dd,
+	};
 	struct bpf_insn *insn = buf;
 	bool double_insn = false;
 	unsigned int i;

@@ -470,7 +622,7 @@ static void dump_xlated_json(void *buf, unsigned int len, bool opcodes)
 
 		jsonw_start_object(json_wtr);
 		jsonw_name(json_wtr, "disasm");
-		print_bpf_insn(print_insn_json, NULL, insn + i, true);
+		print_bpf_insn(&cbs, NULL, insn + i, true);
 
 		if (opcodes) {
 			jsonw_name(json_wtr, "opcodes");

@@ -505,6 +657,7 @@ static void dump_xlated_json(void *buf, unsigned int len, bool opcodes)
 static int do_dump(int argc, char **argv)
 {
 	struct bpf_prog_info info = {};
+	struct dump_data dd = {};
 	__u32 len = sizeof(info);
 	unsigned int buf_size;
 	char *filepath = NULL;

@@ -592,6 +745,14 @@ static int do_dump(int argc, char **argv)
 		goto err_free;
 	}
 
+	if ((member_len == &info.jited_prog_len &&
+	     info.jited_prog_insns == 0) ||
+	    (member_len == &info.xlated_prog_len &&
+	     info.xlated_prog_insns == 0)) {
+		p_err("error retrieving insn dump: kernel.kptr_restrict set?");
+		goto err_free;
+	}
+
 	if (filepath) {
 		fd = open(filepath, O_WRONLY | O_CREAT | O_TRUNC, 0600);
 		if (fd < 0) {

@@ -608,17 +769,19 @@ static int do_dump(int argc, char **argv)
 			goto err_free;
 		}
 	} else {
-		if (member_len == &info.jited_prog_len)
+		if (member_len == &info.jited_prog_len) {
 			disasm_print_insn(buf, *member_len, opcodes);
-		else
+		} else {
+			kernel_syms_load(&dd);
 			if (json_output)
-				dump_xlated_json(buf, *member_len, opcodes);
+				dump_xlated_json(&dd, buf, *member_len, opcodes);
 			else
-				dump_xlated_plain(buf, *member_len, opcodes);
+				dump_xlated_plain(&dd, buf, *member_len, opcodes);
+			kernel_syms_destroy(&dd);
+		}
 	}
 
 	free(buf);
 
 	return 0;
 
 err_free:
tools/lib/bpf/libbpf.c

@@ -910,8 +910,9 @@ bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
 			   GELF_R_SYM(rel.r_info));
 		return -LIBBPF_ERRNO__FORMAT;
 	}
-	pr_debug("relo for %ld value %ld name %d\n",
-		 rel.r_info >> 32, sym.st_value, sym.st_name);
+	pr_debug("relo for %lld value %lld name %d\n",
+		 (long long) (rel.r_info >> 32),
+		 (long long) sym.st_value, sym.st_name);
 
 	if (sym.st_shndx != maps_shndx && sym.st_shndx != text_shndx) {
 		pr_warning("Program '%s' contains non-map related relo data pointing to section %u\n",
tools/testing/selftests/bpf/config

@@ -4,3 +4,4 @@ CONFIG_NET_CLS_BPF=m
 CONFIG_BPF_EVENTS=y
 CONFIG_TEST_BPF=m
 CONFIG_CGROUP_BPF=y
+CONFIG_NETDEVSIM=m
tools/testing/selftests/bpf/test_dev_cgroup.c

@@ -10,6 +10,8 @@
 #include <string.h>
 #include <errno.h>
 #include <assert.h>
+#include <sys/time.h>
+#include <sys/resource.h>
 
 #include <linux/bpf.h>
 #include <bpf/bpf.h>

@@ -23,15 +25,19 @@
 
 int main(int argc, char **argv)
 {
+	struct rlimit limit = { RLIM_INFINITY, RLIM_INFINITY };
 	struct bpf_object *obj;
 	int error = EXIT_FAILURE;
 	int prog_fd, cgroup_fd;
 	__u32 prog_cnt;
 
+	if (setrlimit(RLIMIT_MEMLOCK, &limit) < 0)
+		perror("Unable to lift memlock rlimit");
+
 	if (bpf_prog_load(DEV_CGROUP_PROG, BPF_PROG_TYPE_CGROUP_DEVICE,
 			  &obj, &prog_fd)) {
 		printf("Failed to load DEV_CGROUP program\n");
-		goto err;
+		goto out;
 	}
 
 	if (setup_cgroup_environment()) {

@@ -89,5 +95,6 @@ int main(int argc, char **argv)
 err:
 	cleanup_cgroup_environment();
 
+out:
 	return error;
 }
tools/testing/selftests/bpf/test_verifier.c

@@ -9272,6 +9272,196 @@ static struct bpf_test tests[] = {
 		.prog_type = BPF_PROG_TYPE_XDP,
 		.result = ACCEPT,
 	},
+	{
+		"calls: stack overflow using two frames (pre-call access)",
+		.insns = {
+		/* prog 1 */
+		BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+		BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1),
+		BPF_EXIT_INSN(),
+
+		/* prog 2 */
+		BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+		BPF_MOV64_IMM(BPF_REG_0, 0),
+		BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_XDP,
+		.errstr = "combined stack size",
+		.result = REJECT,
+	},
+	{
+		"calls: stack overflow using two frames (post-call access)",
+		.insns = {
+		/* prog 1 */
+		BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2),
+		BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+		BPF_EXIT_INSN(),
+
+		/* prog 2 */
+		BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+		BPF_MOV64_IMM(BPF_REG_0, 0),
+		BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_XDP,
+		.errstr = "combined stack size",
+		.result = REJECT,
+	},
+	{
+		"calls: stack depth check using three frames. test1",
+		.insns = {
+		/* main */
+		BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
+		BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
+		BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
+		BPF_MOV64_IMM(BPF_REG_0, 0),
+		BPF_EXIT_INSN(),
+		/* A */
+		BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
+		BPF_EXIT_INSN(),
+		/* B */
+		BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
+		BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
+		BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_XDP,
+		/* stack_main=32, stack_A=256, stack_B=64
+		 * and max(main+A, main+A+B) < 512
+		 */
+		.result = ACCEPT,
+	},
+	{
+		"calls: stack depth check using three frames. test2",
+		.insns = {
+		/* main */
+		BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
+		BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
+		BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
+		BPF_MOV64_IMM(BPF_REG_0, 0),
+		BPF_EXIT_INSN(),
+		/* A */
+		BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
+		BPF_EXIT_INSN(),
+		/* B */
+		BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
+		BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
+		BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_XDP,
+		/* stack_main=32, stack_A=64, stack_B=256
+		 * and max(main+A, main+A+B) < 512
+		 */
+		.result = ACCEPT,
+	},
+	{
+		"calls: stack depth check using three frames. test3",
+		.insns = {
+		/* main */
+		BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+		BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
+		BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+		BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 8), /* call B */
+		BPF_JMP_IMM(BPF_JGE, BPF_REG_6, 0, 1),
+		BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
+		BPF_MOV64_IMM(BPF_REG_0, 0),
+		BPF_EXIT_INSN(),
+		/* A */
+		BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 10, 1),
+		BPF_EXIT_INSN(),
+		BPF_ST_MEM(BPF_B, BPF_REG_10, -224, 0),
+		BPF_JMP_IMM(BPF_JA, 0, 0, -3),
+		/* B */
+		BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 1),
+		BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -6), /* call A */
+		BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
+		BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_XDP,
+		/* stack_main=64, stack_A=224, stack_B=256
+		 * and max(main+A, main+A+B) > 512
+		 */
+		.errstr = "combined stack",
+		.result = REJECT,
+	},
+	{
+		"calls: stack depth check using three frames. test4",
+		/* void main(void) {
+		 *	func1(0);
+		 *	func1(1);
+		 *	func2(1);
+		 * }
+		 * void func1(int alloc_or_recurse) {
+		 *	if (alloc_or_recurse) {
+		 *		frame_pointer[-300] = 1;
+		 *	} else {
+		 *		func2(alloc_or_recurse);
+		 *	}
+		 * }
+		 * void func2(int alloc_or_recurse) {
+		 *	if (alloc_or_recurse) {
+		 *		frame_pointer[-300] = 1;
+		 *	}
+		 * }
+		 */
+		.insns = {
+		/* main */
+		BPF_MOV64_IMM(BPF_REG_1, 0),
+		BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
+		BPF_MOV64_IMM(BPF_REG_1, 1),
+		BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
+		BPF_MOV64_IMM(BPF_REG_1, 1),
+		BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 7), /* call B */
+		BPF_MOV64_IMM(BPF_REG_0, 0),
+		BPF_EXIT_INSN(),
+		/* A */
+		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
+		BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+		BPF_EXIT_INSN(),
+		BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
+		BPF_EXIT_INSN(),
+		/* B */
+		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+		BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+		BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_XDP,
+		.result = REJECT,
+		.errstr = "combined stack",
+	},
+	{
+		"calls: stack depth check using three frames. test5",
+		.insns = {
+		/* main */
+		BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
+		BPF_EXIT_INSN(),
+		/* A */
+		BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
+		BPF_EXIT_INSN(),
+		/* B */
+		BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
+		BPF_EXIT_INSN(),
+		/* C */
+		BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
+		BPF_EXIT_INSN(),
+		/* D */
+		BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
+		BPF_EXIT_INSN(),
+		/* E */
+		BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
+		BPF_EXIT_INSN(),
+		/* F */
+		BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
+		BPF_EXIT_INSN(),
+		/* G */
+		BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
+		BPF_EXIT_INSN(),
+		/* H */
+		BPF_MOV64_IMM(BPF_REG_0, 0),
+		BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_XDP,
+		.errstr = "call stack",
+		.result = REJECT,
+	},
 	{
 		"calls: spill into caller stack frame",
 		.insns = {

@@ -10258,6 +10448,57 @@ static struct bpf_test tests[] = {
 		.result = REJECT,
 		.prog_type = BPF_PROG_TYPE_XDP,
 	},
+	{
+		"search pruning: all branches should be verified (nop operation)",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
+			BPF_MOV64_IMM(BPF_REG_4, 0),
+			BPF_JMP_A(1),
+			BPF_MOV64_IMM(BPF_REG_4, 1),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
+			BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_5, 0, 2),
+			BPF_MOV64_IMM(BPF_REG_6, 0),
+			BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xdead),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 3 },
+		.errstr = "R6 invalid mem access 'inv'",
+		.result = REJECT,
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+	},
+	{
+		"search pruning: all branches should be verified (invalid stack access)",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
+			BPF_MOV64_IMM(BPF_REG_4, 0),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
+			BPF_JMP_A(1),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -24),
+			BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 3 },
+		.errstr = "invalid read from stack off -16+0 size 8",
+		.result = REJECT,
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+	},
 };
 
 static int probe_filter_length(const struct bpf_insn *fp)