Merge branch 'filter-next'
Alexei Starovoitov says:
====================
net: filter: split sk_filter into socket and bpf, cleanup names
The main goal of the series is to split 'struct sk_filter' into socket and
bpf parts and clean up the names in the following way:
- everything that deals with sockets keeps 'sk_*' prefix
- everything that is pure BPF is changed to 'bpf_*' prefix
split 'struct sk_filter' into
struct sk_filter {
        atomic_t                refcnt;
        struct rcu_head         rcu;
        struct bpf_prog         *prog;
};
and
struct bpf_prog {
        u32                     jited:1,
                                len:31;
        struct sock_fprog_kern  *orig_prog;
        unsigned int            (*bpf_func)(const struct sk_buff *skb,
                                            const struct bpf_insn *filter);
        union {
                struct sock_filter      insns[0];
                struct bpf_insn         insnsi[0];
                struct work_struct      work;
        };
};
so that 'struct bpf_prog' can be used independently of sockets, and cleans up
the 'unattached' bpf use cases:
isdn, ppp, team, seccomp, ptp, xt_bpf, cls_bpf, test_bpf
which don't need the refcnt/rcu fields.
It's a follow-up to the rcu cleanup started by Pablo in
commit 34c5bd66e5 ("net: filter: don't release unattached filter through call_rcu()").
Patch 1 - cleans up socket memory charging and makes it possible for the functions
sk(bpf)_migrate_filter() and sk(bpf)_prepare_filter() to be socket-independent
Patches 2-4 - trivial renames
Patch 5 - sk_filter split and renames of related sk_*() functions
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
commit e339756c99
22 changed files with 242 additions and 237 deletions
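For orientation before the diff, here is a minimal sketch of what an
'unattached' filter user looks like after this series. The API names
(bpf_prog_create(), BPF_PROG_RUN(), bpf_prog_destroy()) and struct
sock_fprog_kern are taken from the patches below; the accept-all classic BPF
program and the helper function itself are illustrative assumptions, not code
from the series:

#include <linux/filter.h>

/* Illustrative sketch only -- not part of this series. */
static unsigned int example_unattached_filter(struct sk_buff *skb)
{
        struct sock_filter insns[] = {
                BPF_STMT(BPF_RET | BPF_K, 0xffffffff),  /* accept packet */
        };
        struct sock_fprog_kern fprog = {
                .len    = ARRAY_SIZE(insns),
                .filter = insns,
        };
        struct bpf_prog *fp;
        unsigned int res;

        /* was sk_unattached_filter_create() */
        if (bpf_prog_create(&fp, &fprog))
                return 0;

        res = BPF_PROG_RUN(fp, skb);    /* was SK_RUN_FILTER() */

        bpf_prog_destroy(fp);           /* was sk_unattached_filter_destroy() */
        return res;
}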
--- a/Documentation/networking/filter.txt
+++ b/Documentation/networking/filter.txt
@@ -586,12 +586,12 @@ team driver's classifier for its load-balancing mode, netfilter's xt_bpf
 extension, PTP dissector/classifier, and much more. They are all internally
 converted by the kernel into the new instruction set representation and run
 in the eBPF interpreter. For in-kernel handlers, this all works transparently
-by using sk_unattached_filter_create() for setting up the filter, resp.
-sk_unattached_filter_destroy() for destroying it. The macro
-SK_RUN_FILTER(filter, ctx) transparently invokes eBPF interpreter or JITed
-code to run the filter. 'filter' is a pointer to struct sk_filter that we
-got from sk_unattached_filter_create(), and 'ctx' the given context (e.g.
-skb pointer). All constraints and restrictions from sk_chk_filter() apply
+by using bpf_prog_create() for setting up the filter, resp.
+bpf_prog_destroy() for destroying it. The macro
+BPF_PROG_RUN(filter, ctx) transparently invokes eBPF interpreter or JITed
+code to run the filter. 'filter' is a pointer to struct bpf_prog that we
+got from bpf_prog_create(), and 'ctx' the given context (e.g.
+skb pointer). All constraints and restrictions from bpf_check_classic() apply
 before a conversion to the new layout is being done behind the scenes!

 Currently, the classic BPF format is being used for JITing on most of the
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -56,7 +56,7 @@
 #define FLAG_NEED_X_RESET       (1 << 0)

 struct jit_ctx {
-        const struct sk_filter *skf;
+        const struct bpf_prog *skf;
         unsigned idx;
         unsigned prologue_bytes;
         int ret0_fp_idx;
@@ -465,7 +465,7 @@ static inline void update_on_xread(struct jit_ctx *ctx)
 static int build_body(struct jit_ctx *ctx)
 {
         void *load_func[] = {jit_get_skb_b, jit_get_skb_h, jit_get_skb_w};
-        const struct sk_filter *prog = ctx->skf;
+        const struct bpf_prog *prog = ctx->skf;
         const struct sock_filter *inst;
         unsigned i, load_order, off, condt;
         int imm12;
@@ -857,7 +857,7 @@ static int build_body(struct jit_ctx *ctx)
 }


-void bpf_jit_compile(struct sk_filter *fp)
+void bpf_jit_compile(struct bpf_prog *fp)
 {
         struct jit_ctx ctx;
         unsigned tmp_idx;
@@ -926,7 +926,7 @@ void bpf_jit_compile(struct sk_filter *fp)
                 return;
 }

-void bpf_jit_free(struct sk_filter *fp)
+void bpf_jit_free(struct bpf_prog *fp)
 {
         if (fp->jited)
                 module_free(NULL, fp->bpf_func);
--- a/arch/mips/net/bpf_jit.c
+++ b/arch/mips/net/bpf_jit.c
@@ -131,7 +131,7 @@
  * @target: Memory location for the compiled filter
  */
 struct jit_ctx {
-        const struct sk_filter *skf;
+        const struct bpf_prog *skf;
         unsigned int prologue_bytes;
         u32 idx;
         u32 flags;
@@ -789,7 +789,7 @@ static int pkt_type_offset(void)
 static int build_body(struct jit_ctx *ctx)
 {
         void *load_func[] = {jit_get_skb_b, jit_get_skb_h, jit_get_skb_w};
-        const struct sk_filter *prog = ctx->skf;
+        const struct bpf_prog *prog = ctx->skf;
         const struct sock_filter *inst;
         unsigned int i, off, load_order, condt;
         u32 k, b_off __maybe_unused;
@@ -1369,7 +1369,7 @@ static int build_body(struct jit_ctx *ctx)

 int bpf_jit_enable __read_mostly;

-void bpf_jit_compile(struct sk_filter *fp)
+void bpf_jit_compile(struct bpf_prog *fp)
 {
         struct jit_ctx ctx;
         unsigned int alloc_size, tmp_idx;
@@ -1423,7 +1423,7 @@ void bpf_jit_compile(struct sk_filter *fp)
         kfree(ctx.offsets);
 }

-void bpf_jit_free(struct sk_filter *fp)
+void bpf_jit_free(struct bpf_prog *fp)
 {
         if (fp->jited)
                 module_free(NULL, fp->bpf_func);
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -25,7 +25,7 @@ static inline void bpf_flush_icache(void *start, void *end)
         flush_icache_range((unsigned long)start, (unsigned long)end);
 }

-static void bpf_jit_build_prologue(struct sk_filter *fp, u32 *image,
+static void bpf_jit_build_prologue(struct bpf_prog *fp, u32 *image,
                                    struct codegen_context *ctx)
 {
         int i;
@@ -121,7 +121,7 @@ static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
         ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)

 /* Assemble the body code between the prologue & epilogue. */
-static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
+static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
                               struct codegen_context *ctx,
                               unsigned int *addrs)
 {
@@ -569,7 +569,7 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
         return 0;
 }

-void bpf_jit_compile(struct sk_filter *fp)
+void bpf_jit_compile(struct bpf_prog *fp)
 {
         unsigned int proglen;
         unsigned int alloclen;
@@ -693,7 +693,7 @@ void bpf_jit_compile(struct sk_filter *fp)
                 return;
 }

-void bpf_jit_free(struct sk_filter *fp)
+void bpf_jit_free(struct bpf_prog *fp)
 {
         if (fp->jited)
                 module_free(NULL, fp->bpf_func);
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -812,7 +812,7 @@ static struct bpf_binary_header *bpf_alloc_binary(unsigned int bpfsize,
         return header;
 }

-void bpf_jit_compile(struct sk_filter *fp)
+void bpf_jit_compile(struct bpf_prog *fp)
 {
         struct bpf_binary_header *header = NULL;
         unsigned long size, prg_len, lit_len;
@@ -875,7 +875,7 @@ void bpf_jit_compile(struct sk_filter *fp)
         kfree(addrs);
 }

-void bpf_jit_free(struct sk_filter *fp)
+void bpf_jit_free(struct bpf_prog *fp)
 {
         unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
         struct bpf_binary_header *header = (void *)addr;
--- a/arch/sparc/net/bpf_jit_comp.c
+++ b/arch/sparc/net/bpf_jit_comp.c
@@ -354,7 +354,7 @@ do { *prog++ = BR_OPC | WDISP22(OFF); \
  * emit_jump() calls with adjusted offsets.
  */

-void bpf_jit_compile(struct sk_filter *fp)
+void bpf_jit_compile(struct bpf_prog *fp)
 {
         unsigned int cleanup_addr, proglen, oldproglen = 0;
         u32 temp[8], *prog, *func, seen = 0, pass;
@@ -808,7 +808,7 @@ cond_branch: f_offset = addrs[i + filter[i].jf];
                 return;
 }

-void bpf_jit_free(struct sk_filter *fp)
+void bpf_jit_free(struct bpf_prog *fp)
 {
         if (fp->jited)
                 module_free(NULL, fp->bpf_func);
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -211,7 +211,7 @@ struct jit_context {
         bool seen_ld_abs;
 };

-static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image,
+static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
                   int oldproglen, struct jit_context *ctx)
 {
         struct bpf_insn *insn = bpf_prog->insnsi;
@@ -235,7 +235,7 @@ static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image,
         /* mov qword ptr [rbp-X],rbx */
         EMIT3_off32(0x48, 0x89, 0x9D, -stacksize);

-        /* sk_convert_filter() maps classic BPF register X to R7 and uses R8
+        /* bpf_convert_filter() maps classic BPF register X to R7 and uses R8
          * as temporary, so all tcpdump filters need to spill/fill R7(r13) and
          * R8(r14). R9(r15) spill could be made conditional, but there is only
          * one 'bpf_error' return path out of helper functions inside bpf_jit.S
@@ -841,7 +841,7 @@ common_load: ctx->seen_ld_abs = true;
                         /* By design x64 JIT should support all BPF instructions
                          * This error will be seen if new instruction was added
                          * to interpreter, but not to JIT
-                         * or if there is junk in sk_filter
+                         * or if there is junk in bpf_prog
                          */
                         pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
                         return -EINVAL;
@@ -862,11 +862,11 @@ common_load: ctx->seen_ld_abs = true;
         return proglen;
 }

-void bpf_jit_compile(struct sk_filter *prog)
+void bpf_jit_compile(struct bpf_prog *prog)
 {
 }

-void bpf_int_jit_compile(struct sk_filter *prog)
+void bpf_int_jit_compile(struct bpf_prog *prog)
 {
         struct bpf_binary_header *header = NULL;
         int proglen, oldproglen = 0;
@@ -932,7 +932,7 @@ void bpf_int_jit_compile(struct sk_filter *prog)

 static void bpf_jit_free_deferred(struct work_struct *work)
 {
-        struct sk_filter *fp = container_of(work, struct sk_filter, work);
+        struct bpf_prog *fp = container_of(work, struct bpf_prog, work);
         unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
         struct bpf_binary_header *header = (void *)addr;

@@ -941,7 +941,7 @@ static void bpf_jit_free_deferred(struct work_struct *work)
         kfree(fp);
 }

-void bpf_jit_free(struct sk_filter *fp)
+void bpf_jit_free(struct bpf_prog *fp)
 {
         if (fp->jited) {
                 INIT_WORK(&fp->work, bpf_jit_free_deferred);
--- a/drivers/isdn/i4l/isdn_ppp.c
+++ b/drivers/isdn/i4l/isdn_ppp.c
@@ -379,12 +379,12 @@ isdn_ppp_release(int min, struct file *file)
 #endif
 #ifdef CONFIG_IPPP_FILTER
         if (is->pass_filter) {
-                sk_unattached_filter_destroy(is->pass_filter);
+                bpf_prog_destroy(is->pass_filter);
                 is->pass_filter = NULL;
         }

         if (is->active_filter) {
-                sk_unattached_filter_destroy(is->active_filter);
+                bpf_prog_destroy(is->active_filter);
                 is->active_filter = NULL;
         }
 #endif
@@ -639,12 +639,11 @@ isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg)
                 fprog.filter = code;

                 if (is->pass_filter) {
-                        sk_unattached_filter_destroy(is->pass_filter);
+                        bpf_prog_destroy(is->pass_filter);
                         is->pass_filter = NULL;
                 }
                 if (fprog.filter != NULL)
-                        err = sk_unattached_filter_create(&is->pass_filter,
-                                                          &fprog);
+                        err = bpf_prog_create(&is->pass_filter, &fprog);
                 else
                         err = 0;
                 kfree(code);
@@ -664,12 +663,11 @@ isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg)
                 fprog.filter = code;

                 if (is->active_filter) {
-                        sk_unattached_filter_destroy(is->active_filter);
+                        bpf_prog_destroy(is->active_filter);
                         is->active_filter = NULL;
                 }
                 if (fprog.filter != NULL)
-                        err = sk_unattached_filter_create(&is->active_filter,
-                                                          &fprog);
+                        err = bpf_prog_create(&is->active_filter, &fprog);
                 else
                         err = 0;
                 kfree(code);
@@ -1174,14 +1172,14 @@ isdn_ppp_push_higher(isdn_net_dev *net_dev, isdn_net_local *lp, struct sk_buff *
         }

         if (is->pass_filter
-            && SK_RUN_FILTER(is->pass_filter, skb) == 0) {
+            && BPF_PROG_RUN(is->pass_filter, skb) == 0) {
                 if (is->debug & 0x2)
                         printk(KERN_DEBUG "IPPP: inbound frame filtered.\n");
                 kfree_skb(skb);
                 return;
         }
         if (!(is->active_filter
-              && SK_RUN_FILTER(is->active_filter, skb) == 0)) {
+              && BPF_PROG_RUN(is->active_filter, skb) == 0)) {
                 if (is->debug & 0x2)
                         printk(KERN_DEBUG "IPPP: link-active filter: resetting huptimer.\n");
                 lp->huptimer = 0;
@@ -1320,14 +1318,14 @@ isdn_ppp_xmit(struct sk_buff *skb, struct net_device *netdev)
         }

         if (ipt->pass_filter
-            && SK_RUN_FILTER(ipt->pass_filter, skb) == 0) {
+            && BPF_PROG_RUN(ipt->pass_filter, skb) == 0) {
                 if (ipt->debug & 0x4)
                         printk(KERN_DEBUG "IPPP: outbound frame filtered.\n");
                 kfree_skb(skb);
                 goto unlock;
         }
         if (!(ipt->active_filter
-              && SK_RUN_FILTER(ipt->active_filter, skb) == 0)) {
+              && BPF_PROG_RUN(ipt->active_filter, skb) == 0)) {
                 if (ipt->debug & 0x4)
                         printk(KERN_DEBUG "IPPP: link-active filter: resetting huptimer.\n");
                 lp->huptimer = 0;
@@ -1517,9 +1515,9 @@ int isdn_ppp_autodial_filter(struct sk_buff *skb, isdn_net_local *lp)
         }

         drop |= is->pass_filter
-                && SK_RUN_FILTER(is->pass_filter, skb) == 0;
+                && BPF_PROG_RUN(is->pass_filter, skb) == 0;
         drop |= is->active_filter
-                && SK_RUN_FILTER(is->active_filter, skb) == 0;
+                && BPF_PROG_RUN(is->active_filter, skb) == 0;

         skb_push(skb, IPPP_MAX_HEADER - 4);
         return drop;
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -143,8 +143,8 @@ struct ppp {
         struct sk_buff_head mrq;        /* MP: receive reconstruction queue */
 #endif /* CONFIG_PPP_MULTILINK */
 #ifdef CONFIG_PPP_FILTER
-        struct sk_filter *pass_filter;  /* filter for packets to pass */
-        struct sk_filter *active_filter;/* filter for pkts to reset idle */
+        struct bpf_prog *pass_filter;   /* filter for packets to pass */
+        struct bpf_prog *active_filter; /* filter for pkts to reset idle */
 #endif /* CONFIG_PPP_FILTER */
         struct net *ppp_net;            /* the net we belong to */
         struct ppp_link_stats stats64;  /* 64 bit network stats */
@@ -762,12 +762,12 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)

                 ppp_lock(ppp);
                 if (ppp->pass_filter) {
-                        sk_unattached_filter_destroy(ppp->pass_filter);
+                        bpf_prog_destroy(ppp->pass_filter);
                         ppp->pass_filter = NULL;
                 }
                 if (fprog.filter != NULL)
-                        err = sk_unattached_filter_create(&ppp->pass_filter,
-                                                          &fprog);
+                        err = bpf_prog_create(&ppp->pass_filter,
+                                              &fprog);
                 else
                         err = 0;
                 kfree(code);
@@ -788,12 +788,12 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)

                 ppp_lock(ppp);
                 if (ppp->active_filter) {
-                        sk_unattached_filter_destroy(ppp->active_filter);
+                        bpf_prog_destroy(ppp->active_filter);
                         ppp->active_filter = NULL;
                 }
                 if (fprog.filter != NULL)
-                        err = sk_unattached_filter_create(&ppp->active_filter,
-                                                          &fprog);
+                        err = bpf_prog_create(&ppp->active_filter,
+                                              &fprog);
                 else
                         err = 0;
                 kfree(code);
@@ -1205,7 +1205,7 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
                    a four-byte PPP header on each packet */
                 *skb_push(skb, 2) = 1;
                 if (ppp->pass_filter &&
-                    SK_RUN_FILTER(ppp->pass_filter, skb) == 0) {
+                    BPF_PROG_RUN(ppp->pass_filter, skb) == 0) {
                         if (ppp->debug & 1)
                                 netdev_printk(KERN_DEBUG, ppp->dev,
                                               "PPP: outbound frame "
@@ -1215,7 +1215,7 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
                 }
                 /* if this packet passes the active filter, record the time */
                 if (!(ppp->active_filter &&
-                      SK_RUN_FILTER(ppp->active_filter, skb) == 0))
+                      BPF_PROG_RUN(ppp->active_filter, skb) == 0))
                         ppp->last_xmit = jiffies;
                 skb_pull(skb, 2);
 #else
@@ -1839,7 +1839,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)

                 *skb_push(skb, 2) = 0;
                 if (ppp->pass_filter &&
-                    SK_RUN_FILTER(ppp->pass_filter, skb) == 0) {
+                    BPF_PROG_RUN(ppp->pass_filter, skb) == 0) {
                         if (ppp->debug & 1)
                                 netdev_printk(KERN_DEBUG, ppp->dev,
                                               "PPP: inbound frame "
@@ -1848,7 +1848,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
                         return;
                 }
                 if (!(ppp->active_filter &&
-                      SK_RUN_FILTER(ppp->active_filter, skb) == 0))
+                      BPF_PROG_RUN(ppp->active_filter, skb) == 0))
                         ppp->last_recv = jiffies;
                 __skb_pull(skb, 2);
         } else
@@ -2829,12 +2829,12 @@ static void ppp_destroy_interface(struct ppp *ppp)
 #endif /* CONFIG_PPP_MULTILINK */
 #ifdef CONFIG_PPP_FILTER
         if (ppp->pass_filter) {
-                sk_unattached_filter_destroy(ppp->pass_filter);
+                bpf_prog_destroy(ppp->pass_filter);
                 ppp->pass_filter = NULL;
         }

         if (ppp->active_filter) {
-                sk_unattached_filter_destroy(ppp->active_filter);
+                bpf_prog_destroy(ppp->active_filter);
                 ppp->active_filter = NULL;
         }
 #endif /* CONFIG_PPP_FILTER */
--- a/drivers/net/team/team_mode_loadbalance.c
+++ b/drivers/net/team/team_mode_loadbalance.c
@@ -58,7 +58,7 @@ struct lb_priv_ex {
 };

 struct lb_priv {
-        struct sk_filter __rcu *fp;
+        struct bpf_prog __rcu *fp;
         lb_select_tx_port_func_t __rcu *select_tx_port_func;
         struct lb_pcpu_stats __percpu *pcpu_stats;
         struct lb_priv_ex *ex; /* priv extension */
@@ -174,14 +174,14 @@ static lb_select_tx_port_func_t *lb_select_tx_port_get_func(const char *name)
 static unsigned int lb_get_skb_hash(struct lb_priv *lb_priv,
                                     struct sk_buff *skb)
 {
-        struct sk_filter *fp;
+        struct bpf_prog *fp;
         uint32_t lhash;
         unsigned char *c;

         fp = rcu_dereference_bh(lb_priv->fp);
         if (unlikely(!fp))
                 return 0;
-        lhash = SK_RUN_FILTER(fp, skb);
+        lhash = BPF_PROG_RUN(fp, skb);
         c = (char *) &lhash;
         return c[0] ^ c[1] ^ c[2] ^ c[3];
 }
@@ -271,8 +271,8 @@ static void __fprog_destroy(struct sock_fprog_kern *fprog)
 static int lb_bpf_func_set(struct team *team, struct team_gsetter_ctx *ctx)
 {
         struct lb_priv *lb_priv = get_lb_priv(team);
-        struct sk_filter *fp = NULL;
-        struct sk_filter *orig_fp = NULL;
+        struct bpf_prog *fp = NULL;
+        struct bpf_prog *orig_fp = NULL;
         struct sock_fprog_kern *fprog = NULL;
         int err;

@@ -281,7 +281,7 @@ static int lb_bpf_func_set(struct team *team, struct team_gsetter_ctx *ctx)
                                    ctx->data.bin_val.ptr);
         if (err)
                 return err;
-        err = sk_unattached_filter_create(&fp, fprog);
+        err = bpf_prog_create(&fp, fprog);
         if (err) {
                 __fprog_destroy(fprog);
                 return err;
@@ -300,7 +300,7 @@ static int lb_bpf_func_set(struct team *team, struct team_gsetter_ctx *ctx)

         if (orig_fp) {
                 synchronize_rcu();
-                sk_unattached_filter_destroy(orig_fp);
+                bpf_prog_destroy(orig_fp);
         }
         return 0;
 }
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -296,7 +296,8 @@ enum {
 })

 /* Macro to invoke filter function. */
-#define SK_RUN_FILTER(filter, ctx) (*filter->bpf_func)(ctx, filter->insnsi)
+#define SK_RUN_FILTER(filter, ctx) \
+        (*filter->prog->bpf_func)(ctx, filter->prog->insnsi)

 struct bpf_insn {
         __u8 code;              /* opcode */
@@ -323,12 +324,10 @@ struct sk_buff;
 struct sock;
 struct seccomp_data;

-struct sk_filter {
-        atomic_t                refcnt;
+struct bpf_prog {
         u32                     jited:1,        /* Is our filter JIT'ed? */
                                 len:31;         /* Number of filter blocks */
         struct sock_fprog_kern  *orig_prog;     /* Original BPF program */
-        struct rcu_head         rcu;
         unsigned int            (*bpf_func)(const struct sk_buff *skb,
                                             const struct bpf_insn *filter);
         union {
@@ -338,39 +337,45 @@ struct sk_filter {
         };
 };

-static inline unsigned int sk_filter_size(unsigned int proglen)
+struct sk_filter {
+        atomic_t        refcnt;
+        struct rcu_head rcu;
+        struct bpf_prog *prog;
+};
+
+#define BPF_PROG_RUN(filter, ctx)  (*filter->bpf_func)(ctx, filter->insnsi)
+
+static inline unsigned int bpf_prog_size(unsigned int proglen)
 {
-        return max(sizeof(struct sk_filter),
-                   offsetof(struct sk_filter, insns[proglen]));
+        return max(sizeof(struct bpf_prog),
+                   offsetof(struct bpf_prog, insns[proglen]));
 }

-#define sk_filter_proglen(fprog) \
-                (fprog->len * sizeof(fprog->filter[0]))
+#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))

 int sk_filter(struct sock *sk, struct sk_buff *skb);

-void sk_filter_select_runtime(struct sk_filter *fp);
-void sk_filter_free(struct sk_filter *fp);
+void bpf_prog_select_runtime(struct bpf_prog *fp);
+void bpf_prog_free(struct bpf_prog *fp);

-int sk_convert_filter(struct sock_filter *prog, int len,
-                      struct bpf_insn *new_prog, int *new_len);
+int bpf_convert_filter(struct sock_filter *prog, int len,
+                       struct bpf_insn *new_prog, int *new_len);

-int sk_unattached_filter_create(struct sk_filter **pfp,
-                                struct sock_fprog_kern *fprog);
-void sk_unattached_filter_destroy(struct sk_filter *fp);
+int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog);
+void bpf_prog_destroy(struct bpf_prog *fp);

 int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
 int sk_detach_filter(struct sock *sk);

-int sk_chk_filter(const struct sock_filter *filter, unsigned int flen);
+int bpf_check_classic(const struct sock_filter *filter, unsigned int flen);
 int sk_get_filter(struct sock *sk, struct sock_filter __user *filter,
                   unsigned int len);

-void sk_filter_charge(struct sock *sk, struct sk_filter *fp);
+bool sk_filter_charge(struct sock *sk, struct sk_filter *fp);
 void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);

 u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
-void bpf_int_jit_compile(struct sk_filter *fp);
+void bpf_int_jit_compile(struct bpf_prog *fp);

 #define BPF_ANC BIT(15)

@@ -424,8 +429,8 @@ static inline void *bpf_load_pointer(const struct sk_buff *skb, int k,
 #include <linux/linkage.h>
 #include <linux/printk.h>

-void bpf_jit_compile(struct sk_filter *fp);
-void bpf_jit_free(struct sk_filter *fp);
+void bpf_jit_compile(struct bpf_prog *fp);
+void bpf_jit_free(struct bpf_prog *fp);

 static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
                                 u32 pass, void *image)
@@ -439,11 +444,11 @@ static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
 #else
 #include <linux/slab.h>

-static inline void bpf_jit_compile(struct sk_filter *fp)
+static inline void bpf_jit_compile(struct bpf_prog *fp)
 {
 }

-static inline void bpf_jit_free(struct sk_filter *fp)
+static inline void bpf_jit_free(struct bpf_prog *fp)
 {
         kfree(fp);
 }
--- a/include/linux/isdn_ppp.h
+++ b/include/linux/isdn_ppp.h
@@ -180,8 +180,8 @@ struct ippp_struct {
         struct slcompress *slcomp;
 #endif
 #ifdef CONFIG_IPPP_FILTER
-        struct sk_filter *pass_filter;   /* filter for packets to pass */
-        struct sk_filter *active_filter; /* filter for pkts to reset idle */
+        struct bpf_prog *pass_filter;    /* filter for packets to pass */
+        struct bpf_prog *active_filter;  /* filter for pkts to reset idle */
 #endif
         unsigned long debug;
         struct isdn_ppp_compressor *compressor,*decompressor;
--- a/include/uapi/linux/netfilter/xt_bpf.h
+++ b/include/uapi/linux/netfilter/xt_bpf.h
@@ -6,14 +6,14 @@

 #define XT_BPF_MAX_NUM_INSTR 64

-struct sk_filter;
+struct bpf_prog;

 struct xt_bpf_info {
         __u16 bpf_program_num_elem;
         struct sock_filter bpf_program[XT_BPF_MAX_NUM_INSTR];

         /* only used in the kernel */
-        struct sk_filter *filter __attribute__((aligned(8)));
+        struct bpf_prog *filter __attribute__((aligned(8)));
 };

 #endif /*_XT_BPF_H */
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -18,7 +18,7 @@
  * 2 of the License, or (at your option) any later version.
  *
  * Andi Kleen - Fix a few bad bugs and races.
- * Kris Katterjohn - Added many additional checks in sk_chk_filter()
+ * Kris Katterjohn - Added many additional checks in bpf_check_classic()
  */
 #include <linux/filter.h>
 #include <linux/skbuff.h>
@@ -73,15 +73,13 @@ noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 }

 /**
- *      __sk_run_filter - run a filter on a given context
- *      @ctx: buffer to run the filter on
- *      @insn: filter to apply
+ *      __bpf_prog_run - run eBPF program on a given context
+ *      @ctx: is the data we are operating on
+ *      @insn: is the array of eBPF instructions
  *
- * Decode and apply filter instructions to the skb->data. Return length to
- * keep, 0 for none. @ctx is the data we are operating on, @insn is the
- * array of filter instructions.
+ * Decode and execute eBPF instructions.
  */
-static unsigned int __sk_run_filter(void *ctx, const struct bpf_insn *insn)
+static unsigned int __bpf_prog_run(void *ctx, const struct bpf_insn *insn)
 {
         u64 stack[MAX_BPF_STACK / sizeof(u64)];
         u64 regs[MAX_BPF_REG], tmp;
@@ -446,7 +444,7 @@ static unsigned int __sk_run_filter(void *ctx, const struct bpf_insn *insn)
                 /* BPF_LD + BPD_ABS and BPF_LD + BPF_IND insns are
                  * only appearing in the programs where ctx ==
                  * skb. All programs keep 'ctx' in regs[BPF_REG_CTX]
-                 * == BPF_R6, sk_convert_filter() saves it in BPF_R6,
+                 * == BPF_R6, bpf_convert_filter() saves it in BPF_R6,
                  * internal BPF verifier will check that BPF_R6 ==
                  * ctx.
                  *
@@ -508,29 +506,29 @@ static unsigned int __sk_run_filter(void *ctx, const struct bpf_insn *insn)
                 return 0;
 }

-void __weak bpf_int_jit_compile(struct sk_filter *prog)
+void __weak bpf_int_jit_compile(struct bpf_prog *prog)
 {
 }

 /**
- *      sk_filter_select_runtime - select execution runtime for BPF program
- *      @fp: sk_filter populated with internal BPF program
+ *      bpf_prog_select_runtime - select execution runtime for BPF program
+ *      @fp: bpf_prog populated with internal BPF program
  *
  * try to JIT internal BPF program, if JIT is not available select interpreter
- * BPF program will be executed via SK_RUN_FILTER() macro
+ * BPF program will be executed via BPF_PROG_RUN() macro
  */
-void sk_filter_select_runtime(struct sk_filter *fp)
+void bpf_prog_select_runtime(struct bpf_prog *fp)
 {
-        fp->bpf_func = (void *) __sk_run_filter;
+        fp->bpf_func = (void *) __bpf_prog_run;

         /* Probe if internal BPF can be JITed */
         bpf_int_jit_compile(fp);
 }
-EXPORT_SYMBOL_GPL(sk_filter_select_runtime);
+EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);

 /* free internal BPF program */
-void sk_filter_free(struct sk_filter *fp)
+void bpf_prog_free(struct bpf_prog *fp)
 {
         bpf_jit_free(fp);
 }
-EXPORT_SYMBOL_GPL(sk_filter_free);
+EXPORT_SYMBOL_GPL(bpf_prog_free);
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -54,7 +54,7 @@
 struct seccomp_filter {
         atomic_t usage;
         struct seccomp_filter *prev;
-        struct sk_filter *prog;
+        struct bpf_prog *prog;
 };

 /* Limit any path through the tree to 256KB worth of instructions. */
@@ -87,7 +87,7 @@ static void populate_seccomp_data(struct seccomp_data *sd)
  * @filter: filter to verify
  * @flen: length of filter
  *
- * Takes a previously checked filter (by sk_chk_filter) and
+ * Takes a previously checked filter (by bpf_check_classic) and
  * redirects all filter code that loads struct sk_buff data
  * and related data through seccomp_bpf_load. It also
  * enforces length and alignment checking of those loads.
@@ -187,7 +187,7 @@ static u32 seccomp_run_filters(int syscall)
          * value always takes priority (ignoring the DATA).
          */
         for (f = current->seccomp.filter; f; f = f->prev) {
-                u32 cur_ret = SK_RUN_FILTER(f->prog, (void *)&sd);
+                u32 cur_ret = BPF_PROG_RUN(f->prog, (void *)&sd);

                 if ((cur_ret & SECCOMP_RET_ACTION) < (ret & SECCOMP_RET_ACTION))
                         ret = cur_ret;
@@ -239,7 +239,7 @@ static long seccomp_attach_filter(struct sock_fprog *fprog)
                 goto free_prog;

         /* Check and rewrite the fprog via the skb checker */
-        ret = sk_chk_filter(fp, fprog->len);
+        ret = bpf_check_classic(fp, fprog->len);
         if (ret)
                 goto free_prog;

@@ -249,7 +249,7 @@ static long seccomp_attach_filter(struct sock_fprog *fprog)
                 goto free_prog;

         /* Convert 'sock_filter' insns to 'bpf_insn' insns */
-        ret = sk_convert_filter(fp, fprog->len, NULL, &new_len);
+        ret = bpf_convert_filter(fp, fprog->len, NULL, &new_len);
         if (ret)
                 goto free_prog;

@@ -260,12 +260,12 @@ static long seccomp_attach_filter(struct sock_fprog *fprog)
         if (!filter)
                 goto free_prog;

-        filter->prog = kzalloc(sk_filter_size(new_len),
+        filter->prog = kzalloc(bpf_prog_size(new_len),
                                GFP_KERNEL|__GFP_NOWARN);
         if (!filter->prog)
                 goto free_filter;

-        ret = sk_convert_filter(fp, fprog->len, filter->prog->insnsi, &new_len);
+        ret = bpf_convert_filter(fp, fprog->len, filter->prog->insnsi, &new_len);
         if (ret)
                 goto free_filter_prog;
         kfree(fp);
@@ -273,7 +273,7 @@ static long seccomp_attach_filter(struct sock_fprog *fprog)
         atomic_set(&filter->usage, 1);
         filter->prog->len = new_len;

-        sk_filter_select_runtime(filter->prog);
+        bpf_prog_select_runtime(filter->prog);

         /*
          * If there is an existing filter, make it the prev and don't drop its
@@ -337,7 +337,7 @@ void put_seccomp_filter(struct task_struct *tsk)
         while (orig && atomic_dec_and_test(&orig->usage)) {
                 struct seccomp_filter *freeme = orig;
                 orig = orig->prev;
-                sk_filter_free(freeme->prog);
+                bpf_prog_free(freeme->prog);
                 kfree(freeme);
         }
 }
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -1761,9 +1761,9 @@ static int probe_filter_length(struct sock_filter *fp)
         return len + 1;
 }

-static struct sk_filter *generate_filter(int which, int *err)
+static struct bpf_prog *generate_filter(int which, int *err)
 {
-        struct sk_filter *fp;
+        struct bpf_prog *fp;
         struct sock_fprog_kern fprog;
         unsigned int flen = probe_filter_length(tests[which].u.insns);
         __u8 test_type = tests[which].aux & TEST_TYPE_MASK;
@@ -1773,7 +1773,7 @@ static struct sk_filter *generate_filter(int which, int *err)
                 fprog.filter = tests[which].u.insns;
                 fprog.len = flen;

-                *err = sk_unattached_filter_create(&fp, &fprog);
+                *err = bpf_prog_create(&fp, &fprog);
                 if (tests[which].aux & FLAG_EXPECTED_FAIL) {
                         if (*err == -EINVAL) {
                                 pr_cont("PASS\n");
@@ -1798,7 +1798,7 @@ static struct sk_filter *generate_filter(int which, int *err)
                 break;

         case INTERNAL:
-                fp = kzalloc(sk_filter_size(flen), GFP_KERNEL);
+                fp = kzalloc(bpf_prog_size(flen), GFP_KERNEL);
                 if (fp == NULL) {
                         pr_cont("UNEXPECTED_FAIL no memory left\n");
                         *err = -ENOMEM;
@@ -1809,7 +1809,7 @@ static struct sk_filter *generate_filter(int which, int *err)
                 memcpy(fp->insnsi, tests[which].u.insns_int,
                        fp->len * sizeof(struct bpf_insn));

-                sk_filter_select_runtime(fp);
+                bpf_prog_select_runtime(fp);
                 break;
         }

@@ -1817,21 +1817,21 @@ static struct sk_filter *generate_filter(int which, int *err)
         return fp;
 }

-static void release_filter(struct sk_filter *fp, int which)
+static void release_filter(struct bpf_prog *fp, int which)
 {
         __u8 test_type = tests[which].aux & TEST_TYPE_MASK;

         switch (test_type) {
         case CLASSIC:
-                sk_unattached_filter_destroy(fp);
+                bpf_prog_destroy(fp);
                 break;
         case INTERNAL:
-                sk_filter_free(fp);
+                bpf_prog_free(fp);
                 break;
         }
 }

-static int __run_one(const struct sk_filter *fp, const void *data,
+static int __run_one(const struct bpf_prog *fp, const void *data,
                      int runs, u64 *duration)
 {
         u64 start, finish;
@@ -1840,7 +1840,7 @@ static int __run_one(const struct sk_filter *fp, const void *data,
         start = ktime_to_us(ktime_get());

         for (i = 0; i < runs; i++)
-                ret = SK_RUN_FILTER(fp, data);
+                ret = BPF_PROG_RUN(fp, data);

         finish = ktime_to_us(ktime_get());

@@ -1850,7 +1850,7 @@ static int __run_one(const struct sk_filter *fp, const void *data,
         return ret;
 }

-static int run_one(const struct sk_filter *fp, struct bpf_test *test)
+static int run_one(const struct bpf_prog *fp, struct bpf_test *test)
 {
         int err_cnt = 0, i, runs = MAX_TESTRUNS;

@@ -1884,7 +1884,7 @@ static __init int test_bpf(void)
         int i, err_cnt = 0, pass_cnt = 0;

         for (i = 0; i < ARRAY_SIZE(tests); i++) {
-                struct sk_filter *fp;
+                struct bpf_prog *fp;
                 int err;

                 pr_info("#%d %s ", i, tests[i].descr);
@ -18,7 +18,7 @@
|
||||||
* 2 of the License, or (at your option) any later version.
|
* 2 of the License, or (at your option) any later version.
|
||||||
*
|
*
|
||||||
* Andi Kleen - Fix a few bad bugs and races.
|
* Andi Kleen - Fix a few bad bugs and races.
|
||||||
* Kris Katterjohn - Added many additional checks in sk_chk_filter()
|
* Kris Katterjohn - Added many additional checks in bpf_check_classic()
|
||||||
*/
|
*/
|
||||||
|
|
||||||
#include <linux/module.h>
|
#include <linux/module.h>
|
||||||
|
@ -312,7 +312,7 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* sk_convert_filter - convert filter program
|
* bpf_convert_filter - convert filter program
|
||||||
* @prog: the user passed filter program
|
* @prog: the user passed filter program
|
||||||
* @len: the length of the user passed filter program
|
* @len: the length of the user passed filter program
|
||||||
* @new_prog: buffer where converted program will be stored
|
* @new_prog: buffer where converted program will be stored
|
||||||
|
@ -322,12 +322,12 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
|
||||||
* Conversion workflow:
|
* Conversion workflow:
|
||||||
*
|
*
|
||||||
* 1) First pass for calculating the new program length:
|
* 1) First pass for calculating the new program length:
|
||||||
* sk_convert_filter(old_prog, old_len, NULL, &new_len)
|
* bpf_convert_filter(old_prog, old_len, NULL, &new_len)
|
||||||
*
|
*
|
||||||
* 2) 2nd pass to remap in two passes: 1st pass finds new
|
* 2) 2nd pass to remap in two passes: 1st pass finds new
|
||||||
* jump offsets, 2nd pass remapping:
|
* jump offsets, 2nd pass remapping:
|
||||||
* new_prog = kmalloc(sizeof(struct bpf_insn) * new_len);
|
* new_prog = kmalloc(sizeof(struct bpf_insn) * new_len);
|
||||||
* sk_convert_filter(old_prog, old_len, new_prog, &new_len);
|
* bpf_convert_filter(old_prog, old_len, new_prog, &new_len);
|
||||||
*
|
*
|
||||||
* User BPF's register A is mapped to our BPF register 6, user BPF
|
* User BPF's register A is mapped to our BPF register 6, user BPF
|
||||||
* register X is mapped to BPF register 7; frame pointer is always
|
* register X is mapped to BPF register 7; frame pointer is always
|
||||||
|
@ -335,8 +335,8 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
|
||||||
* for socket filters: ctx == 'struct sk_buff *', for seccomp:
|
* for socket filters: ctx == 'struct sk_buff *', for seccomp:
|
||||||
* ctx == 'struct seccomp_data *'.
|
* ctx == 'struct seccomp_data *'.
|
||||||
*/
|
*/
|
||||||
int sk_convert_filter(struct sock_filter *prog, int len,
|
int bpf_convert_filter(struct sock_filter *prog, int len,
|
||||||
struct bpf_insn *new_prog, int *new_len)
|
struct bpf_insn *new_prog, int *new_len)
|
||||||
{
|
{
|
||||||
int new_flen = 0, pass = 0, target, i;
|
int new_flen = 0, pass = 0, target, i;
|
||||||
struct bpf_insn *new_insn;
|
struct bpf_insn *new_insn;
|
||||||
|
@ -721,7 +721,7 @@ static bool chk_code_allowed(u16 code_to_probe)
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* sk_chk_filter - verify socket filter code
|
* bpf_check_classic - verify socket filter code
|
||||||
* @filter: filter to verify
|
* @filter: filter to verify
|
||||||
* @flen: length of filter
|
* @flen: length of filter
|
||||||
*
|
*
|
||||||
|
@ -734,7 +734,7 @@ static bool chk_code_allowed(u16 code_to_probe)
|
||||||
*
|
*
|
||||||
* Returns 0 if the rule set is legal or -EINVAL if not.
|
* Returns 0 if the rule set is legal or -EINVAL if not.
|
||||||
*/
|
*/
|
||||||
int sk_chk_filter(const struct sock_filter *filter, unsigned int flen)
|
int bpf_check_classic(const struct sock_filter *filter, unsigned int flen)
|
||||||
{
|
{
|
||||||
bool anc_found;
|
bool anc_found;
|
||||||
int pc;
|
int pc;
|
||||||
|
@ -808,12 +808,12 @@ int sk_chk_filter(const struct sock_filter *filter, unsigned int flen)
|
||||||
|
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL(sk_chk_filter);
|
EXPORT_SYMBOL(bpf_check_classic);
|
||||||
|
|
||||||
static int sk_store_orig_filter(struct sk_filter *fp,
|
static int bpf_prog_store_orig_filter(struct bpf_prog *fp,
|
||||||
const struct sock_fprog *fprog)
|
const struct sock_fprog *fprog)
|
||||||
{
|
{
|
||||||
unsigned int fsize = sk_filter_proglen(fprog);
|
unsigned int fsize = bpf_classic_proglen(fprog);
|
||||||
struct sock_fprog_kern *fkprog;
|
struct sock_fprog_kern *fkprog;
|
||||||
|
|
||||||
fp->orig_prog = kmalloc(sizeof(*fkprog), GFP_KERNEL);
|
fp->orig_prog = kmalloc(sizeof(*fkprog), GFP_KERNEL);
|
||||||
|
@ -831,7 +831,7 @@ static int sk_store_orig_filter(struct sk_filter *fp,
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void sk_release_orig_filter(struct sk_filter *fp)
|
static void bpf_release_orig_filter(struct bpf_prog *fp)
|
||||||
{
|
{
|
||||||
struct sock_fprog_kern *fprog = fp->orig_prog;
|
struct sock_fprog_kern *fprog = fp->orig_prog;
|
||||||
|
|
||||||
|
@ -841,10 +841,16 @@ static void sk_release_orig_filter(struct sk_filter *fp)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void __bpf_prog_release(struct bpf_prog *prog)
|
||||||
|
{
|
||||||
|
bpf_release_orig_filter(prog);
|
||||||
|
bpf_prog_free(prog);
|
||||||
|
}
|
||||||
|
|
||||||
static void __sk_filter_release(struct sk_filter *fp)
|
static void __sk_filter_release(struct sk_filter *fp)
|
||||||
{
|
{
|
||||||
sk_release_orig_filter(fp);
|
__bpf_prog_release(fp->prog);
|
||||||
sk_filter_free(fp);
|
kfree(fp);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -872,44 +878,33 @@ static void sk_filter_release(struct sk_filter *fp)
|
||||||
|
|
||||||
void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
|
void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
|
||||||
{
|
{
|
||||||
atomic_sub(sk_filter_size(fp->len), &sk->sk_omem_alloc);
|
u32 filter_size = bpf_prog_size(fp->prog->len);
|
||||||
|
|
||||||
|
atomic_sub(filter_size, &sk->sk_omem_alloc);
|
||||||
sk_filter_release(fp);
|
sk_filter_release(fp);
|
||||||
}
|
}
|
||||||
|
|
||||||
void sk_filter_charge(struct sock *sk, struct sk_filter *fp)
|
/* try to charge the socket memory if there is space available
|
||||||
|
* return true on success
|
||||||
|
*/
|
||||||
|
bool sk_filter_charge(struct sock *sk, struct sk_filter *fp)
|
||||||
{
|
{
|
||||||
atomic_inc(&fp->refcnt);
|
u32 filter_size = bpf_prog_size(fp->prog->len);
|
||||||
atomic_add(sk_filter_size(fp->len), &sk->sk_omem_alloc);
|
|
||||||
}
|
|
||||||
|
|
||||||
static struct sk_filter *__sk_migrate_realloc(struct sk_filter *fp,
|
/* same check as in sock_kmalloc() */
|
||||||
struct sock *sk,
|
if (filter_size <= sysctl_optmem_max &&
|
||||||
unsigned int len)
|
atomic_read(&sk->sk_omem_alloc) + filter_size < sysctl_optmem_max) {
|
||||||
{
|
atomic_inc(&fp->refcnt);
|
||||||
struct sk_filter *fp_new;
|
atomic_add(filter_size, &sk->sk_omem_alloc);
|
||||||
|
return true;
|
||||||
if (sk == NULL)
|
|
||||||
return krealloc(fp, len, GFP_KERNEL);
|
|
||||||
|
|
||||||
fp_new = sock_kmalloc(sk, len, GFP_KERNEL);
|
|
||||||
if (fp_new) {
|
|
||||||
*fp_new = *fp;
|
|
||||||
/* As we're keeping orig_prog in fp_new along,
|
|
||||||
* we need to make sure we're not evicting it
|
|
||||||
* from the old fp.
|
|
||||||
*/
|
|
||||||
fp->orig_prog = NULL;
|
|
||||||
sk_filter_uncharge(sk, fp);
|
|
||||||
}
|
}
|
||||||
|
return false;
|
||||||
return fp_new;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp,
|
static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
|
||||||
struct sock *sk)
|
|
||||||
{
|
{
|
||||||
struct sock_filter *old_prog;
|
struct sock_filter *old_prog;
|
||||||
struct sk_filter *old_fp;
|
struct bpf_prog *old_fp;
|
||||||
int err, new_len, old_len = fp->len;
|
int err, new_len, old_len = fp->len;
|
||||||
|
|
||||||
/* We are free to overwrite insns et al right here as it
|
/* We are free to overwrite insns et al right here as it
|
||||||
|
@ -932,13 +927,13 @@ static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp,
|
||||||
}
|
}
|
||||||
|
|
||||||
/* 1st pass: calculate the new program length. */
|
/* 1st pass: calculate the new program length. */
|
||||||
err = sk_convert_filter(old_prog, old_len, NULL, &new_len);
|
err = bpf_convert_filter(old_prog, old_len, NULL, &new_len);
|
||||||
if (err)
|
if (err)
|
||||||
goto out_err_free;
|
goto out_err_free;
|
||||||
|
|
||||||
/* Expand fp for appending the new filter representation. */
|
/* Expand fp for appending the new filter representation. */
|
||||||
old_fp = fp;
|
old_fp = fp;
|
||||||
fp = __sk_migrate_realloc(old_fp, sk, sk_filter_size(new_len));
|
fp = krealloc(old_fp, bpf_prog_size(new_len), GFP_KERNEL);
|
||||||
if (!fp) {
|
if (!fp) {
|
||||||
/* The old_fp is still around in case we couldn't
|
/* The old_fp is still around in case we couldn't
|
||||||
* allocate new memory, so uncharge on that one.
|
* allocate new memory, so uncharge on that one.
|
||||||
|
@ -951,16 +946,16 @@ static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp,
|
||||||
fp->len = new_len;
|
fp->len = new_len;
|
||||||
|
|
||||||
/* 2nd pass: remap sock_filter insns into bpf_insn insns. */
|
/* 2nd pass: remap sock_filter insns into bpf_insn insns. */
|
||||||
err = sk_convert_filter(old_prog, old_len, fp->insnsi, &new_len);
|
err = bpf_convert_filter(old_prog, old_len, fp->insnsi, &new_len);
|
||||||
if (err)
|
if (err)
|
||||||
/* 2nd sk_convert_filter() can fail only if it fails
|
/* 2nd bpf_convert_filter() can fail only if it fails
|
||||||
* to allocate memory, remapping must succeed. Note,
|
* to allocate memory, remapping must succeed. Note,
|
||||||
* that at this time old_fp has already been released
|
* that at this time old_fp has already been released
|
||||||
* by __sk_migrate_realloc().
|
* by krealloc().
|
||||||
*/
|
*/
|
||||||
goto out_err_free;
|
goto out_err_free;
|
||||||
|
|
||||||
sk_filter_select_runtime(fp);
|
bpf_prog_select_runtime(fp);
|
||||||
|
|
||||||
kfree(old_prog);
|
kfree(old_prog);
|
||||||
return fp;
|
return fp;
|
||||||
@@ -968,28 +963,20 @@ static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp,
 out_err_free:
 	kfree(old_prog);
 out_err:
-	/* Rollback filter setup. */
-	if (sk != NULL)
-		sk_filter_uncharge(sk, fp);
-	else
-		kfree(fp);
+	__bpf_prog_release(fp);
 	return ERR_PTR(err);
 }

-static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp,
-					     struct sock *sk)
+static struct bpf_prog *bpf_prepare_filter(struct bpf_prog *fp)
 {
 	int err;

 	fp->bpf_func = NULL;
 	fp->jited = 0;

-	err = sk_chk_filter(fp->insns, fp->len);
+	err = bpf_check_classic(fp->insns, fp->len);
 	if (err) {
-		if (sk != NULL)
-			sk_filter_uncharge(sk, fp);
-		else
-			kfree(fp);
+		__bpf_prog_release(fp);
 		return ERR_PTR(err);
 	}

@@ -1002,13 +989,13 @@ static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp,
 	 * internal BPF translation for the optimized interpreter.
 	 */
 	if (!fp->jited)
-		fp = __sk_migrate_filter(fp, sk);
+		fp = bpf_migrate_filter(fp);

 	return fp;
 }

 /**
- *	sk_unattached_filter_create - create an unattached filter
+ *	bpf_prog_create - create an unattached filter
  *	@pfp: the unattached filter that is created
  *	@fprog: the filter program
  *
@@ -1017,23 +1004,21 @@ static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp,
  *	If an error occurs or there is insufficient memory for the filter
  *	a negative errno code is returned. On success the return is zero.
  */
-int sk_unattached_filter_create(struct sk_filter **pfp,
-				struct sock_fprog_kern *fprog)
+int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog)
 {
-	unsigned int fsize = sk_filter_proglen(fprog);
-	struct sk_filter *fp;
+	unsigned int fsize = bpf_classic_proglen(fprog);
+	struct bpf_prog *fp;

 	/* Make sure new filter is there and in the right amounts. */
 	if (fprog->filter == NULL)
 		return -EINVAL;

-	fp = kmalloc(sk_filter_size(fprog->len), GFP_KERNEL);
+	fp = kmalloc(bpf_prog_size(fprog->len), GFP_KERNEL);
 	if (!fp)
 		return -ENOMEM;

 	memcpy(fp->insns, fprog->filter, fsize);

-	atomic_set(&fp->refcnt, 1);
 	fp->len = fprog->len;
 	/* Since unattached filters are not copied back to user
 	 * space through sk_get_filter(), we do not need to hold
@@ -1041,23 +1026,23 @@ int sk_unattached_filter_create(struct sk_filter **pfp,
 	 */
 	fp->orig_prog = NULL;

-	/* __sk_prepare_filter() already takes care of uncharging
+	/* bpf_prepare_filter() already takes care of freeing
 	 * memory in case something goes wrong.
 	 */
-	fp = __sk_prepare_filter(fp, NULL);
+	fp = bpf_prepare_filter(fp);
 	if (IS_ERR(fp))
 		return PTR_ERR(fp);

 	*pfp = fp;
 	return 0;
 }
-EXPORT_SYMBOL_GPL(sk_unattached_filter_create);
+EXPORT_SYMBOL_GPL(bpf_prog_create);

-void sk_unattached_filter_destroy(struct sk_filter *fp)
+void bpf_prog_destroy(struct bpf_prog *fp)
 {
-	__sk_filter_release(fp);
+	__bpf_prog_release(fp);
 }
-EXPORT_SYMBOL_GPL(sk_unattached_filter_destroy);
+EXPORT_SYMBOL_GPL(bpf_prog_destroy);

 /**
  *	sk_attach_filter - attach a socket filter
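Taken together, the two exports above give unattached users the whole lifecycle under the new names. A hedged sketch of a consumer in the style of ptp/xt_bpf further down (the one-instruction accept-all program is made up for illustration; 'skb' is assumed to come from the caller's context):

	/* classic BPF: unconditionally return (accept) the full packet */
	static struct sock_filter example_insns[] = {
		BPF_STMT(BPF_RET | BPF_K, 0xffffffff),
	};
	static struct sock_fprog_kern example_fprog = {
		.len	= ARRAY_SIZE(example_insns),
		.filter	= example_insns,
	};
	static struct bpf_prog *example_prog;

	/* setup: checks the classic program, then converts or JITs it */
	err = bpf_prog_create(&example_prog, &example_fprog);
	if (err)
		return err;

	/* datapath: interpreter or JITed code, chosen transparently */
	res = BPF_PROG_RUN(example_prog, skb);

	/* teardown */
	bpf_prog_destroy(example_prog);
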
@@ -1072,8 +1057,9 @@ EXPORT_SYMBOL_GPL(sk_unattached_filter_destroy);
 int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
 {
 	struct sk_filter *fp, *old_fp;
-	unsigned int fsize = sk_filter_proglen(fprog);
-	unsigned int sk_fsize = sk_filter_size(fprog->len);
+	unsigned int fsize = bpf_classic_proglen(fprog);
+	unsigned int bpf_fsize = bpf_prog_size(fprog->len);
+	struct bpf_prog *prog;
 	int err;

 	if (sock_flag(sk, SOCK_FILTER_LOCKED))
@@ -1083,30 +1069,43 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
 	if (fprog->filter == NULL)
 		return -EINVAL;

-	fp = sock_kmalloc(sk, sk_fsize, GFP_KERNEL);
-	if (!fp)
+	prog = kmalloc(bpf_fsize, GFP_KERNEL);
+	if (!prog)
 		return -ENOMEM;

-	if (copy_from_user(fp->insns, fprog->filter, fsize)) {
-		sock_kfree_s(sk, fp, sk_fsize);
+	if (copy_from_user(prog->insns, fprog->filter, fsize)) {
+		kfree(prog);
 		return -EFAULT;
 	}

-	atomic_set(&fp->refcnt, 1);
-	fp->len = fprog->len;
+	prog->len = fprog->len;

-	err = sk_store_orig_filter(fp, fprog);
+	err = bpf_prog_store_orig_filter(prog, fprog);
 	if (err) {
-		sk_filter_uncharge(sk, fp);
+		kfree(prog);
 		return -ENOMEM;
 	}

-	/* __sk_prepare_filter() already takes care of uncharging
+	/* bpf_prepare_filter() already takes care of freeing
 	 * memory in case something goes wrong.
 	 */
-	fp = __sk_prepare_filter(fp, sk);
-	if (IS_ERR(fp))
-		return PTR_ERR(fp);
+	prog = bpf_prepare_filter(prog);
+	if (IS_ERR(prog))
+		return PTR_ERR(prog);
+
+	fp = kmalloc(sizeof(*fp), GFP_KERNEL);
+	if (!fp) {
+		__bpf_prog_release(prog);
+		return -ENOMEM;
+	}
+	fp->prog = prog;
+
+	atomic_set(&fp->refcnt, 0);
+
+	if (!sk_filter_charge(sk, fp)) {
+		__sk_filter_release(fp);
+		return -ENOMEM;
+	}

 	old_fp = rcu_dereference_protected(sk->sk_filter,
 					   sock_owned_by_user(sk));
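Note the ordering in the rewritten attach path: the flat bpf_prog is fully prepared first, then wrapped, and the wrapper's refcount deliberately starts at zero so that sk_filter_charge() takes the socket's first reference as part of the accounting step. On the read side, the new layout implies one extra dereference, roughly as follows (a sketch of the implied fast path, not a quote from the tree):

	/* read side, under RCU: unwrap the socket's sk_filter and run
	 * the flat bpf_prog inside it
	 */
	struct sk_filter *fp = rcu_dereference(sk->sk_filter);
	unsigned int pkt_len;

	if (fp)
		pkt_len = BPF_PROG_RUN(fp->prog, skb);
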
@@ -1155,7 +1154,7 @@ int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
 	/* We're copying the filter that has been originally attached,
 	 * so no conversion/decode needed anymore.
 	 */
-	fprog = filter->orig_prog;
+	fprog = filter->prog->orig_prog;

 	ret = fprog->len;
 	if (!len)
@@ -1167,7 +1166,7 @@ int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
 		goto out;

 	ret = -EFAULT;
-	if (copy_to_user(ubuf, fprog->filter, sk_filter_proglen(fprog)))
+	if (copy_to_user(ubuf, fprog->filter, bpf_classic_proglen(fprog)))
 		goto out;

 	/* Instead of bytes, the API requests to return the number
--- a/net/core/ptp_classifier.c
+++ b/net/core/ptp_classifier.c
@@ -107,11 +107,11 @@
 #include <linux/filter.h>
 #include <linux/ptp_classify.h>

-static struct sk_filter *ptp_insns __read_mostly;
+static struct bpf_prog *ptp_insns __read_mostly;

 unsigned int ptp_classify_raw(const struct sk_buff *skb)
 {
-	return SK_RUN_FILTER(ptp_insns, skb);
+	return BPF_PROG_RUN(ptp_insns, skb);
 }
 EXPORT_SYMBOL_GPL(ptp_classify_raw);

@@ -189,5 +189,5 @@ void __init ptp_classifier_init(void)
 		.len = ARRAY_SIZE(ptp_filter), .filter = ptp_filter,
 	};

-	BUG_ON(sk_unattached_filter_create(&ptp_insns, &ptp_prog));
+	BUG_ON(bpf_prog_create(&ptp_insns, &ptp_prog));
 }
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1474,6 +1474,7 @@ static void sk_update_clone(const struct sock *sk, struct sock *newsk)
 struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
 {
 	struct sock *newsk;
+	bool is_charged = true;

 	newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
 	if (newsk != NULL) {
@@ -1518,9 +1519,13 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)

 		filter = rcu_dereference_protected(newsk->sk_filter, 1);
 		if (filter != NULL)
-			sk_filter_charge(newsk, filter);
+			/* though it's an empty new sock, the charging may fail
+			 * if sysctl_optmem_max was changed between creation of
+			 * original socket and cloning
+			 */
+			is_charged = sk_filter_charge(newsk, filter);

-		if (unlikely(xfrm_sk_clone_policy(newsk))) {
+		if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk))) {
 			/* It is still raw copy of parent, so invalidate
 			 * destructor and make plain sk_free() */
 			newsk->sk_destruct = NULL;
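One of the few behavioural changes in the series hides in the sock.c hunk above: a clone can now fail on filter accounting instead of silently inheriting an unaccounted filter. A sketch of the check sk_filter_charge() performs, reduced to its core, and of the window the new comment describes (the helper and all numbers are invented for illustration):

	/* the accounting test, stripped of the atomics */
	static bool would_charge(u32 filter_size, u32 omem_alloc, u32 optmem_max)
	{
		return filter_size <= optmem_max &&
		       omem_alloc + filter_size < optmem_max;
	}

	/* parent attached its filter when the limit was generous; the
	 * admin then shrank it before the socket was cloned:
	 *
	 *   would_charge(8192, 0, 20480) -> true   (parent, attach time)
	 *   would_charge(8192, 0, 4096)  -> false  (child, clone time)
	 *
	 * !is_charged then trips the unlikely() branch and the half-built
	 * child is torn down via sk_free().
	 */
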
--- a/net/core/sock_diag.c
+++ b/net/core/sock_diag.c
@@ -68,8 +68,8 @@ int sock_diag_put_filterinfo(bool may_report_filterinfo, struct sock *sk,
 	if (!filter)
 		goto out;

-	fprog = filter->orig_prog;
-	flen = sk_filter_proglen(fprog);
+	fprog = filter->prog->orig_prog;
+	flen = bpf_classic_proglen(fprog);

 	attr = nla_reserve(skb, attrtype, flen);
 	if (attr == NULL) {
--- a/net/netfilter/xt_bpf.c
+++ b/net/netfilter/xt_bpf.c
@@ -28,7 +28,7 @@ static int bpf_mt_check(const struct xt_mtchk_param *par)
 	program.len = info->bpf_program_num_elem;
 	program.filter = info->bpf_program;

-	if (sk_unattached_filter_create(&info->filter, &program)) {
+	if (bpf_prog_create(&info->filter, &program)) {
 		pr_info("bpf: check failed: parse error\n");
 		return -EINVAL;
 	}
@@ -40,13 +40,13 @@ static bool bpf_mt(const struct sk_buff *skb, struct xt_action_param *par)
 {
 	const struct xt_bpf_info *info = par->matchinfo;

-	return SK_RUN_FILTER(info->filter, skb);
+	return BPF_PROG_RUN(info->filter, skb);
 }

 static void bpf_mt_destroy(const struct xt_mtdtor_param *par)
 {
 	const struct xt_bpf_info *info = par->matchinfo;
-	sk_unattached_filter_destroy(info->filter);
+	bpf_prog_destroy(info->filter);
 }

 static struct xt_match bpf_mt_reg __read_mostly = {
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -30,7 +30,7 @@ struct cls_bpf_head {
 };

 struct cls_bpf_prog {
-	struct sk_filter *filter;
+	struct bpf_prog *filter;
 	struct sock_filter *bpf_ops;
 	struct tcf_exts exts;
 	struct tcf_result res;
@@ -54,7 +54,7 @@ static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
 	int ret;

 	list_for_each_entry(prog, &head->plist, link) {
-		int filter_res = SK_RUN_FILTER(prog->filter, skb);
+		int filter_res = BPF_PROG_RUN(prog->filter, skb);

 		if (filter_res == 0)
 			continue;
@@ -92,7 +92,7 @@ static void cls_bpf_delete_prog(struct tcf_proto *tp, struct cls_bpf_prog *prog)
 	tcf_unbind_filter(tp, &prog->res);
 	tcf_exts_destroy(tp, &prog->exts);

-	sk_unattached_filter_destroy(prog->filter);
+	bpf_prog_destroy(prog->filter);

 	kfree(prog->bpf_ops);
 	kfree(prog);
@@ -161,7 +161,7 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
 	struct sock_filter *bpf_ops, *bpf_old;
 	struct tcf_exts exts;
 	struct sock_fprog_kern tmp;
-	struct sk_filter *fp, *fp_old;
+	struct bpf_prog *fp, *fp_old;
 	u16 bpf_size, bpf_len;
 	u32 classid;
 	int ret;
@@ -193,7 +193,7 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
 	tmp.len = bpf_len;
 	tmp.filter = bpf_ops;

-	ret = sk_unattached_filter_create(&fp, &tmp);
+	ret = bpf_prog_create(&fp, &tmp);
 	if (ret)
 		goto errout_free;

@@ -211,7 +211,7 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
 	tcf_exts_change(tp, &prog->exts, &exts);

 	if (fp_old)
-		sk_unattached_filter_destroy(fp_old);
+		bpf_prog_destroy(fp_old);
 	if (bpf_old)
 		kfree(bpf_old);

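Both consumers above treat the program's return value as the verdict: BPF_PROG_RUN() hands back the filter's 32-bit return code, xt_bpf turns it straight into a bool, and cls_bpf treats 0 as "no match, try the next program". A hedged sketch of a minimal match-style caller in the same spirit (hypothetical helper, for illustration):

	/* 0 => no match; any non-zero return code => match */
	static bool example_match(const struct bpf_prog *prog,
				  const struct sk_buff *skb)
	{
		return BPF_PROG_RUN(prog, skb) != 0;
	}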