bpf: fix struct htab_elem layout
when htab_elem is removed from the bucket list the htab_elem.hash_node.next
field should not be overridden too early, otherwise we have a tiny race window
between lookup and delete.
The bug was discovered by manual code analysis and is reproducible
only with an explicit udelay() in lookup_elem_raw().
Fixes: 6c90598174 ("bpf: pre-allocate hash map elements")
Reported-by: Jonathan Perry <jonperry@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: David S. Miller <davem@davemloft.net>

parent 745cb7f8a5
commit 9f691549f7
1 changed file with 20 additions and 5 deletions
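
In the old layout, fnode.next shared its storage with hash_node.next, so pushing a just-deleted element onto the per-cpu free list could clobber the next pointer a concurrent RCU lookup was still following. The new layout inserts a void *padding word so that htab and fnode overlap hash_node.pprev instead, which lookups never dereference. Below is a minimal user-space sketch (not kernel code; hlist_node and pcpu_freelist_node are stubbed out as plain pointer structs) that checks the same offsets the patch enforces with BUILD_BUG_ON():

	#include <assert.h>
	#include <stddef.h>

	/* stand-ins for the kernel types; layout-compatible for this check */
	struct hlist_node { struct hlist_node *next, **pprev; };
	struct pcpu_freelist_node { struct pcpu_freelist_node *next; };
	struct bpf_htab;

	struct htab_elem {
		union {
			struct hlist_node hash_node;
			struct {
				void *padding;	/* keeps fnode clear of hash_node.next */
				union {
					struct bpf_htab *htab;
					struct pcpu_freelist_node fnode;
				};
			};
		};
	};

	int main(void)
	{
		/* same invariants the patch asserts at build time */
		assert(offsetof(struct htab_elem, htab) ==
		       offsetof(struct htab_elem, hash_node.pprev));
		assert(offsetof(struct htab_elem, fnode.next) ==
		       offsetof(struct htab_elem, hash_node.pprev));
		return 0;
	}
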
@@ -45,8 +45,13 @@ enum extra_elem_state {
 struct htab_elem {
 	union {
 		struct hlist_node hash_node;
-		struct bpf_htab *htab;
-		struct pcpu_freelist_node fnode;
+		struct {
+			void *padding;
+			union {
+				struct bpf_htab *htab;
+				struct pcpu_freelist_node fnode;
+			};
+		};
 	};
 	union {
 		struct rcu_head rcu;
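
Why overlapping pprev is harmless while overlapping next was not: a lookup such as lookup_elem_raw() walks the bucket under RCU by following hash_node.next only; pprev is written by updaters under the bucket lock and is never read by the lookup path. A rough sketch of such a traversal (kernel hlist helpers replaced by a plain loop for illustration):

	struct hlist_node { struct hlist_node *next, **pprev; };

	/* walk a bucket the way an RCU reader does: only ->next is read,
	 * so a writer reusing the pprev slot for its free-list link cannot
	 * redirect the walk
	 */
	static struct hlist_node *find_node(struct hlist_node *first,
					    int (*match)(struct hlist_node *))
	{
		struct hlist_node *n;

		for (n = first; n; n = n->next)
			if (match(n))
				return n;
		return NULL;
	}
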
@@ -162,7 +167,8 @@ static int prealloc_init(struct bpf_htab *htab)
 				 offsetof(struct htab_elem, lru_node),
 				 htab->elem_size, htab->map.max_entries);
 	else
-		pcpu_freelist_populate(&htab->freelist, htab->elems,
+		pcpu_freelist_populate(&htab->freelist,
+				       htab->elems + offsetof(struct htab_elem, fnode),
 				       htab->elem_size, htab->map.max_entries);
 
 	return 0;
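
With fnode no longer at offset 0, prealloc_init() cannot hand pcpu_freelist_populate() the start of the element array anymore: the free-list code links slots through the node at the address it is given, so the base pointer must now point at the fnode inside element 0 while elem_size stays the stride. A simplified sketch of populating a free list whose nodes sit at a fixed offset inside each fixed-size slot (names are illustrative, not the kernel API):

	#include <stddef.h>

	struct freelist_node { struct freelist_node *next; };

	/* buf points at the node inside slot 0; successive nodes are one
	 * elem_size apart, analogous to elems + offsetof(htab_elem, fnode)
	 */
	static struct freelist_node *populate(void *buf, size_t elem_size,
					      unsigned int nr_elems)
	{
		struct freelist_node *head = NULL;
		unsigned int i;

		for (i = 0; i < nr_elems; i++) {
			struct freelist_node *node = (struct freelist_node *)
				((char *)buf + (size_t)i * elem_size);
			node->next = head;
			head = node;
		}
		return head;
	}
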
@@ -217,6 +223,11 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 	int err, i;
 	u64 cost;
 
+	BUILD_BUG_ON(offsetof(struct htab_elem, htab) !=
+		     offsetof(struct htab_elem, hash_node.pprev));
+	BUILD_BUG_ON(offsetof(struct htab_elem, fnode.next) !=
+		     offsetof(struct htab_elem, hash_node.pprev));
+
 	if (lru && !capable(CAP_SYS_ADMIN))
 		/* LRU implementation is much complicated than other
 		 * maps.  Hence, limit to CAP_SYS_ADMIN for now.
@@ -582,9 +593,13 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
 	int err = 0;
 
 	if (prealloc) {
-		l_new = (struct htab_elem *)pcpu_freelist_pop(&htab->freelist);
-		if (!l_new)
+		struct pcpu_freelist_node *l;
+
+		l = pcpu_freelist_pop(&htab->freelist);
+		if (!l)
 			err = -E2BIG;
+		else
+			l_new = container_of(l, struct htab_elem, fnode);
 	} else {
 		if (atomic_inc_return(&htab->count) > htab->map.max_entries) {
 			atomic_dec(&htab->count);
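
Because fnode is no longer the first member of htab_elem, the old cast from the popped pcpu_freelist_node pointer back to htab_elem would now point one word too early; container_of() subtracts the member offset explicitly. A small stand-alone illustration of the idiom (container_of defined locally for the example, struct names are illustrative):

	#include <stddef.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct pcpu_freelist_node { struct pcpu_freelist_node *next; };

	struct elem {
		void *padding;			/* fnode is not at offset 0 */
		struct pcpu_freelist_node fnode;
	};

	static struct elem *elem_from_fnode(struct pcpu_freelist_node *l)
	{
		/* a plain (struct elem *) cast would miss the padding word */
		return container_of(l, struct elem, fnode);
	}
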