Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf
Pablo Neira Ayuso says: ==================== Netfilter fixes for net This is a large batch of Netfilter fixes for net, they are: 1) Three patches to fix NAT conversion to rhashtable: Switch to rhlist structure that allows to have several objects with the same key. Moreover, fix wrong comparison logic in nf_nat_bysource_cmp() as this is expecting a return value similar to memcmp(). Change location of the nat_bysource field in the nf_conn structure to avoid zeroing this as it breaks interaction with SLAB_DESTROY_BY_RCU and lead us to crashes. From Florian Westphal. 2) Don't allow malformed fragments go through in IPv6, drop them, otherwise we hit GPF, patch from Florian Westphal. 3) Fix crash if attributes are missing in nft_range, from Liping Zhang. 4) Fix arptables 32-bits userspace 64-bits kernel compat, from Hongxu Jia. 5) Two patches from David Ahern to fix netfilter interaction with vrf. From David Ahern. 6) Fix element timeout calculation in nf_tables, we take milliseconds from userspace, but we use jiffies from kernelspace. Patch from Anders K. Pedersen. 7) Missing validation length netlink attribute for nft_hash, from Laura Garcia. 8) Fix nf_conntrack_helper documentation, we don't default to off anymore for a bit of time so let's get this in sync with the code. I know it is late but I think these are important, specifically the NAT bits, as they are mostly addressing fallout from recent changes. I also read there are chances to have -rc8, if that is the case, that would also give us a bit more time to test this. ==================== Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
commit
3d2dd617fb
12 changed files with 69 additions and 38 deletions
|
@ -62,10 +62,13 @@ nf_conntrack_generic_timeout - INTEGER (seconds)
|
|||
protocols.
|
||||
|
||||
nf_conntrack_helper - BOOLEAN
|
||||
0 - disabled
|
||||
not 0 - enabled (default)
|
||||
0 - disabled (default)
|
||||
not 0 - enabled
|
||||
|
||||
Enable automatic conntrack helper assignment.
|
||||
If disabled it is required to set up iptables rules to assign
|
||||
helpers to connections. See the CT target description in the
|
||||
iptables-extensions(8) man page for further information.
|
||||
|
||||
nf_conntrack_icmp_timeout - INTEGER (seconds)
|
||||
default 30
|
||||
|
|
|
@ -100,6 +100,9 @@ struct nf_conn {
|
|||
|
||||
possible_net_t ct_net;
|
||||
|
||||
#if IS_ENABLED(CONFIG_NF_NAT)
|
||||
struct rhlist_head nat_bysource;
|
||||
#endif
|
||||
/* all members below initialized via memset */
|
||||
u8 __nfct_init_offset[0];
|
||||
|
||||
|
@ -117,9 +120,6 @@ struct nf_conn {
|
|||
/* Extensions */
|
||||
struct nf_ct_ext *ext;
|
||||
|
||||
#if IS_ENABLED(CONFIG_NF_NAT)
|
||||
struct rhash_head nat_bysource;
|
||||
#endif
|
||||
/* Storage reserved for other modules, must be the last member */
|
||||
union nf_conntrack_proto proto;
|
||||
};
|
||||
|
|
|
@ -313,7 +313,7 @@ void nft_unregister_set(struct nft_set_ops *ops);
|
|||
* @size: maximum set size
|
||||
* @nelems: number of elements
|
||||
* @ndeact: number of deactivated elements queued for removal
|
||||
* @timeout: default timeout value in msecs
|
||||
* @timeout: default timeout value in jiffies
|
||||
* @gc_int: garbage collection interval in msecs
|
||||
* @policy: set parameterization (see enum nft_set_policies)
|
||||
* @udlen: user data length
|
||||
|
|
|
@ -24,10 +24,11 @@ int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned int addr_t
|
|||
struct flowi4 fl4 = {};
|
||||
__be32 saddr = iph->saddr;
|
||||
__u8 flags = skb->sk ? inet_sk_flowi_flags(skb->sk) : 0;
|
||||
struct net_device *dev = skb_dst(skb)->dev;
|
||||
unsigned int hh_len;
|
||||
|
||||
if (addr_type == RTN_UNSPEC)
|
||||
addr_type = inet_addr_type(net, saddr);
|
||||
addr_type = inet_addr_type_dev_table(net, dev, saddr);
|
||||
if (addr_type == RTN_LOCAL || addr_type == RTN_UNICAST)
|
||||
flags |= FLOWI_FLAG_ANYSRC;
|
||||
else
|
||||
|
@ -40,6 +41,8 @@ int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned int addr_t
|
|||
fl4.saddr = saddr;
|
||||
fl4.flowi4_tos = RT_TOS(iph->tos);
|
||||
fl4.flowi4_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0;
|
||||
if (!fl4.flowi4_oif)
|
||||
fl4.flowi4_oif = l3mdev_master_ifindex(dev);
|
||||
fl4.flowi4_mark = skb->mark;
|
||||
fl4.flowi4_flags = flags;
|
||||
rt = ip_route_output_key(net, &fl4);
|
||||
|
|
|
@ -1201,8 +1201,8 @@ static int translate_compat_table(struct xt_table_info **pinfo,
|
|||
|
||||
newinfo->number = compatr->num_entries;
|
||||
for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
|
||||
newinfo->hook_entry[i] = info->hook_entry[i];
|
||||
newinfo->underflow[i] = info->underflow[i];
|
||||
newinfo->hook_entry[i] = compatr->hook_entry[i];
|
||||
newinfo->underflow[i] = compatr->underflow[i];
|
||||
}
|
||||
entry1 = newinfo->entries;
|
||||
pos = entry1;
|
||||
|
|
|
@ -576,11 +576,11 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
|
|||
/* Jumbo payload inhibits frag. header */
|
||||
if (ipv6_hdr(skb)->payload_len == 0) {
|
||||
pr_debug("payload len = 0\n");
|
||||
return -EINVAL;
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (find_prev_fhdr(skb, &prevhdr, &nhoff, &fhoff) < 0)
|
||||
return -EINVAL;
|
||||
return 0;
|
||||
|
||||
if (!pskb_may_pull(skb, fhoff + sizeof(*fhdr)))
|
||||
return -ENOMEM;
|
||||
|
|
|
@ -69,7 +69,7 @@ static unsigned int ipv6_defrag(void *priv,
|
|||
if (err == -EINPROGRESS)
|
||||
return NF_STOLEN;
|
||||
|
||||
return NF_ACCEPT;
|
||||
return err == 0 ? NF_ACCEPT : NF_DROP;
|
||||
}
|
||||
|
||||
static struct nf_hook_ops ipv6_defrag_ops[] = {
|
||||
|
|
|
@ -156,6 +156,7 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
|
|||
fl6.daddr = oip6h->saddr;
|
||||
fl6.fl6_sport = otcph->dest;
|
||||
fl6.fl6_dport = otcph->source;
|
||||
fl6.flowi6_oif = l3mdev_master_ifindex(skb_dst(oldskb)->dev);
|
||||
security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6));
|
||||
dst = ip6_route_output(net, NULL, &fl6);
|
||||
if (dst->error) {
|
||||
|
|
|
@ -42,7 +42,7 @@ struct nf_nat_conn_key {
|
|||
const struct nf_conntrack_zone *zone;
|
||||
};
|
||||
|
||||
static struct rhashtable nf_nat_bysource_table;
|
||||
static struct rhltable nf_nat_bysource_table;
|
||||
|
||||
inline const struct nf_nat_l3proto *
|
||||
__nf_nat_l3proto_find(u8 family)
|
||||
|
@ -193,9 +193,12 @@ static int nf_nat_bysource_cmp(struct rhashtable_compare_arg *arg,
|
|||
const struct nf_nat_conn_key *key = arg->key;
|
||||
const struct nf_conn *ct = obj;
|
||||
|
||||
return same_src(ct, key->tuple) &&
|
||||
net_eq(nf_ct_net(ct), key->net) &&
|
||||
nf_ct_zone_equal(ct, key->zone, IP_CT_DIR_ORIGINAL);
|
||||
if (!same_src(ct, key->tuple) ||
|
||||
!net_eq(nf_ct_net(ct), key->net) ||
|
||||
!nf_ct_zone_equal(ct, key->zone, IP_CT_DIR_ORIGINAL))
|
||||
return 1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct rhashtable_params nf_nat_bysource_params = {
|
||||
|
@ -204,7 +207,6 @@ static struct rhashtable_params nf_nat_bysource_params = {
|
|||
.obj_cmpfn = nf_nat_bysource_cmp,
|
||||
.nelem_hint = 256,
|
||||
.min_size = 1024,
|
||||
.nulls_base = (1U << RHT_BASE_SHIFT),
|
||||
};
|
||||
|
||||
/* Only called for SRC manip */
|
||||
|
@ -223,12 +225,15 @@ find_appropriate_src(struct net *net,
|
|||
.tuple = tuple,
|
||||
.zone = zone
|
||||
};
|
||||
struct rhlist_head *hl;
|
||||
|
||||
ct = rhashtable_lookup_fast(&nf_nat_bysource_table, &key,
|
||||
nf_nat_bysource_params);
|
||||
if (!ct)
|
||||
hl = rhltable_lookup(&nf_nat_bysource_table, &key,
|
||||
nf_nat_bysource_params);
|
||||
if (!hl)
|
||||
return 0;
|
||||
|
||||
ct = container_of(hl, typeof(*ct), nat_bysource);
|
||||
|
||||
nf_ct_invert_tuplepr(result,
|
||||
&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
|
||||
result->dst = tuple->dst;
|
||||
|
@ -446,11 +451,17 @@ nf_nat_setup_info(struct nf_conn *ct,
|
|||
}
|
||||
|
||||
if (maniptype == NF_NAT_MANIP_SRC) {
|
||||
struct nf_nat_conn_key key = {
|
||||
.net = nf_ct_net(ct),
|
||||
.tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
|
||||
.zone = nf_ct_zone(ct),
|
||||
};
|
||||
int err;
|
||||
|
||||
err = rhashtable_insert_fast(&nf_nat_bysource_table,
|
||||
&ct->nat_bysource,
|
||||
nf_nat_bysource_params);
|
||||
err = rhltable_insert_key(&nf_nat_bysource_table,
|
||||
&key,
|
||||
&ct->nat_bysource,
|
||||
nf_nat_bysource_params);
|
||||
if (err)
|
||||
return NF_DROP;
|
||||
}
|
||||
|
@ -567,8 +578,8 @@ static int nf_nat_proto_clean(struct nf_conn *ct, void *data)
|
|||
* will delete entry from already-freed table.
|
||||
*/
|
||||
ct->status &= ~IPS_NAT_DONE_MASK;
|
||||
rhashtable_remove_fast(&nf_nat_bysource_table, &ct->nat_bysource,
|
||||
nf_nat_bysource_params);
|
||||
rhltable_remove(&nf_nat_bysource_table, &ct->nat_bysource,
|
||||
nf_nat_bysource_params);
|
||||
|
||||
/* don't delete conntrack. Although that would make things a lot
|
||||
* simpler, we'd end up flushing all conntracks on nat rmmod.
|
||||
|
@ -698,8 +709,8 @@ static void nf_nat_cleanup_conntrack(struct nf_conn *ct)
|
|||
if (!nat)
|
||||
return;
|
||||
|
||||
rhashtable_remove_fast(&nf_nat_bysource_table, &ct->nat_bysource,
|
||||
nf_nat_bysource_params);
|
||||
rhltable_remove(&nf_nat_bysource_table, &ct->nat_bysource,
|
||||
nf_nat_bysource_params);
|
||||
}
|
||||
|
||||
static struct nf_ct_ext_type nat_extend __read_mostly = {
|
||||
|
@ -834,13 +845,13 @@ static int __init nf_nat_init(void)
|
|||
{
|
||||
int ret;
|
||||
|
||||
ret = rhashtable_init(&nf_nat_bysource_table, &nf_nat_bysource_params);
|
||||
ret = rhltable_init(&nf_nat_bysource_table, &nf_nat_bysource_params);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = nf_ct_extend_register(&nat_extend);
|
||||
if (ret < 0) {
|
||||
rhashtable_destroy(&nf_nat_bysource_table);
|
||||
rhltable_destroy(&nf_nat_bysource_table);
|
||||
printk(KERN_ERR "nf_nat_core: Unable to register extension\n");
|
||||
return ret;
|
||||
}
|
||||
|
@ -864,7 +875,7 @@ static int __init nf_nat_init(void)
|
|||
return 0;
|
||||
|
||||
cleanup_extend:
|
||||
rhashtable_destroy(&nf_nat_bysource_table);
|
||||
rhltable_destroy(&nf_nat_bysource_table);
|
||||
nf_ct_extend_unregister(&nat_extend);
|
||||
return ret;
|
||||
}
|
||||
|
@ -883,7 +894,7 @@ static void __exit nf_nat_cleanup(void)
|
|||
for (i = 0; i < NFPROTO_NUMPROTO; i++)
|
||||
kfree(nf_nat_l4protos[i]);
|
||||
|
||||
rhashtable_destroy(&nf_nat_bysource_table);
|
||||
rhltable_destroy(&nf_nat_bysource_table);
|
||||
}
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
|
|
|
@ -2570,7 +2570,8 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
|
|||
}
|
||||
|
||||
if (set->timeout &&
|
||||
nla_put_be64(skb, NFTA_SET_TIMEOUT, cpu_to_be64(set->timeout),
|
||||
nla_put_be64(skb, NFTA_SET_TIMEOUT,
|
||||
cpu_to_be64(jiffies_to_msecs(set->timeout)),
|
||||
NFTA_SET_PAD))
|
||||
goto nla_put_failure;
|
||||
if (set->gc_int &&
|
||||
|
@ -2859,7 +2860,8 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
|
|||
if (nla[NFTA_SET_TIMEOUT] != NULL) {
|
||||
if (!(flags & NFT_SET_TIMEOUT))
|
||||
return -EINVAL;
|
||||
timeout = be64_to_cpu(nla_get_be64(nla[NFTA_SET_TIMEOUT]));
|
||||
timeout = msecs_to_jiffies(be64_to_cpu(nla_get_be64(
|
||||
nla[NFTA_SET_TIMEOUT])));
|
||||
}
|
||||
gc_int = 0;
|
||||
if (nla[NFTA_SET_GC_INTERVAL] != NULL) {
|
||||
|
@ -3178,7 +3180,8 @@ static int nf_tables_fill_setelem(struct sk_buff *skb,
|
|||
|
||||
if (nft_set_ext_exists(ext, NFT_SET_EXT_TIMEOUT) &&
|
||||
nla_put_be64(skb, NFTA_SET_ELEM_TIMEOUT,
|
||||
cpu_to_be64(*nft_set_ext_timeout(ext)),
|
||||
cpu_to_be64(jiffies_to_msecs(
|
||||
*nft_set_ext_timeout(ext))),
|
||||
NFTA_SET_ELEM_PAD))
|
||||
goto nla_put_failure;
|
||||
|
||||
|
@ -3447,7 +3450,7 @@ void *nft_set_elem_init(const struct nft_set *set,
|
|||
memcpy(nft_set_ext_data(ext), data, set->dlen);
|
||||
if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPIRATION))
|
||||
*nft_set_ext_expiration(ext) =
|
||||
jiffies + msecs_to_jiffies(timeout);
|
||||
jiffies + timeout;
|
||||
if (nft_set_ext_exists(ext, NFT_SET_EXT_TIMEOUT))
|
||||
*nft_set_ext_timeout(ext) = timeout;
|
||||
|
||||
|
@ -3535,7 +3538,8 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
|
|||
if (nla[NFTA_SET_ELEM_TIMEOUT] != NULL) {
|
||||
if (!(set->flags & NFT_SET_TIMEOUT))
|
||||
return -EINVAL;
|
||||
timeout = be64_to_cpu(nla_get_be64(nla[NFTA_SET_ELEM_TIMEOUT]));
|
||||
timeout = msecs_to_jiffies(be64_to_cpu(nla_get_be64(
|
||||
nla[NFTA_SET_ELEM_TIMEOUT])));
|
||||
} else if (set->flags & NFT_SET_TIMEOUT) {
|
||||
timeout = set->timeout;
|
||||
}
|
||||
|
|
|
@ -53,6 +53,7 @@ static int nft_hash_init(const struct nft_ctx *ctx,
|
|||
{
|
||||
struct nft_hash *priv = nft_expr_priv(expr);
|
||||
u32 len;
|
||||
int err;
|
||||
|
||||
if (!tb[NFTA_HASH_SREG] ||
|
||||
!tb[NFTA_HASH_DREG] ||
|
||||
|
@ -67,8 +68,10 @@ static int nft_hash_init(const struct nft_ctx *ctx,
|
|||
priv->sreg = nft_parse_register(tb[NFTA_HASH_SREG]);
|
||||
priv->dreg = nft_parse_register(tb[NFTA_HASH_DREG]);
|
||||
|
||||
len = ntohl(nla_get_be32(tb[NFTA_HASH_LEN]));
|
||||
if (len == 0 || len > U8_MAX)
|
||||
err = nft_parse_u32_check(tb[NFTA_HASH_LEN], U8_MAX, &len);
|
||||
if (err < 0)
|
||||
return err;
|
||||
if (len == 0)
|
||||
return -ERANGE;
|
||||
|
||||
priv->len = len;
|
||||
|
|
|
@ -59,6 +59,12 @@ static int nft_range_init(const struct nft_ctx *ctx, const struct nft_expr *expr
|
|||
int err;
|
||||
u32 op;
|
||||
|
||||
if (!tb[NFTA_RANGE_SREG] ||
|
||||
!tb[NFTA_RANGE_OP] ||
|
||||
!tb[NFTA_RANGE_FROM_DATA] ||
|
||||
!tb[NFTA_RANGE_TO_DATA])
|
||||
return -EINVAL;
|
||||
|
||||
err = nft_data_init(NULL, &priv->data_from, sizeof(priv->data_from),
|
||||
&desc_from, tb[NFTA_RANGE_FROM_DATA]);
|
||||
if (err < 0)
|
||||
|
|
Loading…
Reference in a new issue