Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec-next
Steffen Klassert says:

====================
pull request (net-next): ipsec-next 2015-10-30

1) The flow cache is bounded by two mechanisms: the flow cache limit, which
   depends on the number of cpus, and the xfrm garbage collector threshold,
   which is independent of the number of cpus. As a result, on systems with
   more than 16 cpus we hit the xfrm garbage collector limit and refuse new
   allocations, so new flows are dropped. On systems with 16 or fewer cpus,
   we hit the flowcache limit instead; in that case we shrink the flow cache
   rather than refusing new flows. Increase the xfrm garbage collector
   threshold to INT_MAX to get the same behaviour independent of the number
   of cpus.

2) Fix some unaligned accesses on sparc systems. From Sowmini Varadhan.

3) Fix some header checks in _decode_session4. We may call pskb_may_pull
   with a negative value that is converted to a huge unsigned int. This can
   lead to incorrect policy lookups. Fix this by checking the data pointer
   position before calling pskb_may_pull.

4) Reload the skb header pointers after calling pskb_may_pull in
   _decode_session4, as pskb_may_pull may change the pointers into the
   packet.

5) Add a missing statistic counter on inner mode errors.

Please pull or let me know if there are problems.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit e7b63ff115
5 changed files with 49 additions and 18 deletions
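As background for item 3 above, here is a minimal user-space sketch (not kernel code; the buffer and the names data and xprth are invented stand-ins for the skb pointers) of why a negative length handed to pskb_may_pull() misbehaves: its length parameter is an unsigned int, so a negative pointer difference silently becomes a huge value.

#include <stdio.h>

int main(void)
{
        unsigned char buf[64];

        /* Simulate an skb whose transport header was parsed earlier and
         * now lies before the current data pointer. Illustration only.
         */
        unsigned char *data  = buf + 32;   /* stand-in for skb->data            */
        unsigned char *xprth = buf + 16;   /* stand-in for the transport header */

        long diff = xprth + 4 - data;             /* -12                */
        unsigned int len = (unsigned int)diff;    /* ~4.29 billion      */

        printf("signed diff: %ld, as unsigned len: %u\n", diff, len);

        /* The fix checks the pointer position first; pskb_may_pull() is
         * only asked to pull when the length cannot be negative.
         */
        if (xprth + 4 < data)
                printf("bytes already present, no pull needed\n");

        return 0;
}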
Documentation/networking/ip-sysctl.txt

@@ -1216,7 +1216,8 @@ tag - INTEGER
 xfrm4_gc_thresh - INTEGER
        The threshold at which we will start garbage collecting for IPv4
        destination cache entries. At twice this value the system will
-       refuse new allocations.
+       refuse new allocations. The value must be set below the flowcache
+       limit (4096 * number of online cpus) to take effect.
 
 igmp_link_local_mcast_reports - BOOLEAN
        Enable IGMP reports for link local multicast groups in the
@@ -1662,7 +1663,8 @@ ratelimit - INTEGER
 xfrm6_gc_thresh - INTEGER
        The threshold at which we will start garbage collecting for IPv6
        destination cache entries. At twice this value the system will
-       refuse new allocations.
+       refuse new allocations. The value must be set below the flowcache
+       limit (4096 * number of online cpus) to take effect.
 
 
 IPv6 Update by:
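For context on the documentation change above, a small back-of-the-envelope illustration of item 1 of the pull request: with the old fixed gc_thresh of 32768, the "refuse new allocations" point (2 * gc_thresh) is reached before the flowcache limit once there are more than 16 online cpus. The constants mirror the values quoted above; this is an illustration, not kernel code.

#include <stdio.h>

int main(void)
{
        const int old_gc_thresh = 32768;

        for (int cpus = 4; cpus <= 64; cpus *= 2) {
                int flowcache_limit = 4096 * cpus;      /* 4096 * online cpus   */
                int refuse_point = 2 * old_gc_thresh;   /* gc refuses new allocations here */

                printf("%2d cpus: flowcache limit %6d, gc refuse point %6d -> %s first\n",
                       cpus, flowcache_limit, refuse_point,
                       flowcache_limit <= refuse_point ? "flowcache (shrinks)"
                                                       : "gc threshold (drops flows)");
        }

        return 0;
}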
net/ipv4/xfrm4_policy.c

@@ -127,7 +127,10 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
                case IPPROTO_DCCP:
                        if (xprth + 4 < skb->data ||
                            pskb_may_pull(skb, xprth + 4 - skb->data)) {
-                               __be16 *ports = (__be16 *)xprth;
+                               __be16 *ports;
+
+                               xprth = skb_network_header(skb) + iph->ihl * 4;
+                               ports = (__be16 *)xprth;
 
                                fl4->fl4_sport = ports[!!reverse];
                                fl4->fl4_dport = ports[!reverse];
@@ -135,8 +138,12 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
                        break;
 
                case IPPROTO_ICMP:
-                       if (pskb_may_pull(skb, xprth + 2 - skb->data)) {
-                               u8 *icmp = xprth;
+                       if (xprth + 2 < skb->data ||
+                           pskb_may_pull(skb, xprth + 2 - skb->data)) {
+                               u8 *icmp;
+
+                               xprth = skb_network_header(skb) + iph->ihl * 4;
+                               icmp = xprth;
 
                                fl4->fl4_icmp_type = icmp[0];
                                fl4->fl4_icmp_code = icmp[1];
@@ -144,33 +151,50 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
                        break;
 
                case IPPROTO_ESP:
-                       if (pskb_may_pull(skb, xprth + 4 - skb->data)) {
-                               __be32 *ehdr = (__be32 *)xprth;
+                       if (xprth + 4 < skb->data ||
+                           pskb_may_pull(skb, xprth + 4 - skb->data)) {
+                               __be32 *ehdr;
+
+                               xprth = skb_network_header(skb) + iph->ihl * 4;
+                               ehdr = (__be32 *)xprth;
 
                                fl4->fl4_ipsec_spi = ehdr[0];
                        }
                        break;
 
                case IPPROTO_AH:
-                       if (pskb_may_pull(skb, xprth + 8 - skb->data)) {
-                               __be32 *ah_hdr = (__be32 *)xprth;
+                       if (xprth + 8 < skb->data ||
+                           pskb_may_pull(skb, xprth + 8 - skb->data)) {
+                               __be32 *ah_hdr;
+
+                               xprth = skb_network_header(skb) + iph->ihl * 4;
+                               ah_hdr = (__be32 *)xprth;
 
                                fl4->fl4_ipsec_spi = ah_hdr[1];
                        }
                        break;
 
                case IPPROTO_COMP:
-                       if (pskb_may_pull(skb, xprth + 4 - skb->data)) {
-                               __be16 *ipcomp_hdr = (__be16 *)xprth;
+                       if (xprth + 4 < skb->data ||
+                           pskb_may_pull(skb, xprth + 4 - skb->data)) {
+                               __be16 *ipcomp_hdr;
+
+                               xprth = skb_network_header(skb) + iph->ihl * 4;
+                               ipcomp_hdr = (__be16 *)xprth;
 
                                fl4->fl4_ipsec_spi = htonl(ntohs(ipcomp_hdr[1]));
                        }
                        break;
 
                case IPPROTO_GRE:
-                       if (pskb_may_pull(skb, xprth + 12 - skb->data)) {
-                               __be16 *greflags = (__be16 *)xprth;
-                               __be32 *gre_hdr = (__be32 *)xprth;
+                       if (xprth + 12 < skb->data ||
+                           pskb_may_pull(skb, xprth + 12 - skb->data)) {
+                               __be16 *greflags;
+                               __be32 *gre_hdr;
+
+                               xprth = skb_network_header(skb) + iph->ihl * 4;
+                               greflags = (__be16 *)xprth;
+                               gre_hdr = (__be32 *)xprth;
 
                                if (greflags[0] & GRE_KEY) {
                                        if (greflags[0] & GRE_CSUM)
@@ -244,7 +268,7 @@ static struct dst_ops xfrm4_dst_ops = {
        .destroy =              xfrm4_dst_destroy,
        .ifdown =               xfrm4_dst_ifdown,
        .local_out =            __ip_local_out,
-       .gc_thresh =            32768,
+       .gc_thresh =            INT_MAX,
 };
 
 static struct xfrm_policy_afinfo xfrm4_policy_afinfo = {
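Item 4 of the pull request explains the reload of xprth in the hunks above: pskb_may_pull() may reallocate the skb head, which invalidates any pointer derived from the old head. A rough user-space analogy with realloc() (the buffer, offsets and names below are invented purely for illustration):

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
        /* Analogy only: like pskb_may_pull(), realloc() may move the
         * underlying storage, so pointers computed before the call must
         * be recomputed from the new base afterwards.
         */
        char *base = malloc(32);
        if (!base)
                return 1;
        strcpy(base, "abc|transport-header");

        char *stale = base + 4;          /* pointer derived from the old base */
        printf("before realloc: %s\n", stale);

        char *moved = realloc(base, 1 << 20);
        if (!moved) {
                free(base);
                return 1;
        }

        /* 'stale' may now dangle; recompute from the new base, just as the
         * diff recomputes xprth from skb_network_header(skb) after
         * pskb_may_pull() succeeds.
         */
        char *fresh = moved + 4;
        printf("after realloc:  %s\n", fresh);

        free(moved);
        return 0;
}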
net/ipv6/xfrm6_policy.c

@@ -288,7 +288,7 @@ static struct dst_ops xfrm6_dst_ops = {
        .destroy =              xfrm6_dst_destroy,
        .ifdown =               xfrm6_dst_ifdown,
        .local_out =            __ip6_local_out,
-       .gc_thresh =            32768,
+       .gc_thresh =            INT_MAX,
 };
 
 static struct xfrm_policy_afinfo xfrm6_policy_afinfo = {
net/xfrm/xfrm_input.c

@@ -330,8 +330,10 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
 
                if (x->sel.family == AF_UNSPEC) {
                        inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
-                       if (inner_mode == NULL)
+                       if (inner_mode == NULL) {
+                               XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
                                goto drop;
+                       }
                }
 
                if (inner_mode->input(x, skb)) {
net/xfrm/xfrm_user.c

@@ -31,6 +31,7 @@
 #if IS_ENABLED(CONFIG_IPV6)
 #include <linux/in6.h>
 #endif
+#include <asm/unaligned.h>
 
 static int verify_one_alg(struct nlattr **attrs, enum xfrm_attr_type_t type)
 {
@@ -728,7 +729,9 @@ static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
        memcpy(&p->sel, &x->sel, sizeof(p->sel));
        memcpy(&p->lft, &x->lft, sizeof(p->lft));
        memcpy(&p->curlft, &x->curlft, sizeof(p->curlft));
-       memcpy(&p->stats, &x->stats, sizeof(p->stats));
+       put_unaligned(x->stats.replay_window, &p->stats.replay_window);
+       put_unaligned(x->stats.replay, &p->stats.replay);
+       put_unaligned(x->stats.integrity_failed, &p->stats.integrity_failed);
        memcpy(&p->saddr, &x->props.saddr, sizeof(p->saddr));
        p->mode = x->props.mode;
        p->replay_window = x->props.replay_window;