Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:
 "Several bug fixes, some to new features appearing in this merge
  window, some that have been around for a while.

  I have a short list of known problems that need to be sorted out, but
  all of them can be solved easily during the run up to 3.6-final.

  I'll be offline until Sunday afternoon, but nothing need hold up
  3.6-rc1 and the close of the merge window, networking wise, at this
  point.

  1) Fix interface check in ipv4 TCP early demux, from Eric Dumazet.

  2) Fix a long standing bug in TCP DMA to userspace offload that can
     hang applications using MSG_TRUNC, from Jiri Kosina.

  3) Don't allow TCP_USER_TIMEOUT to be negative, from Hangbin Liu.

  4) Don't use GFP_KERNEL under spinlock in kaweth driver, from Dan
     Carpenter"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net:
  tcp: perform DMA to userspace only if there is a task waiting for it
  Revert "openvswitch: potential NULL deref in sample()"
  ipv4: fix TCP early demux
  net: fix rtnetlink IFF_PROMISC and IFF_ALLMULTI handling
  USB: kaweth.c: use GFP_ATOMIC under spin_lock
  tcp: Add TCP_USER_TIMEOUT negative value check
  bcma: add missing iounmap on error path
  bcma: fix regression in interrupt assignment on mips
  mac80211_hwsim: fix possible race condition in usage of info->control.sta & control.vif
This commit is contained in:
Linus Torvalds 2012-07-28 06:00:39 -07:00
commit f7da9cdf45
10 changed files with 36 additions and 28 deletions

View file

@@ -131,7 +131,7 @@ static void bcma_core_mips_set_irq(struct bcma_device *dev, unsigned int irq)
 			/* backplane irq line is in use, find out who uses
 			 * it and set user to irq 0
 			 */
-			list_for_each_entry_reverse(core, &bus->cores, list) {
+			list_for_each_entry(core, &bus->cores, list) {
 				if ((1 << bcma_core_mips_irqflag(core)) ==
 				    oldirqflag) {
 					bcma_core_mips_set_irq(core, 0);
@@ -161,7 +161,7 @@ static void bcma_core_mips_dump_irq(struct bcma_bus *bus)
 {
 	struct bcma_device *core;

-	list_for_each_entry_reverse(core, &bus->cores, list) {
+	list_for_each_entry(core, &bus->cores, list) {
 		bcma_core_mips_print_irq(core, bcma_core_mips_irq(core));
 	}
 }
@@ -224,7 +224,7 @@ void bcma_core_mips_init(struct bcma_drv_mips *mcore)
 	mcore->assigned_irqs = 1;

 	/* Assign IRQs to all cores on the bus */
-	list_for_each_entry_reverse(core, &bus->cores, list) {
+	list_for_each_entry(core, &bus->cores, list) {
 		int mips_irq;
 		if (core->irq)
 			continue;

View file

@@ -462,8 +462,10 @@ int bcma_bus_scan(struct bcma_bus *bus)
 	while (eromptr < eromend) {
 		struct bcma_device *other_core;
 		struct bcma_device *core = kzalloc(sizeof(*core), GFP_KERNEL);
-		if (!core)
-			return -ENOMEM;
+		if (!core) {
+			err = -ENOMEM;
+			goto out;
+		}
 		INIT_LIST_HEAD(&core->list);
 		core->bus = bus;
@@ -478,7 +480,7 @@ int bcma_bus_scan(struct bcma_bus *bus)
 			} else if (err == -ESPIPE) {
 				break;
 			}
-			return err;
+			goto out;
 		}

 		core->core_index = core_num++;
@@ -494,10 +496,12 @@ int bcma_bus_scan(struct bcma_bus *bus)
 		list_add_tail(&core->list, &bus->cores);
 	}

+	err = 0;
+out:
 	if (bus->hosttype == BCMA_HOSTTYPE_SOC)
 		iounmap(eromptr);

-	return 0;
+	return err;
 }

 int __init bcma_bus_scan_early(struct bcma_bus *bus,
@@ -537,7 +541,7 @@ int __init bcma_bus_scan_early(struct bcma_bus *bus,
 		else if (err == -ESPIPE)
 			break;
 		else if (err < 0)
-			return err;
+			goto out;

 		core->core_index = core_num++;
 		bus->nr_cores++;
@@ -551,6 +555,7 @@ int __init bcma_bus_scan_early(struct bcma_bus *bus,
 			break;
 	}

+out:
 	if (bus->hosttype == BCMA_HOSTTYPE_SOC)
 		iounmap(eromptr);

View file

@@ -1314,7 +1314,7 @@ static int kaweth_internal_control_msg(struct usb_device *usb_dev,
 	int retv;
 	int length = 0; /* shut up GCC */

-	urb = usb_alloc_urb(0, GFP_NOIO);
+	urb = usb_alloc_urb(0, GFP_ATOMIC);
 	if (!urb)
 		return -ENOMEM;

View file

@@ -739,11 +739,6 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 	txi = IEEE80211_SKB_CB(skb);

-	if (txi->control.vif)
-		hwsim_check_magic(txi->control.vif);
-	if (txi->control.sta)
-		hwsim_check_sta_magic(txi->control.sta);
-
 	ieee80211_tx_info_clear_status(txi);

 	/* frame was transmitted at most favorable rate at first attempt */

View file

@@ -659,6 +659,12 @@ static void set_operstate(struct net_device *dev, unsigned char transition)
 	}
 }

+static unsigned int rtnl_dev_get_flags(const struct net_device *dev)
+{
+	return (dev->flags & ~(IFF_PROMISC | IFF_ALLMULTI)) |
+	       (dev->gflags & (IFF_PROMISC | IFF_ALLMULTI));
+}
+
 static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
 					   const struct ifinfomsg *ifm)
 {
@@ -667,7 +673,7 @@ static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
 	/* bugwards compatibility: ifi_change == 0 is treated as ~0 */
 	if (ifm->ifi_change)
 		flags = (flags & ifm->ifi_change) |
-			(dev->flags & ~ifm->ifi_change);
+			(rtnl_dev_get_flags(dev) & ~ifm->ifi_change);

 	return flags;
 }

View file

@@ -2681,6 +2681,9 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
 		/* Cap the max timeout in ms TCP will retry/retrans
 		 * before giving up and aborting (ETIMEDOUT) a connection.
 		 */
+		if (val < 0)
+			err = -EINVAL;
+		else
 			icsk->icsk_user_timeout = msecs_to_jiffies(val);
 		break;
 	default:

View file

@@ -5475,7 +5475,9 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 		    if (tp->copied_seq == tp->rcv_nxt &&
 			len - tcp_header_len <= tp->ucopy.len) {
 #ifdef CONFIG_NET_DMA
-			if (tcp_dma_try_early_copy(sk, skb, tcp_header_len)) {
+			if (tp->ucopy.task == current &&
+			    sock_owned_by_user(sk) &&
+			    tcp_dma_try_early_copy(sk, skb, tcp_header_len)) {
 				copied_early = 1;
 				eaten = 1;
 			}
@@ -5603,6 +5605,7 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
 	if (skb != NULL) {
 		sk->sk_rx_dst = dst_clone(skb_dst(skb));
+		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
 		security_inet_conn_established(sk, skb);
 	}

View file

@@ -1620,17 +1620,15 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
 		sock_rps_save_rxhash(sk, skb);
 		if (sk->sk_rx_dst) {
 			struct dst_entry *dst = sk->sk_rx_dst;
-			if (dst->ops->check(dst, 0) == NULL) {
+			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
+			    dst->ops->check(dst, 0) == NULL) {
 				dst_release(dst);
 				sk->sk_rx_dst = NULL;
 			}
 		}
 		if (unlikely(sk->sk_rx_dst == NULL)) {
-			struct inet_sock *icsk = inet_sk(sk);
-			struct rtable *rt = skb_rtable(skb);
-
-			sk->sk_rx_dst = dst_clone(&rt->dst);
-			icsk->rx_dst_ifindex = inet_iif(skb);
+			sk->sk_rx_dst = dst_clone(skb_dst(skb));
+			inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
 		}
 		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
 			rsk = sk;
@@ -1709,11 +1707,11 @@ void tcp_v4_early_demux(struct sk_buff *skb)
 		skb->destructor = sock_edemux;
 		if (sk->sk_state != TCP_TIME_WAIT) {
 			struct dst_entry *dst = sk->sk_rx_dst;
-			struct inet_sock *icsk = inet_sk(sk);

 			if (dst)
 				dst = dst_check(dst, 0);
 			if (dst &&
-			    icsk->rx_dst_ifindex == skb->skb_iif)
+			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
 				skb_dst_set_noref(skb, dst);
 		}
 	}

View file

@@ -388,6 +388,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
 		struct tcp_cookie_values *oldcvp = oldtp->cookie_values;

 		newsk->sk_rx_dst = dst_clone(skb_dst(skb));
+		inet_sk(newsk)->rx_dst_ifindex = skb->skb_iif;

 		/* TCP Cookie Transactions require space for the cookie pair,
 		 * as it differs for each connection. There is no need to

View file

@@ -325,9 +325,6 @@ static int sample(struct datapath *dp, struct sk_buff *skb,
 		}
 	}

-	if (!acts_list)
-		return 0;
-
 	return do_execute_actions(dp, skb, nla_data(acts_list),
 				  nla_len(acts_list), true);
 }