Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
* 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6:
  [NET]: Fix possible dev_deactivate race condition
  [INET]: Justification for local port range robustness.
  [PACKET]: Kill unused pg_vec_endpage() function
  [NET]: QoS/Sched as menuconfig
  [NET]: Fix bug in sk_filter race cures.
  [PATCH] mac80211: make ieee802_11_parse_elems return void
commit 804b908adf
11 changed files with 48 additions and 71 deletions
@@ -2797,11 +2797,12 @@ static void cma_remove_one(struct ib_device *device)
 static int cma_init(void)
 {
-	int ret, low, high;
+	int ret, low, high, remaining;
 
 	get_random_bytes(&next_port, sizeof next_port);
 	inet_get_local_port_range(&low, &high);
-	next_port = ((unsigned int) next_port % (high - low)) + low;
+	remaining = (high - low) + 1;
+	next_port = ((unsigned int) next_port % remaining) + low;
 
 	cma_wq = create_singlethread_workqueue("rdma_cm");
 	if (!cma_wq)

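A note on the arithmetic in this hunk: the local port range [low, high] is inclusive, so it holds (high - low) + 1 candidate ports; the old modulus of (high - low) could never yield high and would fault once a single-port range is allowed. A minimal userspace sketch of the corrected calculation (pick_port() is a hypothetical helper, not kernel code):

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical helper (not kernel code): pick a port uniformly from the
 * inclusive range [low, high].  The number of candidates is
 * (high - low) + 1; using "high - low" as the modulus would make "high"
 * unreachable and, for a single-port range, divide by zero. */
static int pick_port(unsigned int rnd, int low, int high)
{
        int remaining = (high - low) + 1;

        return (int)(rnd % (unsigned int)remaining) + low;
}

int main(void)
{
        int i;

        for (i = 0; i < 5; i++)
                printf("%d\n", pick_port((unsigned int)rand(), 32768, 61000));

        /* A single-port range also works: remaining == 1. */
        printf("%d\n", pick_port((unsigned int)rand(), 50000, 50000));
        return 0;
}
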
@@ -447,7 +447,8 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
 	rcu_assign_pointer(sk->sk_filter, fp);
 	rcu_read_unlock_bh();
 
-	sk_filter_delayed_uncharge(sk, old_fp);
+	if (old_fp)
+		sk_filter_delayed_uncharge(sk, old_fp);
 	return 0;
 }
 

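The fix above guards the delayed uncharge because the first successful attach leaves old_fp NULL, and releasing a NULL filter would be fatal. A rough sketch of the same guard pattern, using a made-up refcounted filter type rather than the real struct sk_filter:

#include <stdlib.h>

/* Hypothetical refcounted filter, standing in for struct sk_filter. */
struct filter {
        int refcnt;
        /* ... filter program ... */
};

static void filter_release(struct filter *fp)
{
        if (--fp->refcnt == 0)          /* dereferences fp: NULL is fatal */
                free(fp);
}

/* Swap a new filter into the socket's slot and hand back whatever was
 * installed before.  On the very first attach that is NULL. */
static struct filter *attach_filter(struct filter **slot, struct filter *fp)
{
        struct filter *old = *slot;

        *slot = fp;
        return old;
}

int main(void)
{
        struct filter *sk_slot = NULL;
        struct filter *fp = calloc(1, sizeof(*fp));
        struct filter *old;

        fp->refcnt = 1;
        old = attach_filter(&sk_slot, fp);
        if (old)                        /* the same guard as "if (old_fp)" */
                filter_release(old);    /* an unconditional release would crash here */

        filter_release(sk_slot);
        return 0;
}
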
@@ -93,7 +93,7 @@ int inet_csk_get_port(struct inet_hashinfo *hashinfo,
 	int remaining, rover, low, high;
 
 	inet_get_local_port_range(&low, &high);
-	remaining = high - low;
+	remaining = (high - low) + 1;
 	rover = net_random() % remaining + low;
 
 	do {

@@ -286,7 +286,7 @@ int inet_hash_connect(struct inet_timewait_death_row *death_row,
 		struct inet_timewait_sock *tw = NULL;
 
 		inet_get_local_port_range(&low, &high);
-		remaining = high - low;
+		remaining = (high - low) + 1;
 
 		local_bh_disable();
 		for (i = 1; i <= remaining; i++) {

@@ -122,7 +122,7 @@ static int ipv4_local_port_range(ctl_table *table, int write, struct file *filp,
 	ret = proc_dointvec_minmax(&tmp, write, filp, buffer, lenp, ppos);
 
 	if (write && ret == 0) {
-		if (range[1] <= range[0])
+		if (range[1] < range[0])
 			ret = -EINVAL;
 		else
 			set_local_port_range(range);

@@ -150,7 +150,7 @@ static int ipv4_sysctl_local_port_range(ctl_table *table, int __user *name,
 
 	ret = sysctl_intvec(&tmp, name, nlen, oldval, oldlenp, newval, newlen);
 	if (ret == 0 && newval && newlen) {
-		if (range[1] <= range[0])
+		if (range[1] < range[0])
 			ret = -EINVAL;
 		else
 			set_local_port_range(range);

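Relaxing the comparison from <= to < means a range where low == high (a single usable local port) is now accepted, and only a truly inverted range is rejected. A tiny sketch of that check (validate_port_range() is a made-up name):

#include <assert.h>
#include <errno.h>

/* Hypothetical validator mirroring the sysctl check: reject only a
 * genuinely inverted range, so low == high is allowed. */
static int validate_port_range(const int range[2])
{
        if (range[1] < range[0])
                return -EINVAL;
        return 0;
}

int main(void)
{
        int single[2] = { 50000, 50000 };
        int inverted[2] = { 61000, 32768 };

        assert(validate_port_range(single) == 0);       /* rejected before the fix */
        assert(validate_port_range(inverted) == -EINVAL);
        return 0;
}
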
@@ -147,13 +147,14 @@ int __udp_lib_get_port(struct sock *sk, unsigned short snum,
 	write_lock_bh(&udp_hash_lock);
 
 	if (!snum) {
-		int i, low, high;
+		int i, low, high, remaining;
 		unsigned rover, best, best_size_so_far;
 
 		inet_get_local_port_range(&low, &high);
+		remaining = (high - low) + 1;
 
 		best_size_so_far = UINT_MAX;
-		best = rover = net_random() % (high - low) + low;
+		best = rover = net_random() % remaining + low;
 
 		/* 1st pass: look for empty (or shortest) hash chain */
 		for (i = 0; i < UDP_HTABLE_SIZE; i++) {

@@ -261,7 +261,7 @@ int inet6_hash_connect(struct inet_timewait_death_row *death_row,
 		struct inet_timewait_sock *tw = NULL;
 
 		inet_get_local_port_range(&low, &high);
-		remaining = high - low;
+		remaining = (high - low) + 1;
 
 		local_bh_disable();
 		for (i = 1; i <= remaining; i++) {

@@ -108,14 +108,11 @@ struct ieee802_11_elems {
 	u8 wmm_param_len;
 };
 
-enum ParseRes { ParseOK = 0, ParseUnknown = 1, ParseFailed = -1 };
-
-static enum ParseRes ieee802_11_parse_elems(u8 *start, size_t len,
-					    struct ieee802_11_elems *elems)
+static void ieee802_11_parse_elems(u8 *start, size_t len,
+				   struct ieee802_11_elems *elems)
 {
 	size_t left = len;
 	u8 *pos = start;
-	int unknown = 0;
 
 	memset(elems, 0, sizeof(*elems));
 

@@ -126,15 +123,8 @@ static enum ParseRes ieee802_11_parse_elems(u8 *start, size_t len,
 		elen = *pos++;
 		left -= 2;
 
-		if (elen > left) {
-#if 0
-			if (net_ratelimit())
-				printk(KERN_DEBUG "IEEE 802.11 element parse "
-				       "failed (id=%d elen=%d left=%d)\n",
-				       id, elen, left);
-#endif
-			return ParseFailed;
-		}
+		if (elen > left)
+			return;
 
 		switch (id) {
 		case WLAN_EID_SSID:

@@ -201,28 +191,15 @@ static enum ParseRes ieee802_11_parse_elems(u8 *start, size_t len,
 			elems->ext_supp_rates_len = elen;
 			break;
 		default:
-#if 0
-			printk(KERN_DEBUG "IEEE 802.11 element parse ignored "
-			       "unknown element (id=%d elen=%d)\n",
-			       id, elen);
-#endif
-			unknown++;
 			break;
 		}
 
 		left -= elen;
 		pos += elen;
 	}
 
-	/* Do not trigger error if left == 1 as Apple Airport base stations
-	 * send AssocResps that are one spurious byte too long. */
-
-	return unknown ? ParseUnknown : ParseOK;
 }
 
 
 static int ecw2cw(int ecw)
 {
 	int cw = 1;

@@ -931,12 +908,7 @@ static void ieee80211_auth_challenge(struct net_device *dev,
 
 	printk(KERN_DEBUG "%s: replying to auth challenge\n", dev->name);
 	pos = mgmt->u.auth.variable;
-	if (ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems)
-	    == ParseFailed) {
-		printk(KERN_DEBUG "%s: failed to parse Auth(challenge)\n",
-		       dev->name);
-		return;
-	}
+	ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
 	if (!elems.challenge) {
 		printk(KERN_DEBUG "%s: no challenge IE in shared key auth "
 		       "frame\n", dev->name);

@@ -1230,12 +1202,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct net_device *dev,
 	aid &= ~(BIT(15) | BIT(14));
 
 	pos = mgmt->u.assoc_resp.variable;
-	if (ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems)
-	    == ParseFailed) {
-		printk(KERN_DEBUG "%s: failed to parse AssocResp\n",
-		       dev->name);
-		return;
-	}
+	ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
 
 	if (!elems.supp_rates) {
 		printk(KERN_DEBUG "%s: no SuppRates element in AssocResp\n",

@@ -1459,7 +1426,7 @@ static void ieee80211_rx_bss_info(struct net_device *dev,
 	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
 	struct ieee802_11_elems elems;
 	size_t baselen;
-	int channel, invalid = 0, clen;
+	int channel, clen;
 	struct ieee80211_sta_bss *bss;
 	struct sta_info *sta;
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);

@@ -1505,9 +1472,7 @@ static void ieee80211_rx_bss_info(struct net_device *dev,
 #endif /* CONFIG_MAC80211_IBSS_DEBUG */
 	}
 
-	if (ieee802_11_parse_elems(mgmt->u.beacon.variable, len - baselen,
-				   &elems) == ParseFailed)
-		invalid = 1;
+	ieee802_11_parse_elems(mgmt->u.beacon.variable, len - baselen, &elems);
 
 	if (sdata->type == IEEE80211_IF_TYPE_IBSS && elems.supp_rates &&
 	    memcmp(mgmt->bssid, sdata->u.sta.bssid, ETH_ALEN) == 0 &&

@@ -1724,9 +1689,7 @@ static void ieee80211_rx_mgmt_beacon(struct net_device *dev,
 	if (baselen > len)
 		return;
 
-	if (ieee802_11_parse_elems(mgmt->u.beacon.variable, len - baselen,
-				   &elems) == ParseFailed)
-		return;
+	ieee802_11_parse_elems(mgmt->u.beacon.variable, len - baselen, &elems);
 
 	if (elems.erp_info && elems.erp_info_len >= 1)
 		ieee80211_handle_erp_ie(dev, elems.erp_info[0]);

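Taken together, the mac80211 hunks change the element-parsing contract: parsing is now best-effort and returns void, and each caller simply parses and then tests for the specific IEs it needs (elems.challenge, elems.supp_rates, and so on). A simplified, self-contained sketch of that calling convention (hypothetical parse_elems() and struct elems, handling only two element IDs):

#include <stdio.h>
#include <string.h>

/* Hypothetical, much-simplified element table -- the real
 * struct ieee802_11_elems tracks many more IEs. */
struct elems {
        const unsigned char *ssid;
        unsigned char ssid_len;
        const unsigned char *supp_rates;
        unsigned char supp_rates_len;
};

/* Parse TLV-encoded information elements (id, length, payload).  As in the
 * patched code, truncated or unknown elements are skipped rather than
 * reported, so the function can return void. */
static void parse_elems(const unsigned char *pos, size_t left, struct elems *elems)
{
        memset(elems, 0, sizeof(*elems));

        while (left >= 2) {
                unsigned char id = *pos++;
                unsigned char elen = *pos++;

                left -= 2;
                if (elen > left)
                        return;         /* truncated element: stop quietly */

                switch (id) {
                case 0:                 /* WLAN_EID_SSID */
                        elems->ssid = pos;
                        elems->ssid_len = elen;
                        break;
                case 1:                 /* WLAN_EID_SUPP_RATES */
                        elems->supp_rates = pos;
                        elems->supp_rates_len = elen;
                        break;
                default:                /* unknown: ignore */
                        break;
                }
                left -= elen;
                pos += elen;
        }
}

int main(void)
{
        /* SSID "ap" followed by one supported rate. */
        const unsigned char ies[] = { 0, 2, 'a', 'p', 1, 1, 0x82 };
        struct elems elems;

        parse_elems(ies, sizeof(ies), &elems);

        /* Callers now check for the elements they actually need. */
        if (!elems.supp_rates)
                fprintf(stderr, "no SuppRates element\n");
        else
                printf("ssid_len=%u rates_len=%u\n", elems.ssid_len, elems.supp_rates_len);
        return 0;
}
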
@@ -1623,11 +1623,6 @@ static struct vm_operations_struct packet_mmap_ops = {
 	.close =packet_mm_close,
 };
 
-static inline struct page *pg_vec_endpage(char *one_pg_vec, unsigned int order)
-{
-	return virt_to_page(one_pg_vec + (PAGE_SIZE << order) - 1);
-}
-
 static void free_pg_vec(char **pg_vec, unsigned int order, unsigned int len)
 {
 	int i;

@@ -2,9 +2,7 @@
 # Traffic control configuration.
 #
 
-menu "QoS and/or fair queueing"
-
-config NET_SCHED
+menuconfig NET_SCHED
 	bool "QoS and/or fair queueing"
+	select NET_SCH_FIFO
 	---help---

@@ -41,9 +39,6 @@ config NET_SCHED
 	  The available schedulers are listed in the following questions; you
 	  can say Y to as many as you like. If unsure, say N now.
 
-config NET_SCH_FIFO
-	bool
-
 if NET_SCHED
 
 comment "Queueing/Scheduling"

@@ -500,4 +495,5 @@ config NET_CLS_IND
 
 endif # NET_SCHED
 
-endmenu
+config NET_SCH_FIFO
+	bool

@@ -556,6 +556,7 @@ void dev_deactivate(struct net_device *dev)
 {
 	struct Qdisc *qdisc;
 	struct sk_buff *skb;
+	int running;
 
 	spin_lock_bh(&dev->queue_lock);
 	qdisc = dev->qdisc;

@@ -571,12 +572,31 @@ void dev_deactivate(struct net_device *dev)
 
 	dev_watchdog_down(dev);
 
-	/* Wait for outstanding dev_queue_xmit calls. */
+	/* Wait for outstanding qdisc-less dev_queue_xmit calls. */
 	synchronize_rcu();
 
 	/* Wait for outstanding qdisc_run calls. */
-	while (test_bit(__LINK_STATE_QDISC_RUNNING, &dev->state))
-		yield();
+	do {
+		while (test_bit(__LINK_STATE_QDISC_RUNNING, &dev->state))
+			yield();
+
+		/*
+		 * Double-check inside queue lock to ensure that all effects
+		 * of the queue run are visible when we return.
+		 */
+		spin_lock_bh(&dev->queue_lock);
+		running = test_bit(__LINK_STATE_QDISC_RUNNING, &dev->state);
+		spin_unlock_bh(&dev->queue_lock);
+
+		/*
+		 * The running flag should never be set at this point because
+		 * we've already set dev->qdisc to noop_qdisc *inside* the same
+		 * pair of spin locks. That is, if any qdisc_run starts after
+		 * our initial test it should see the noop_qdisc and then
+		 * clear the RUNNING bit before dropping the queue lock. So
+		 * if it is set here then we've found a bug.
+		 */
+	} while (WARN_ON_ONCE(running));
 }
 
 void dev_init_scheduler(struct net_device *dev)

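The dev_deactivate fix replaces the bare busy-wait with a wait-then-double-check loop: spin until the RUNNING bit looks clear, then re-read it under dev->queue_lock so every effect of the last queue run is visible to the caller; because dev->qdisc was already switched to noop_qdisc under that same lock, the re-check should never see the bit set, hence the WARN_ON_ONCE. A userspace approximation of the same shape (a pthread mutex standing in for queue_lock and an atomic flag for the RUNNING bit; illustrative only, not the kernel code):

/* build: cc -pthread double_check.c */
#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int qdisc_running;        /* stands in for __LINK_STATE_QDISC_RUNNING */
static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

/* Mimics qdisc_run: the flag is set and cleared while the lock is held,
 * so it is always clear again before the lock is dropped. */
static void *fake_qdisc_run(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&queue_lock);
        atomic_store(&qdisc_running, 1);
        /* ... transmit pending packets ... */
        atomic_store(&qdisc_running, 0);
        pthread_mutex_unlock(&queue_lock);
        return NULL;
}

/* The shape of the new dev_deactivate wait loop. */
static void wait_for_quiescence(void)
{
        int running;

        do {
                while (atomic_load(&qdisc_running))
                        sched_yield();

                /* Double-check under the lock so all effects of the
                 * last run are visible when we return. */
                pthread_mutex_lock(&queue_lock);
                running = atomic_load(&qdisc_running);
                pthread_mutex_unlock(&queue_lock);
        } while (running);      /* the kernel wraps this in WARN_ON_ONCE() */
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, fake_qdisc_run, NULL);
        wait_for_quiescence();
        pthread_join(&t, NULL);
        puts("queue quiesced");
        return 0;
}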