Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (21 commits)
  slip: fix wrong SLIP6 ifdef-endif placing
  natsemi: fix another dma-debug report
  sctp: ABORT if receive, reassembly, or reordering queue is not empty while closing socket
  net: Fix default in docs for tcp_orphan_retries.
  hso: fix a use after free condition
  net/natsemi: Fix module parameter permissions
  XFRM: Fix memory leak in xfrm_state_update
  sctp: Enforce retransmission limit during shutdown
  mac80211: fix TKIP replay vulnerability
  mac80211: fix ie memory allocation for scheduled scans
  ssb: fix init regression of hostmode PCI core
  rtlwifi: rtl8192cu: Add new USB ID for Netgear WNA1000M
  ath9k: Fix tx throughput drops for AR9003 chips with AES encryption
  carl9170: add NEC WL300NU-AG usbid
  cfg80211: fix deadlock with rfkill/sched_scan by adding new mutex
  ath5k: fix incorrect use of drvdata in PCI suspend/resume code
  ath5k: fix incorrect use of drvdata in sysfs code
  Bluetooth: Fix memory leak under page timeouts
  Bluetooth: Fix regression with incoming L2CAP connections
  Bluetooth: Fix hidp disconnect deadlocks and lost wakeup
  ...
commit 5d7d5d9332
29 changed files with 187 additions and 72 deletions

@@ -346,7 +346,7 @@ tcp_orphan_retries - INTEGER
 	when RTO retransmissions remain unacknowledged.
 	See tcp_retries2 for more details.
 
-	The default value is 7.
+	The default value is 8.
 	If your machine is a loaded WEB server,
 	you should think about lowering this value, such sockets
 	may consume significant resources. Cf. tcp_max_orphans.

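Editorial note: the doc fix above brings the text in line with the kernel's effective orphan-retry count. A minimal userspace sketch (an illustration added here, not part of the patch; assumes procfs is mounted at /proc) for checking the live sysctl:

	#include <stdio.h>

	int main(void)
	{
		/* The sysctl itself usually reads 0; 0 means the kernel
		 * falls back to its built-in default of 8 retries. */
		FILE *f = fopen("/proc/sys/net/ipv4/tcp_orphan_retries", "r");
		int val;

		if (!f) {
			perror("fopen");
			return 1;
		}
		if (fscanf(f, "%d", &val) != 1) {
			fclose(f);
			return 1;
		}
		fclose(f);
		printf("tcp_orphan_retries = %d\n", val);
		return 0;
	}
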
@@ -140,7 +140,7 @@ MODULE_LICENSE("GPL");
 module_param(mtu, int, 0);
 module_param(debug, int, 0);
 module_param(rx_copybreak, int, 0);
-module_param(dspcfg_workaround, int, 1);
+module_param(dspcfg_workaround, int, 0);
 module_param_array(options, int, NULL, 0);
 module_param_array(full_duplex, int, NULL, 0);
 MODULE_PARM_DESC(mtu, "DP8381x MTU (all boards)");

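Editorial note: the permission fix above turns on module_param()'s third argument, which is the sysfs mode of /sys/module/<module>/parameters/<param>. A bare 1 (world-execute) is not a valid parameter mode, while 0 simply omits the sysfs entry. A hypothetical minimal module sketching the convention (illustration only, not part of the patch):

	#include <linux/init.h>
	#include <linux/module.h>
	#include <linux/moduleparam.h>

	static int dspcfg_workaround = 1;
	module_param(dspcfg_workaround, int, 0);	/* hidden from sysfs */

	static int debug;
	module_param(debug, int, 0444);			/* visible, read-only */

	static int __init demo_init(void) { return 0; }
	static void __exit demo_exit(void) { }

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");
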
@@ -2028,8 +2028,8 @@ static void drain_rx(struct net_device *dev)
 		np->rx_ring[i].cmd_status = 0;
 		np->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
 		if (np->rx_skbuff[i]) {
-			pci_unmap_single(np->pci_dev,
-				np->rx_dma[i], buflen,
+			pci_unmap_single(np->pci_dev, np->rx_dma[i],
+				buflen + NATSEMI_PADDING,
 				PCI_DMA_FROMDEVICE);
 			dev_kfree_skb(np->rx_skbuff[i]);
 		}

@@ -182,10 +182,10 @@ static int sl_alloc_bufs(struct slip *sl, int mtu)
 #ifdef SL_INCLUDE_CSLIP
 	cbuff = xchg(&sl->cbuff, cbuff);
 	slcomp = xchg(&sl->slcomp, slcomp);
+#endif
 #ifdef CONFIG_SLIP_MODE_SLIP6
 	sl->xdata = 0;
 	sl->xbits = 0;
 #endif
-#endif
 	spin_unlock_bh(&sl->lock);
 	err = 0;

@@ -2421,10 +2421,8 @@ static void hso_free_net_device(struct hso_device *hso_dev)
 
 	remove_net_device(hso_net->parent);
 
-	if (hso_net->net) {
+	if (hso_net->net)
 		unregister_netdev(hso_net->net);
-		free_netdev(hso_net->net);
-	}
 
 	/* start freeing */
 	for (i = 0; i < MUX_BULK_RX_BUF_COUNT; i++) {

@@ -2436,6 +2434,9 @@ static void hso_free_net_device(struct hso_device *hso_dev)
 	kfree(hso_net->mux_bulk_tx_buf);
 	hso_net->mux_bulk_tx_buf = NULL;
 
+	if (hso_net->net)
+		free_netdev(hso_net->net);
+
 	kfree(hso_dev);
 }
 

@@ -297,7 +297,9 @@ ath5k_pci_remove(struct pci_dev *pdev)
 #ifdef CONFIG_PM_SLEEP
 static int ath5k_pci_suspend(struct device *dev)
 {
-	struct ath5k_softc *sc = pci_get_drvdata(to_pci_dev(dev));
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
+	struct ath5k_softc *sc = hw->priv;
 
 	ath5k_led_off(sc);
 	return 0;

@@ -306,7 +308,8 @@ static int ath5k_pci_suspend(struct device *dev)
 static int ath5k_pci_resume(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
-	struct ath5k_softc *sc = pci_get_drvdata(pdev);
+	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
+	struct ath5k_softc *sc = hw->priv;
 
 	/*
 	 * Suspend/Resume resets the PCI configuration space, so we have to

@@ -10,7 +10,8 @@ static ssize_t ath5k_attr_show_##name(struct device *dev,	\
 			struct device_attribute *attr,		\
 			char *buf)				\
 {								\
-	struct ath5k_softc *sc = dev_get_drvdata(dev);		\
+	struct ieee80211_hw *hw = dev_get_drvdata(dev);		\
+	struct ath5k_softc *sc = hw->priv;			\
 	return snprintf(buf, PAGE_SIZE, "%d\n", get); 		\
 }								\
 								\

@@ -18,7 +19,8 @@ static ssize_t ath5k_attr_store_##name(struct device *dev,	\
 			struct device_attribute *attr,		\
 			const char *buf, size_t count)		\
 {								\
-	struct ath5k_softc *sc = dev_get_drvdata(dev);		\
+	struct ieee80211_hw *hw = dev_get_drvdata(dev);		\
+	struct ath5k_softc *sc = hw->priv;			\
 	int val;						\
 								\
 	val = (int)simple_strtoul(buf, NULL, 10);		\

@@ -33,7 +35,8 @@ static ssize_t ath5k_attr_show_##name(struct device *dev,	\
 			struct device_attribute *attr,		\
 			char *buf)				\
 {								\
-	struct ath5k_softc *sc = dev_get_drvdata(dev);		\
+	struct ieee80211_hw *hw = dev_get_drvdata(dev);		\
+	struct ath5k_softc *sc = hw->priv;			\
 	return snprintf(buf, PAGE_SIZE, "%d\n", get); 		\
 }								\
 static DEVICE_ATTR(name, S_IRUGO, ath5k_attr_show_##name, NULL)

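Editorial note: both ath5k fixes above follow from one convention: the driver stores its struct ieee80211_hw * (not the softc) as drvdata, so every consumer must go through hw->priv to reach struct ath5k_softc. An abridged probe-side sketch of that convention (illustration only; presumes ath5k's local declarations such as struct ath5k_softc and ath5k_hw_ops, and elides the real probe work):

	static int ath5k_pci_probe_sketch(struct pci_dev *pdev)
	{
		struct ieee80211_hw *hw;
		struct ath5k_softc *sc;

		hw = ieee80211_alloc_hw(sizeof(*sc), &ath5k_hw_ops);
		if (!hw)
			return -ENOMEM;

		sc = hw->priv;
		pci_set_drvdata(pdev, hw);	/* drvdata is the hw, not the softc */
		/* ... rest of probe elided ... */
		return 0;
	}
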
@@ -671,7 +671,8 @@ static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
 	 * TODO - this could be improved to be dependent on the rate.
 	 *       The hardware can keep up at lower rates, but not higher rates
 	 */
-	if (fi->keyix != ATH9K_TXKEYIX_INVALID)
+	if ((fi->keyix != ATH9K_TXKEYIX_INVALID) &&
+	    !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA))
 		ndelim += ATH_AGGR_ENCRYPTDELIM;
 
 	/*

@@ -112,6 +112,8 @@ static struct usb_device_id carl9170_usb_ids[] = {
 	{ USB_DEVICE(0x04bb, 0x093f) },
 	/* NEC WL300NU-G */
 	{ USB_DEVICE(0x0409, 0x0249) },
+	/* NEC WL300NU-AG */
+	{ USB_DEVICE(0x0409, 0x02b4) },
 	/* AVM FRITZ!WLAN USB Stick N */
 	{ USB_DEVICE(0x057c, 0x8401) },
 	/* AVM FRITZ!WLAN USB Stick N 2.4 */

@@ -298,6 +298,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
 	{RTL_USB_DEVICE(0x06f8, 0xe033, rtl92cu_hal_cfg)}, /*Hercules - Edimax*/
 	{RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
 	{RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
+	{RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/
 	{RTL_USB_DEVICE(0x0Df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
 	{RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/
 	/* HP - Lite-On ,8188CUS Slim Combo */

@@ -516,8 +516,17 @@ static void ssb_pcicore_pcie_setup_workarounds(struct ssb_pcicore *pc)
 
 static void ssb_pcicore_init_clientmode(struct ssb_pcicore *pc)
 {
+	ssb_pcicore_fix_sprom_core_index(pc);
+
 	/* Disable PCI interrupts. */
 	ssb_write32(pc->dev, SSB_INTVEC, 0);
+
+	/* Additional PCIe always once-executed workarounds */
+	if (pc->dev->id.coreid == SSB_DEV_PCIE) {
+		ssb_pcicore_serdes_workaround(pc);
+		/* TODO: ASPM */
+		/* TODO: Clock Request Update */
+	}
 }
 
 void ssb_pcicore_init(struct ssb_pcicore *pc)

@@ -529,8 +538,6 @@ void ssb_pcicore_init(struct ssb_pcicore *pc)
 	if (!ssb_device_is_enabled(dev))
 		ssb_device_enable(dev, 0);
 
-	ssb_pcicore_fix_sprom_core_index(pc);
-
 #ifdef CONFIG_SSB_PCICORE_HOSTMODE
 	pc->hostmode = pcicore_is_in_hostmode(pc);
 	if (pc->hostmode)

@@ -538,13 +545,6 @@ void ssb_pcicore_init(struct ssb_pcicore *pc)
 #endif /* CONFIG_SSB_PCICORE_HOSTMODE */
 	if (!pc->hostmode)
 		ssb_pcicore_init_clientmode(pc);
-
-	/* Additional PCIe always once-executed workarounds */
-	if (dev->id.coreid == SSB_DEV_PCIE) {
-		ssb_pcicore_serdes_workaround(pc);
-		/* TODO: ASPM */
-		/* TODO: Clock Request Update */
-	}
 }
 
 static u32 ssb_pcie_read(struct ssb_pcicore *pc, u32 address)

@@ -63,6 +63,7 @@ typedef enum {
 	SCTP_CMD_ECN_ECNE,	/* Do delayed ECNE processing. */
 	SCTP_CMD_ECN_CWR,	/* Do delayed CWR processing. */
 	SCTP_CMD_TIMER_START,	/* Start a timer. */
+	SCTP_CMD_TIMER_START_ONCE, /* Start a timer once */
 	SCTP_CMD_TIMER_RESTART,	/* Restart a timer. */
 	SCTP_CMD_TIMER_STOP,	/* Stop a timer. */
 	SCTP_CMD_INIT_CHOOSE_TRANSPORT, /* Choose transport for an INIT. */

@@ -80,7 +80,7 @@ static inline struct sctp_ulpevent *sctp_skb2event(struct sk_buff *skb)
 
 void sctp_ulpevent_free(struct sctp_ulpevent *);
 int sctp_ulpevent_is_notification(const struct sctp_ulpevent *);
-void sctp_queue_purge_ulpevents(struct sk_buff_head *list);
+unsigned int sctp_queue_purge_ulpevents(struct sk_buff_head *list);
 
 struct sctp_ulpevent *sctp_ulpevent_make_assoc_change(
 	const struct sctp_association *asoc,

@@ -393,6 +393,9 @@ int hci_conn_del(struct hci_conn *conn)
 
 	hci_dev_put(hdev);
 
+	if (conn->handle == 0)
+		kfree(conn);
+
 	return 0;
 }
 

@@ -464,7 +464,8 @@ static void hidp_idle_timeout(unsigned long arg)
 {
 	struct hidp_session *session = (struct hidp_session *) arg;
 
-	kthread_stop(session->task);
+	atomic_inc(&session->terminate);
+	wake_up_process(session->task);
 }
 
 static void hidp_set_timer(struct hidp_session *session)

@@ -535,7 +536,8 @@ static void hidp_process_hid_control(struct hidp_session *session,
 		skb_queue_purge(&session->ctrl_transmit);
 		skb_queue_purge(&session->intr_transmit);
 
-		kthread_stop(session->task);
+		atomic_inc(&session->terminate);
+		wake_up_process(current);
 	}
 }
 

@@ -706,9 +708,8 @@ static int hidp_session(void *arg)
 	add_wait_queue(sk_sleep(intr_sk), &intr_wait);
 	session->waiting_for_startup = 0;
 	wake_up_interruptible(&session->startup_queue);
-	while (!kthread_should_stop()) {
-		set_current_state(TASK_INTERRUPTIBLE);
-
+	set_current_state(TASK_INTERRUPTIBLE);
+	while (!atomic_read(&session->terminate)) {
 		if (ctrl_sk->sk_state != BT_CONNECTED ||
 		    intr_sk->sk_state != BT_CONNECTED)
 			break;

@@ -726,6 +727,7 @@ static int hidp_session(void *arg)
 		hidp_process_transmit(session);
 
 		schedule();
+		set_current_state(TASK_INTERRUPTIBLE);
 	}
 	set_current_state(TASK_RUNNING);
 	remove_wait_queue(sk_sleep(intr_sk), &intr_wait);

@@ -1060,7 +1062,8 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
 err_add_device:
 	hid_destroy_device(session->hid);
 	session->hid = NULL;
-	kthread_stop(session->task);
+	atomic_inc(&session->terminate);
+	wake_up_process(session->task);
 
 unlink:
 	hidp_del_timer(session);

@@ -1111,7 +1114,8 @@ int hidp_del_connection(struct hidp_conndel_req *req)
 			skb_queue_purge(&session->ctrl_transmit);
 			skb_queue_purge(&session->intr_transmit);
 
-			kthread_stop(session->task);
+			atomic_inc(&session->terminate);
+			wake_up_process(session->task);
 		}
 	} else
 		err = -ENOENT;

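Editorial note: the four hidp changes above all replace kthread_stop() with the same pattern. kthread_stop() sleeps until the target thread exits, so it deadlocks when called from a timer callback (atomic context, hidp_idle_timeout) or from the session thread itself (hidp_process_hid_control). A self-contained sketch of the flag-and-wake pattern (names abbreviated from the hidp code; not part of the patch):

	#include <linux/atomic.h>
	#include <linux/sched.h>

	struct session_sketch {
		atomic_t terminate;
		struct task_struct *task;
	};

	/* Safe from any context, including timers: just flag and wake. */
	static void session_request_stop(struct session_sketch *s)
	{
		atomic_inc(&s->terminate);
		wake_up_process(s->task);
	}

	/* Main loop of the kthread polls the flag instead of
	 * kthread_should_stop(). */
	static int session_thread(void *arg)
	{
		struct session_sketch *s = arg;

		set_current_state(TASK_INTERRUPTIBLE);
		while (!atomic_read(&s->terminate)) {
			/* ... do work, then sleep until woken ... */
			schedule();
			set_current_state(TASK_INTERRUPTIBLE);
		}
		set_current_state(TASK_RUNNING);
		return 0;
	}
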
@@ -142,6 +142,7 @@ struct hidp_session {
 	uint ctrl_mtu;
 	uint intr_mtu;
 
+	atomic_t terminate;
 	struct task_struct *task;
 
 	unsigned char keys[8];

@@ -2323,7 +2323,8 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
 
 	sk = chan->sk;
 
-	if (sk->sk_state != BT_CONFIG) {
+	if ((bt_sk(sk)->defer_setup && sk->sk_state != BT_CONNECT2) ||
+	    (!bt_sk(sk)->defer_setup && sk->sk_state != BT_CONFIG)) {
 		struct l2cap_cmd_rej rej;
 
 		rej.reason = cpu_to_le16(0x0002);

@@ -2334,7 +2335,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
 
 	/* Reject if config buffer is too small. */
 	len = cmd_len - sizeof(*req);
-	if (chan->conf_len + len > sizeof(chan->conf_req)) {
+	if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
 				l2cap_build_conf_rsp(chan, rsp,
 					L2CAP_CONF_REJECT, flags), rsp);

@@ -877,7 +877,8 @@ int ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
 	for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
 		local->sched_scan_ies.ie[i] = kzalloc(2 +
 						      IEEE80211_MAX_SSID_LEN +
-						      local->scan_ies_len,
+						      local->scan_ies_len +
+						      req->ie_len,
 						      GFP_KERNEL);
 		if (!local->sched_scan_ies.ie[i]) {
 			ret = -ENOMEM;

@@ -86,6 +86,11 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
 	struct sk_buff *skb = rx->skb;
 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	int queue = rx->queue;
+
+	/* otherwise, TKIP is vulnerable to TID 0 vs. non-QoS replays */
+	if (rx->queue == NUM_RX_DATA_QUEUES - 1)
+		queue = 0;
 
 	/*
 	 * it makes no sense to check for MIC errors on anything other

@@ -148,8 +153,8 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
 
 update_iv:
 	/* update IV in key information to be able to detect replays */
-	rx->key->u.tkip.rx[rx->queue].iv32 = rx->tkip_iv32;
-	rx->key->u.tkip.rx[rx->queue].iv16 = rx->tkip_iv16;
+	rx->key->u.tkip.rx[queue].iv32 = rx->tkip_iv32;
+	rx->key->u.tkip.rx[queue].iv16 = rx->tkip_iv16;
 
 	return RX_CONTINUE;
 

@@ -241,6 +246,11 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx)
 	struct ieee80211_key *key = rx->key;
 	struct sk_buff *skb = rx->skb;
 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+	int queue = rx->queue;
+
+	/* otherwise, TKIP is vulnerable to TID 0 vs. non-QoS replays */
+	if (rx->queue == NUM_RX_DATA_QUEUES - 1)
+		queue = 0;
 
 	hdrlen = ieee80211_hdrlen(hdr->frame_control);
 

@@ -261,7 +271,7 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx)
 	res = ieee80211_tkip_decrypt_data(rx->local->wep_rx_tfm,
 					  key, skb->data + hdrlen,
 					  skb->len - hdrlen, rx->sta->sta.addr,
-					  hdr->addr1, hwaccel, rx->queue,
+					  hdr->addr1, hwaccel, queue,
 					  &rx->tkip_iv32,
 					  &rx->tkip_iv16);
 	if (res != TKIP_DECRYPT_OK)

@@ -1582,6 +1582,8 @@ static void sctp_check_transmitted(struct sctp_outq *q,
 #endif /* SCTP_DEBUG */
 	if (transport) {
 		if (bytes_acked) {
+			struct sctp_association *asoc = transport->asoc;
+
 			/* We may have counted DATA that was migrated
 			 * to this transport due to DEL-IP operation.
 			 * Subtract those bytes, since the were never

@@ -1600,6 +1602,17 @@ static void sctp_check_transmitted(struct sctp_outq *q,
 			transport->error_count = 0;
 			transport->asoc->overall_error_count = 0;
 
+			/*
+			 * While in SHUTDOWN PENDING, we may have started
+			 * the T5 shutdown guard timer after reaching the
+			 * retransmission limit. Stop that timer as soon
+			 * as the receiver acknowledged any data.
+			 */
+			if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING &&
+			    del_timer(&asoc->timers
+				[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]))
+					sctp_association_put(asoc);
+
 			/* Mark the destination transport address as
 			 * active if it is not so marked.
 			 */

@@ -1629,10 +1642,15 @@ static void sctp_check_transmitted(struct sctp_outq *q,
 		 * A sender is doing zero window probing when the
 		 * receiver's advertised window is zero, and there is
 		 * only one data chunk in flight to the receiver.
+		 *
+		 * Allow the association to timeout while in SHUTDOWN
+		 * PENDING or SHUTDOWN RECEIVED in case the receiver
+		 * stays in zero window mode forever.
 		 */
 		if (!q->asoc->peer.rwnd &&
 		    !list_empty(&tlist) &&
-		    (sack_ctsn+2 == q->asoc->next_tsn)) {
+		    (sack_ctsn+2 == q->asoc->next_tsn) &&
+		    q->asoc->state < SCTP_STATE_SHUTDOWN_PENDING) {
 			SCTP_DEBUG_PRINTK("%s: SACK received for zero "
 					  "window probe: %u\n",
 					  __func__, sack_ctsn);

@@ -670,9 +670,18 @@ static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
 	/* 8.3 Upon the receipt of the HEARTBEAT ACK, the sender of the
 	 * HEARTBEAT should clear the error counter of the destination
 	 * transport address to which the HEARTBEAT was sent.
-	 * The association's overall error count is also cleared.
 	 */
 	t->error_count = 0;
-	t->asoc->overall_error_count = 0;
+
+	/*
+	 * Although RFC4960 specifies that the overall error count must
+	 * be cleared when a HEARTBEAT ACK is received, we make an
+	 * exception while in SHUTDOWN PENDING. If the peer keeps its
+	 * window shut forever, we may never be able to transmit our
+	 * outstanding data and rely on the retransmission limit be reached
+	 * to shutdown the association.
+	 */
+	if (t->asoc->state != SCTP_STATE_SHUTDOWN_PENDING)
+		t->asoc->overall_error_count = 0;
 
 	/* Clear the hb_sent flag to signal that we had a good

@@ -1437,6 +1446,13 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
 			sctp_cmd_setup_t2(commands, asoc, cmd->obj.ptr);
 			break;
 
+		case SCTP_CMD_TIMER_START_ONCE:
+			timer = &asoc->timers[cmd->obj.to];
+
+			if (timer_pending(timer))
+				break;
+			/* fall through */
+
 		case SCTP_CMD_TIMER_START:
 			timer = &asoc->timers[cmd->obj.to];
 			timeout = asoc->timeouts[cmd->obj.to];

@@ -5154,7 +5154,7 @@ sctp_disposition_t sctp_sf_do_9_2_start_shutdown(
 	 * The sender of the SHUTDOWN MAY also start an overall guard timer
 	 * 'T5-shutdown-guard' to bound the overall time for shutdown sequence.
 	 */
-	sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
+	sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
 			SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
 
 	if (asoc->autoclose)

@@ -5299,6 +5299,19 @@ sctp_disposition_t sctp_sf_do_6_3_3_rtx(const struct sctp_endpoint *ep,
 	SCTP_INC_STATS(SCTP_MIB_T3_RTX_EXPIREDS);
 
 	if (asoc->overall_error_count >= asoc->max_retrans) {
+		if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING) {
+			/*
+			 * We are here likely because the receiver had its rwnd
+			 * closed for a while and we have not been able to
+			 * transmit the locally queued data within the maximum
+			 * retransmission attempts limit. Start the T5
+			 * shutdown guard timer to give the receiver one last
+			 * chance and some additional time to recover before
+			 * aborting.
+			 */
+			sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START_ONCE,
+				SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
+		} else {
 		sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
 				SCTP_ERROR(ETIMEDOUT));
 		/* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */

@@ -5308,6 +5321,7 @@ sctp_disposition_t sctp_sf_do_6_3_3_rtx(const struct sctp_endpoint *ep,
 		SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
 		return SCTP_DISPOSITION_DELETE_TCB;
+		}
 	}
 
 	/* E1) For the destination address for which the timer
 	 * expires, adjust its ssthresh with rules defined in Section

@@ -827,7 +827,7 @@ static const sctp_sm_table_entry_t other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_
 	/* SCTP_STATE_ESTABLISHED */ \
 	TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
 	/* SCTP_STATE_SHUTDOWN_PENDING */ \
-	TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+	TYPE_SCTP_FUNC(sctp_sf_t5_timer_expire), \
 	/* SCTP_STATE_SHUTDOWN_SENT */ \
 	TYPE_SCTP_FUNC(sctp_sf_t5_timer_expire), \
 	/* SCTP_STATE_SHUTDOWN_RECEIVED */ \

@@ -1384,6 +1384,7 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
 	struct sctp_endpoint *ep;
 	struct sctp_association *asoc;
 	struct list_head *pos, *temp;
+	unsigned int data_was_unread;
 
 	SCTP_DEBUG_PRINTK("sctp_close(sk: 0x%p, timeout:%ld)\n", sk, timeout);
 

@@ -1393,6 +1394,10 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
 
 	ep = sctp_sk(sk)->ep;
 
+	/* Clean up any skbs sitting on the receive queue. */
+	data_was_unread = sctp_queue_purge_ulpevents(&sk->sk_receive_queue);
+	data_was_unread += sctp_queue_purge_ulpevents(&sctp_sk(sk)->pd_lobby);
+
 	/* Walk all associations on an endpoint. */
 	list_for_each_safe(pos, temp, &ep->asocs) {
 		asoc = list_entry(pos, struct sctp_association, asocs);

@@ -1410,7 +1415,9 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
 			}
 		}
 
-		if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
+		if (data_was_unread || !skb_queue_empty(&asoc->ulpq.lobby) ||
+		    !skb_queue_empty(&asoc->ulpq.reasm) ||
+		    (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime)) {
 			struct sctp_chunk *chunk;
 
 			chunk = sctp_make_abort_user(asoc, NULL, 0);

@@ -1420,10 +1427,6 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
 			sctp_primitive_SHUTDOWN(asoc, NULL);
 	}
 
-	/* Clean up any skbs sitting on the receive queue. */
-	sctp_queue_purge_ulpevents(&sk->sk_receive_queue);
-	sctp_queue_purge_ulpevents(&sctp_sk(sk)->pd_lobby);
-
 	/* On a TCP-style socket, block for at most linger_time if set. */
 	if (sctp_style(sk, TCP) && timeout)
 		sctp_wait_for_close(sk, timeout);

@@ -1081,9 +1081,19 @@ void sctp_ulpevent_free(struct sctp_ulpevent *event)
 }
 
 /* Purge the skb lists holding ulpevents. */
-void sctp_queue_purge_ulpevents(struct sk_buff_head *list)
+unsigned int sctp_queue_purge_ulpevents(struct sk_buff_head *list)
 {
 	struct sk_buff *skb;
-	while ((skb = skb_dequeue(list)) != NULL)
-		sctp_ulpevent_free(sctp_skb2event(skb));
+	unsigned int data_unread = 0;
+
+	while ((skb = skb_dequeue(list)) != NULL) {
+		struct sctp_ulpevent *event = sctp_skb2event(skb);
+
+		if (!sctp_ulpevent_is_notification(event))
+			data_unread += skb->len;
+
+		sctp_ulpevent_free(event);
+	}
+
+	return data_unread;
 }

@@ -366,6 +366,7 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv)
 
 	mutex_init(&rdev->mtx);
 	mutex_init(&rdev->devlist_mtx);
+	mutex_init(&rdev->sched_scan_mtx);
 	INIT_LIST_HEAD(&rdev->netdev_list);
 	spin_lock_init(&rdev->bss_lock);
 	INIT_LIST_HEAD(&rdev->bss_list);

@@ -701,6 +702,7 @@ void cfg80211_dev_free(struct cfg80211_registered_device *rdev)
 	rfkill_destroy(rdev->rfkill);
 	mutex_destroy(&rdev->mtx);
 	mutex_destroy(&rdev->devlist_mtx);
+	mutex_destroy(&rdev->sched_scan_mtx);
 	list_for_each_entry_safe(scan, tmp, &rdev->bss_list, list)
 		cfg80211_put_bss(&scan->pub);
 	cfg80211_rdev_free_wowlan(rdev);

@@ -737,12 +739,16 @@ static void wdev_cleanup_work(struct work_struct *work)
 		___cfg80211_scan_done(rdev, true);
 	}
 
+	cfg80211_unlock_rdev(rdev);
+
+	mutex_lock(&rdev->sched_scan_mtx);
+
 	if (WARN_ON(rdev->sched_scan_req &&
 		    rdev->sched_scan_req->dev == wdev->netdev)) {
 		__cfg80211_stop_sched_scan(rdev, false);
 	}
 
-	cfg80211_unlock_rdev(rdev);
+	mutex_unlock(&rdev->sched_scan_mtx);
 
 	mutex_lock(&rdev->devlist_mtx);
 	rdev->opencount--;

@@ -830,9 +836,9 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
 		break;
 	case NL80211_IFTYPE_P2P_CLIENT:
 	case NL80211_IFTYPE_STATION:
-		cfg80211_lock_rdev(rdev);
+		mutex_lock(&rdev->sched_scan_mtx);
 		__cfg80211_stop_sched_scan(rdev, false);
-		cfg80211_unlock_rdev(rdev);
+		mutex_unlock(&rdev->sched_scan_mtx);
 
 		wdev_lock(wdev);
 #ifdef CONFIG_CFG80211_WEXT

@@ -65,6 +65,8 @@ struct cfg80211_registered_device {
 	struct work_struct scan_done_wk;
 	struct work_struct sched_scan_results_wk;
 
+	struct mutex sched_scan_mtx;
+
 #ifdef CONFIG_NL80211_TESTMODE
 	struct genl_info *testmode_info;
 #endif

@@ -3461,9 +3461,6 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
 	if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
 		return -EINVAL;
 
-	if (rdev->sched_scan_req)
-		return -EINPROGRESS;
-
 	if (!info->attrs[NL80211_ATTR_SCHED_SCAN_INTERVAL])
 		return -EINVAL;
 

@@ -3502,12 +3499,21 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
 	if (ie_len > wiphy->max_scan_ie_len)
 		return -EINVAL;
 
+	mutex_lock(&rdev->sched_scan_mtx);
+
+	if (rdev->sched_scan_req) {
+		err = -EINPROGRESS;
+		goto out;
+	}
+
 	request = kzalloc(sizeof(*request)
 			+ sizeof(*request->ssids) * n_ssids
 			+ sizeof(*request->channels) * n_channels
 			+ ie_len, GFP_KERNEL);
-	if (!request)
-		return -ENOMEM;
+	if (!request) {
+		err = -ENOMEM;
+		goto out;
+	}
 
 	if (n_ssids)
 		request->ssids = (void *)&request->channels[n_channels];

@@ -3605,6 +3611,7 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
 out_free:
 	kfree(request);
 out:
+	mutex_unlock(&rdev->sched_scan_mtx);
 	return err;
 }
 

@@ -3612,12 +3619,17 @@ static int nl80211_stop_sched_scan(struct sk_buff *skb,
 				   struct genl_info *info)
 {
 	struct cfg80211_registered_device *rdev = info->user_ptr[0];
+	int err;
 
 	if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN) ||
 	    !rdev->ops->sched_scan_stop)
 		return -EOPNOTSUPP;
 
-	return __cfg80211_stop_sched_scan(rdev, false);
+	mutex_lock(&rdev->sched_scan_mtx);
+	err = __cfg80211_stop_sched_scan(rdev, false);
+	mutex_unlock(&rdev->sched_scan_mtx);
+
+	return err;
 }
 
 static int nl80211_send_bss(struct sk_buff *msg, u32 pid, u32 seq, int flags,

@@ -100,14 +100,14 @@ void __cfg80211_sched_scan_results(struct work_struct *wk)
 	rdev = container_of(wk, struct cfg80211_registered_device,
 			    sched_scan_results_wk);
 
-	cfg80211_lock_rdev(rdev);
+	mutex_lock(&rdev->sched_scan_mtx);
 
 	/* we don't have sched_scan_req anymore if the scan is stopping */
 	if (rdev->sched_scan_req)
 		nl80211_send_sched_scan_results(rdev,
 						rdev->sched_scan_req->dev);
 
-	cfg80211_unlock_rdev(rdev);
+	mutex_unlock(&rdev->sched_scan_mtx);
 }
 
 void cfg80211_sched_scan_results(struct wiphy *wiphy)

@@ -123,9 +123,9 @@ void cfg80211_sched_scan_stopped(struct wiphy *wiphy)
 {
 	struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
 
-	cfg80211_lock_rdev(rdev);
+	mutex_lock(&rdev->sched_scan_mtx);
 	__cfg80211_stop_sched_scan(rdev, true);
-	cfg80211_unlock_rdev(rdev);
+	mutex_unlock(&rdev->sched_scan_mtx);
 }
 EXPORT_SYMBOL(cfg80211_sched_scan_stopped);
 

@@ -135,7 +135,7 @@ int __cfg80211_stop_sched_scan(struct cfg80211_registered_device *rdev,
 	int err;
 	struct net_device *dev;
 
-	ASSERT_RDEV_LOCK(rdev);
+	lockdep_assert_held(&rdev->sched_scan_mtx);
 
 	if (!rdev->sched_scan_req)
 		return 0;

@@ -1345,6 +1345,8 @@ int xfrm_state_update(struct xfrm_state *x)
 		xfrm_state_check_expire(x1);
 
 		err = 0;
+		x->km.state = XFRM_STATE_DEAD;
+		__xfrm_state_put(x);
 	}
 	spin_unlock_bh(&x1->lock);
 