Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) The forcedeth conversion from pci_*() DMA interfaces to dma_*()
    ones missed one spot. From Zhu Yanjun.

 2) Missing CRYPTO_SHA256 Kconfig dep in cfg80211, from Johannes Berg.

 3) Fix checksum offloading in thunderx driver, from Sunil Goutham.

 4) Add SPDX to vm_sockets_diag.h, from Stephen Hemminger.

 5) Fix use after free of packet headers in TIPC, from Jon Maloy.

 6) "sizeof(ptr)" vs "sizeof(*ptr)" bug in i40e, from Gustavo A R Silva.

 7) Tunneling fixes in mlxsw driver, from Petr Machata.

 8) Fix crash in fanout_demux_rollover() of AF_PACKET, from Mike Maloney.

 9) Fix race in AF_PACKET bind() vs. NETDEV_UP notifier, from Eric
    Dumazet.

10) Fix regression in sch_sfq.c due to one of the timer_setup()
    conversions. From Paolo Abeni.

11) SCTP does list_for_each_entry() using wrong struct member, fix from
    Xin Long.

12) Don't use big endian netlink attribute read for
    IFLA_BOND_AD_ACTOR_SYSTEM, it is in cpu endianness. Also from Xin
    Long.

13) Fix mis-initialization of q->link.clock in CBQ scheduler, preventing
    adding filters there. From Jiri Pirko.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (67 commits)
  ethernet: dwmac-stm32: Fix copyright
  net: via: via-rhine: use %p to format void * address instead of %x
  net: ethernet: xilinx: Mark XILINX_LL_TEMAC broken on 64-bit
  myri10ge: Update MAINTAINERS
  net: sched: cbq: create block for q->link.block
  atm: suni: remove extraneous space to fix indentation
  atm: lanai: use %p to format kernel addresses instead of %x
  VSOCK: Don't set sk_state to TCP_CLOSE before testing it
  atm: fore200e: use %pK to format kernel addresses instead of %x
  ambassador: fix incorrect indentation of assignment statement
  vxlan: use __be32 type for the param vni in __vxlan_fdb_delete
  bonding: use nla_get_u64 to extract the value for IFLA_BOND_AD_ACTOR_SYSTEM
  sctp: use right member as the param of list_for_each_entry
  sch_sfq: fix null pointer dereference at timer expiration
  cls_bpf: don't decrement net's refcount when offload fails
  net/packet: fix a race in packet_bind() and packet_notifier()
  packet: fix crash in fanout_demux_rollover()
  sctp: remove extern from stream sched
  sctp: force the params with right types for sctp csum apis
  sctp: force SCTP_ERROR_INV_STRM with __u32 when calling sctp_chunk_fail
  ...
commit 96c22a49ac
72 changed files with 1179 additions and 579 deletions
@@ -9331,9 +9331,9 @@ F: drivers/gpu/drm/mxsfb/
 F: Documentation/devicetree/bindings/display/mxsfb-drm.txt

 MYRICOM MYRI-10G 10GbE DRIVER (MYRI10GE)
-M: Hyong-Youb Kim <hykim@myri.com>
+M: Chris Lee <christopher.lee@cspi.com>
 L: netdev@vger.kernel.org
-W: https://www.myricom.com/support/downloads/myri10ge.html
+W: https://www.cspi.com/ethernet-products/support/downloads/
 S: Supported
 F: drivers/net/ethernet/myricom/myri10ge/

@@ -2258,7 +2258,7 @@ static int amb_probe(struct pci_dev *pci_dev,

  PRINTD (DBG_INFO, "registered Madge ATM adapter (no. %d) (%p) at %p",
	  dev->atm_dev->number, dev, dev->atm_dev);
-	  dev->atm_dev->dev_data = (void *) dev;
+  dev->atm_dev->dev_data = (void *) dev;

  // register our address
  amb_esi (dev, dev->atm_dev->esi);
@@ -3083,8 +3083,8 @@ fore200e_proc_read(struct atm_dev *dev, loff_t* pos, char* page)
	ASSERT(fore200e_vcc);

	len = sprintf(page,
-		      " %08x %03d %05d %1d %09lu %05d/%05d %09lu %05d/%05d\n",
-		      (u32)(unsigned long)vcc,
+		      " %pK %03d %05d %1d %09lu %05d/%05d %09lu %05d/%05d\n",
+		      vcc,
		      vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
		      fore200e_vcc->tx_pdu,
		      fore200e_vcc->tx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->tx_min_pdu,
@@ -1586,8 +1586,8 @@ static int service_buffer_allocate(struct lanai_dev *lanai)
	    lanai->pci);
	if (unlikely(lanai->service.start == NULL))
		return -ENOMEM;
-	DPRINTK("allocated service buffer at 0x%08lX, size %zu(%d)\n",
-	    (unsigned long) lanai->service.start,
+	DPRINTK("allocated service buffer at %p, size %zu(%d)\n",
+	    lanai->service.start,
	    lanai_buf_size(&lanai->service),
	    lanai_buf_size_cardorder(&lanai->service));
	/* Clear ServWrite register to be safe */
@@ -2218,9 +2218,9 @@ static int lanai_dev_open(struct atm_dev *atmdev)
 #endif
	memcpy(atmdev->esi, eeprom_mac(lanai), ESI_LEN);
	lanai_timed_poll_start(lanai);
-	printk(KERN_NOTICE DEV_LABEL "(itf %d): rev.%d, base=0x%lx, irq=%u "
+	printk(KERN_NOTICE DEV_LABEL "(itf %d): rev.%d, base=%p, irq=%u "
		"(%pMF)\n", lanai->number, (int) lanai->pci->revision,
-		(unsigned long) lanai->base, lanai->pci->irq, atmdev->esi);
+		lanai->base, lanai->pci->irq, atmdev->esi);
	printk(KERN_NOTICE DEV_LABEL "(itf %d): LANAI%s, serialno=%u(0x%X), "
		"board_rev=%d\n", lanai->number,
		lanai->type==lanai2 ? "2" : "HB", (unsigned int) lanai->serialno,
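The fore200e, lanai, and via-rhine hunks in this merge all replace hand-rolled `%x`/`0x%lx` pointer formatting with `%p`/`%pK`. A minimal userspace C sketch of the bug class being fixed (illustrative, not kernel code):

```c
#include <stdio.h>

int main(void)
{
	int x = 42;
	int *p = &x;

	/* Buggy pattern: squeezing a pointer through a 32-bit integer
	 * silently truncates the upper bits on 64-bit platforms. */
	printf("truncated: 0x%08x\n", (unsigned int)(unsigned long)p);

	/* Fixed pattern: %p prints the full pointer width portably; the
	 * kernel additionally hashes %p output and offers %pK to honor
	 * kptr_restrict. */
	printf("full:      %p\n", (void *)p);
	return 0;
}
```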
@@ -177,7 +177,7 @@ static int set_loopback(struct atm_dev *dev,int mode)
		default:
			return -EINVAL;
	}
	dev->ops->phy_put(dev, control, reg);
-	 PRIV(dev)->loop_mode = mode;
+	PRIV(dev)->loop_mode = mode;
	return 0;
 }

@@ -423,7 +423,7 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
			return -EINVAL;

		bond_opt_initval(&newval,
-				 nla_get_be64(data[IFLA_BOND_AD_ACTOR_SYSTEM]));
+				 nla_get_u64(data[IFLA_BOND_AD_ACTOR_SYSTEM]));
		err = __bond_opt_set(bond, BOND_OPT_AD_ACTOR_SYSTEM, &newval);
		if (err)
			return err;

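Per fix 12 in the log, IFLA_BOND_AD_ACTOR_SYSTEM carries its value in CPU endianness, so reading it with the big-endian accessor corrupts it on little-endian hosts. A self-contained sketch of the difference (helper names are illustrative stand-ins for nla_get_be64()/nla_get_u64()):

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Read 8 bytes as big-endian, like nla_get_be64() would. */
static uint64_t get_be64(const void *p)
{
	const uint8_t *b = p;
	uint64_t v = 0;

	for (int i = 0; i < 8; i++)
		v = (v << 8) | b[i];
	return v;
}

/* Read 8 bytes in native (CPU) order, like nla_get_u64(). */
static uint64_t get_u64(const void *p)
{
	uint64_t v;

	memcpy(&v, p, sizeof(v));
	return v;
}

int main(void)
{
	/* Sample attribute payload; contents are made up. */
	uint8_t attr[8] = { 0x02, 0x01, 0x00, 0xc0, 0xff, 0xee, 0x00, 0x00 };

	/* On a little-endian host these differ; only the native read
	 * round-trips the value the sender stored. */
	printf("be64 read:   %016llx\n", (unsigned long long)get_be64(attr));
	printf("native read: %016llx\n", (unsigned long long)get_u64(attr));
	return 0;
}
```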
@@ -2136,8 +2136,8 @@ static int bnxt_get_module_eeprom(struct net_device *dev,
	/* Read A2 portion of the EEPROM */
	if (length) {
		start -= ETH_MODULE_SFF_8436_LEN;
-		bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 1, start,
-						 length, data);
+		rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 1,
+						      start, length, data);
	}
	return rc;
 }

@@ -1355,7 +1355,6 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,

	/* Offload checksum calculation to HW */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
-		hdr->csum_l3 = 1; /* Enable IP csum calculation */
		hdr->l3_offset = skb_network_offset(skb);
		hdr->l4_offset = skb_transport_offset(skb);

@@ -4307,8 +4307,10 @@ static void e1000_init_rx_addrs(struct e1000_hw *hw)

	rar_num = E1000_RAR_ENTRIES;

-	/* Zero out the other 15 receive addresses. */
-	e_dbg("Clearing RAR[1-15]\n");
+	/* Zero out the following 14 receive addresses. RAR[15] is for
+	 * manageability
+	 */
+	e_dbg("Clearing RAR[1-14]\n");
	for (i = 1; i < rar_num; i++) {
		E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
		E1000_WRITE_FLUSH();
@@ -113,7 +113,8 @@
 #define NVM_SIZE_MULTIPLIER 4096 /*multiplier for NVMS field */
 #define E1000_FLASH_BASE_ADDR 0xE000 /*offset of NVM access regs */
 #define E1000_CTRL_EXT_NVMVS 0x3 /*NVM valid sector */
-#define E1000_TARC0_CB_MULTIQ_3_REQ (1 << 28 | 1 << 29)
+#define E1000_TARC0_CB_MULTIQ_3_REQ 0x30000000
+#define E1000_TARC0_CB_MULTIQ_2_REQ 0x20000000
 #define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL

 #define E1000_ICH_RAR_ENTRIES 7
@@ -3034,9 +3034,12 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
		ew32(IOSFPC, reg_val);

		reg_val = er32(TARC(0));
-		/* SPT and KBL Si errata workaround to avoid Tx hang */
-		reg_val &= ~BIT(28);
-		reg_val |= BIT(29);
+		/* SPT and KBL Si errata workaround to avoid Tx hang.
+		 * Dropping the number of outstanding requests from
+		 * 3 to 2 in order to avoid a buffer overrun.
+		 */
+		reg_val &= ~E1000_TARC0_CB_MULTIQ_3_REQ;
+		reg_val |= E1000_TARC0_CB_MULTIQ_2_REQ;
		ew32(TARC(0), reg_val);
	}
 }

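The two e1000e hunks above are one logical change: `(1 << 28 | 1 << 29)` equals 0x30000000, so the old `reg_val &= ~BIT(28); reg_val |= BIT(29);` and the new named-constant form are bit-for-bit equivalent. A quick standalone check (the sample register value is made up):

```c
#include <stdint.h>
#include <stdio.h>

#define BIT(n)                      (1u << (n))
#define E1000_TARC0_CB_MULTIQ_3_REQ 0x30000000u /* bits 29:28 = 0b11 */
#define E1000_TARC0_CB_MULTIQ_2_REQ 0x20000000u /* bits 29:28 = 0b10 */

int main(void)
{
	uint32_t sample = 0x15400000u | E1000_TARC0_CB_MULTIQ_3_REQ;
	uint32_t old_way = sample, new_way = sample;

	old_way &= ~BIT(28);                     /* old code */
	old_way |= BIT(29);

	new_way &= ~E1000_TARC0_CB_MULTIQ_3_REQ; /* new code */
	new_way |= E1000_TARC0_CB_MULTIQ_2_REQ;

	printf("old=0x%08x new=0x%08x %s\n", old_way, new_way,
	       old_way == new_way ? "(equivalent)" : "(mismatch!)");
	return 0;
}
```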
@@ -7401,7 +7401,6 @@ static int i40e_configure_clsflower(struct i40e_vsi *vsi,
		dev_err(&pf->pdev->dev,
			"Failed to add cloud filter, err %s\n",
			i40e_stat_str(&pf->hw, err));
-		err = i40e_aq_rc_to_posix(err, pf->hw.aq.asq_last_status);
		goto err;
	}

@@ -2086,7 +2086,7 @@ static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg, int msglen)
	}

	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, 0,
-				      (u8 *)vfres, sizeof(vfres));
+				      (u8 *)vfres, sizeof(*vfres));
 }

 /**
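This is fix 6 from the log. With a pointer, `sizeof(vfres)` measures the pointer itself rather than the response structure, so only 8 bytes of the message were being sent. A standalone sketch (the struct below is a hypothetical stand-in, not the real virtchnl layout):

```c
#include <stdio.h>

struct vf_resource {            /* hypothetical stand-in */
	unsigned int num_queue_pairs;
	unsigned int max_vectors;
	char pad[40];
};

int main(void)
{
	struct vf_resource res = { 0 };
	struct vf_resource *vfres = &res;

	/* The bug: size of the pointer (typically 8 on 64-bit), so most
	 * of the message body is never copied. */
	printf("sizeof(vfres)  = %zu\n", sizeof(vfres));
	/* The fix: size of the pointed-to structure. */
	printf("sizeof(*vfres) = %zu\n", sizeof(*vfres));
	return 0;
}
```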
@@ -4629,11 +4629,6 @@ static void mvpp2_port_mii_gmac_configure_mode(struct mvpp2_port *port)
		       MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
		val &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
		writel(val, port->base + MVPP22_GMAC_CTRL_4_REG);
-
-		val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
-		val |= MVPP2_GMAC_DISABLE_PADDING;
-		val &= ~MVPP2_GMAC_FLOW_CTRL_MASK;
-		writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
	} else if (phy_interface_mode_is_rgmii(port->phy_interface)) {
		val = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
		val |= MVPP22_CTRL4_EXT_PIN_GMII_SEL |
@@ -4641,10 +4636,6 @@ static void mvpp2_port_mii_gmac_configure_mode(struct mvpp2_port *port)
		       MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
		val &= ~MVPP22_CTRL4_DP_CLK_SEL;
		writel(val, port->base + MVPP22_GMAC_CTRL_4_REG);
-
-		val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
-		val &= ~MVPP2_GMAC_DISABLE_PADDING;
-		writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
	}

	/* The port is connected to a copper PHY */
@@ -5805,7 +5796,7 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
					 sizeof(*txq_pcpu->buffs),
					 GFP_KERNEL);
		if (!txq_pcpu->buffs)
-			goto cleanup;
+			return -ENOMEM;

		txq_pcpu->count = 0;
		txq_pcpu->reserved_num = 0;
@@ -5821,26 +5812,10 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
						   &txq_pcpu->tso_headers_dma,
						   GFP_KERNEL);
		if (!txq_pcpu->tso_headers)
-			goto cleanup;
+			return -ENOMEM;
	}

	return 0;
-cleanup:
-	for_each_present_cpu(cpu) {
-		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
-		kfree(txq_pcpu->buffs);
-
-		dma_free_coherent(port->dev->dev.parent,
-				  txq_pcpu->size * TSO_HEADER_SIZE,
-				  txq_pcpu->tso_headers,
-				  txq_pcpu->tso_headers_dma);
-	}
-
-	dma_free_coherent(port->dev->dev.parent,
-			  txq->size * MVPP2_DESC_ALIGNED_SIZE,
-			  txq->descs, txq->descs_dma);
-
-	return -ENOMEM;
 }

 /* Free allocated TXQ resources */
@@ -6867,6 +6842,12 @@ static int mvpp2_check_ringparam_valid(struct net_device *dev,
	else if (!IS_ALIGNED(ring->tx_pending, 32))
		new_tx_pending = ALIGN(ring->tx_pending, 32);

+	/* The Tx ring size cannot be smaller than the minimum number of
+	 * descriptors needed for TSO.
+	 */
+	if (new_tx_pending < MVPP2_MAX_SKB_DESCS)
+		new_tx_pending = ALIGN(MVPP2_MAX_SKB_DESCS, 32);
+
	if (ring->rx_pending != new_rx_pending) {
		netdev_info(dev, "illegal Rx ring size value %d, round to %d\n",
			    ring->rx_pending, new_rx_pending);
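`ALIGN(x, 32)` in the hunk above rounds x up to the next multiple of 32 so the ring size stays descriptor-aligned. A minimal demonstration of the rounding, with the macro copied in its usual power-of-two form:

```c
#include <stdio.h>

/* Round x up to a multiple of a; a must be a power of two. */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	printf("ALIGN(33, 32) = %d\n", ALIGN(33, 32)); /* 64 */
	printf("ALIGN(64, 32) = %d\n", ALIGN(64, 32)); /* 64, already aligned */
	return 0;
}
```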
@@ -8345,7 +8326,7 @@ static int mvpp2_probe(struct platform_device *pdev)
	for_each_available_child_of_node(dn, port_node) {
		err = mvpp2_port_probe(pdev, port_node, priv, i);
		if (err < 0)
-			goto err_mg_clk;
+			goto err_port_probe;
		i++;
	}

@@ -8361,12 +8342,19 @@ static int mvpp2_probe(struct platform_device *pdev)
	priv->stats_queue = create_singlethread_workqueue(priv->queue_name);
	if (!priv->stats_queue) {
		err = -ENOMEM;
-		goto err_mg_clk;
+		goto err_port_probe;
	}

	platform_set_drvdata(pdev, priv);
	return 0;

+err_port_probe:
+	i = 0;
+	for_each_available_child_of_node(dn, port_node) {
+		if (priv->port_list[i])
+			mvpp2_port_remove(priv->port_list[i]);
+		i++;
+	}
 err_mg_clk:
	clk_disable_unprepare(priv->axi_clk);
	if (priv->hw_version == MVPP22)
@@ -1370,8 +1370,9 @@ static void mlxsw_sp_netdevice_ipip_ol_down_event(struct mlxsw_sp *mlxsw_sp,
		mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
 }

-static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
-					struct mlxsw_sp_rif *rif);
+static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
+					 struct mlxsw_sp_rif *old_rif,
+					 struct mlxsw_sp_rif *new_rif);
 static int
 mlxsw_sp_ipip_entry_ol_lb_update(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_ipip_entry *ipip_entry,
@@ -1389,17 +1390,18 @@ mlxsw_sp_ipip_entry_ol_lb_update(struct mlxsw_sp *mlxsw_sp,
		return PTR_ERR(new_lb_rif);
	ipip_entry->ol_lb = new_lb_rif;

-	if (keep_encap) {
-		list_splice_init(&old_lb_rif->common.nexthop_list,
-				 &new_lb_rif->common.nexthop_list);
-		mlxsw_sp_nexthop_rif_update(mlxsw_sp, &new_lb_rif->common);
-	}
+	if (keep_encap)
+		mlxsw_sp_nexthop_rif_migrate(mlxsw_sp, &old_lb_rif->common,
+					     &new_lb_rif->common);

	mlxsw_sp_rif_destroy(&old_lb_rif->common);

	return 0;
 }

+static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
+					struct mlxsw_sp_rif *rif);
+
 /**
 * Update the offload related to an IPIP entry. This always updates decap, and
 * in addition to that it also:
@@ -1449,9 +1451,27 @@ static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp,
 {
	struct mlxsw_sp_ipip_entry *ipip_entry =
		mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
+	enum mlxsw_sp_l3proto ul_proto;
+	union mlxsw_sp_l3addr saddr;
+	u32 ul_tb_id;
+
	if (!ipip_entry)
		return 0;

+	/* For flat configuration cases, moving overlay to a different VRF might
+	 * cause local address conflict, and the conflicting tunnels need to be
+	 * demoted.
+	 */
+	ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
+	ul_proto = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto;
+	saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
+	if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
+						 saddr, ul_tb_id,
+						 ipip_entry)) {
+		mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
+		return 0;
+	}
+
	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
						   true, false, false, extack);
 }
@@ -3343,22 +3363,19 @@ static bool mlxsw_sp_ipip_netdev_ul_up(struct net_device *ol_dev)
	return ul_dev ? (ul_dev->flags & IFF_UP) : true;
 }

-static int mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
-				      struct mlxsw_sp_nexthop *nh,
-				      struct net_device *ol_dev)
+static void mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
+				       struct mlxsw_sp_nexthop *nh,
+				       struct mlxsw_sp_ipip_entry *ipip_entry)
 {
	bool removing;

	if (!nh->nh_grp->gateway || nh->ipip_entry)
-		return 0;
+		return;

-	nh->ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
-	if (!nh->ipip_entry)
-		return -ENOENT;
-
-	removing = !mlxsw_sp_ipip_netdev_ul_up(ol_dev);
+	nh->ipip_entry = ipip_entry;
+	removing = !mlxsw_sp_ipip_netdev_ul_up(ipip_entry->ol_dev);
	__mlxsw_sp_nexthop_neigh_update(nh, removing);
-	return 0;
+	mlxsw_sp_nexthop_rif_init(nh, &ipip_entry->ol_lb->common);
 }

 static void mlxsw_sp_nexthop_ipip_fini(struct mlxsw_sp *mlxsw_sp,
@@ -3403,21 +3420,21 @@ static int mlxsw_sp_nexthop4_type_init(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_nexthop *nh,
				       struct fib_nh *fib_nh)
 {
-	struct mlxsw_sp_router *router = mlxsw_sp->router;
+	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct net_device *dev = fib_nh->nh_dev;
-	enum mlxsw_sp_ipip_type ipipt;
+	struct mlxsw_sp_ipip_entry *ipip_entry;
	struct mlxsw_sp_rif *rif;
	int err;

-	if (mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, fib_nh, &ipipt) &&
-	    router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, dev,
-						     MLXSW_SP_L3_PROTO_IPV4)) {
-		nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
-		err = mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, dev);
-		if (err)
-			return err;
-		mlxsw_sp_nexthop_rif_init(nh, &nh->ipip_entry->ol_lb->common);
-		return 0;
+	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
+	if (ipip_entry) {
+		ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
+		if (ipip_ops->can_offload(mlxsw_sp, dev,
+					  MLXSW_SP_L3_PROTO_IPV4)) {
+			nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
+			mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
+			return 0;
+		}
	}

	nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
@@ -3545,6 +3562,18 @@ static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
	}
 }

+static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
+					 struct mlxsw_sp_rif *old_rif,
+					 struct mlxsw_sp_rif *new_rif)
+{
+	struct mlxsw_sp_nexthop *nh;
+
+	list_splice_init(&old_rif->nexthop_list, &new_rif->nexthop_list);
+	list_for_each_entry(nh, &new_rif->nexthop_list, rif_list_node)
+		nh->rif = new_rif;
+	mlxsw_sp_nexthop_rif_update(mlxsw_sp, new_rif);
+}
+
 static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
					    struct mlxsw_sp_rif *rif)
 {
@@ -3996,7 +4025,7 @@ mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
	case RTN_LOCAL:
		ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, dev,
						 MLXSW_SP_L3_PROTO_IPV4, dip);
-		if (ipip_entry) {
+		if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) {
			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
			return mlxsw_sp_fib_entry_decap_init(mlxsw_sp,
							     fib_entry,
@@ -4694,21 +4723,21 @@ static int mlxsw_sp_nexthop6_type_init(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_nexthop *nh,
				       const struct rt6_info *rt)
 {
-	struct mlxsw_sp_router *router = mlxsw_sp->router;
+	const struct mlxsw_sp_ipip_ops *ipip_ops;
+	struct mlxsw_sp_ipip_entry *ipip_entry;
	struct net_device *dev = rt->dst.dev;
-	enum mlxsw_sp_ipip_type ipipt;
	struct mlxsw_sp_rif *rif;
	int err;

-	if (mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, &ipipt) &&
-	    router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, dev,
-						     MLXSW_SP_L3_PROTO_IPV6)) {
-		nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
-		err = mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, dev);
-		if (err)
-			return err;
-		mlxsw_sp_nexthop_rif_init(nh, &nh->ipip_entry->ol_lb->common);
-		return 0;
+	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
+	if (ipip_entry) {
+		ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
+		if (ipip_ops->can_offload(mlxsw_sp, dev,
+					  MLXSW_SP_L3_PROTO_IPV6)) {
+			nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
+			mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
+			return 0;
+		}
	}

	nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
@@ -1986,9 +1986,9 @@ static void nv_unmap_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb)
				 tx_skb->dma_len,
				 DMA_TO_DEVICE);
		else
-			pci_unmap_page(np->pci_dev, tx_skb->dma,
+			dma_unmap_page(&np->pci_dev->dev, tx_skb->dma,
				       tx_skb->dma_len,
-				       PCI_DMA_TODEVICE);
+				       DMA_TO_DEVICE);
		tx_skb->dma = 0;
	}
 }
@@ -1,8 +1,8 @@
 /*
 * dwmac-stm32.c - DWMAC Specific Glue layer for STM32 MCU
 *
- * Copyright (C) Alexandre Torgue 2015
- * Author: Alexandre Torgue <alexandre.torgue@gmail.com>
+ * Copyright (C) STMicroelectronics SA 2017
+ * Author: Alexandre Torgue <alexandre.torgue@st.com> for STMicroelectronics.
 * License terms: GNU General Public License (GPL), version 2
 *
 */
@@ -995,8 +995,8 @@ static int rhine_init_one_common(struct device *hwdev, u32 quirks,
	else
		name = "Rhine III";

-	netdev_info(dev, "VIA %s at 0x%lx, %pM, IRQ %d\n",
-		    name, (long)ioaddr, dev->dev_addr, rp->irq);
+	netdev_info(dev, "VIA %s at %p, %pM, IRQ %d\n",
+		    name, ioaddr, dev->dev_addr, rp->irq);

	dev_set_drvdata(hwdev, dev);

@@ -34,6 +34,7 @@ config XILINX_AXI_EMAC
 config XILINX_LL_TEMAC
	tristate "Xilinx LL TEMAC (LocalLink Tri-mode Ethernet MAC) driver"
	depends on (PPC || MICROBLAZE)
+	depends on !64BIT || BROKEN
	select PHYLIB
	---help---
	  This driver supports the Xilinx 10/100/1000 LocalLink TEMAC
@@ -16,6 +16,7 @@
 * link takes priority and the other port is completely locked out.
 */
 #include <linux/phy.h>
+#include <linux/marvell_phy.h>

 enum {
	MV_PCS_BASE_T = 0x0000,
@@ -338,7 +339,7 @@ static int mv3310_read_status(struct phy_device *phydev)
 static struct phy_driver mv3310_drivers[] = {
	{
		.phy_id = 0x002b09aa,
-		.phy_id_mask = 0xffffffff,
+		.phy_id_mask = MARVELL_PHY_ID_MASK,
		.name = "mv88x3310",
		.features = SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Full |
@@ -360,7 +361,7 @@ static struct phy_driver mv3310_drivers[] = {
 module_phy_driver(mv3310_drivers);

 static struct mdio_device_id __maybe_unused mv3310_tbl[] = {
-	{ 0x002b09aa, 0xffffffff },
+	{ 0x002b09aa, MARVELL_PHY_ID_MASK },
	{ },
 };
 MODULE_DEVICE_TABLE(mdio, mv3310_tbl);
@@ -335,7 +335,7 @@ static void tbnet_free_buffers(struct tbnet_ring *ring)
		if (ring->ring->is_tx) {
			dir = DMA_TO_DEVICE;
			order = 0;
-			size = tbnet_frame_size(tf);
+			size = TBNET_FRAME_SIZE;
		} else {
			dir = DMA_FROM_DEVICE;
			order = TBNET_RX_PAGE_ORDER;
@@ -512,6 +512,7 @@ static int tbnet_alloc_rx_buffers(struct tbnet *net, unsigned int nbuffers)
 static struct tbnet_frame *tbnet_get_tx_buffer(struct tbnet *net)
 {
	struct tbnet_ring *ring = &net->tx_ring;
+	struct device *dma_dev = tb_ring_dma_device(ring->ring);
	struct tbnet_frame *tf;
	unsigned int index;

@@ -522,7 +523,9 @@ static struct tbnet_frame *tbnet_get_tx_buffer(struct tbnet *net)

	tf = &ring->frames[index];
	tf->frame.size = 0;
-	tf->frame.buffer_phy = 0;
+
+	dma_sync_single_for_cpu(dma_dev, tf->frame.buffer_phy,
+				tbnet_frame_size(tf), DMA_TO_DEVICE);

	return tf;
 }
@@ -531,13 +534,8 @@ static void tbnet_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
			      bool canceled)
 {
	struct tbnet_frame *tf = container_of(frame, typeof(*tf), frame);
-	struct device *dma_dev = tb_ring_dma_device(ring);
	struct tbnet *net = netdev_priv(tf->dev);

-	dma_unmap_page(dma_dev, tf->frame.buffer_phy, tbnet_frame_size(tf),
-		       DMA_TO_DEVICE);
-	tf->frame.buffer_phy = 0;
-
	/* Return buffer to the ring */
	net->tx_ring.prod++;

@@ -548,10 +546,12 @@ static void tbnet_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
 static int tbnet_alloc_tx_buffers(struct tbnet *net)
 {
	struct tbnet_ring *ring = &net->tx_ring;
+	struct device *dma_dev = tb_ring_dma_device(ring->ring);
	unsigned int i;

	for (i = 0; i < TBNET_RING_SIZE; i++) {
		struct tbnet_frame *tf = &ring->frames[i];
+		dma_addr_t dma_addr;

		tf->page = alloc_page(GFP_KERNEL);
		if (!tf->page) {
@@ -559,7 +559,17 @@ static int tbnet_alloc_tx_buffers(struct tbnet *net)
			return -ENOMEM;
		}

+		dma_addr = dma_map_page(dma_dev, tf->page, 0, TBNET_FRAME_SIZE,
+					DMA_TO_DEVICE);
+		if (dma_mapping_error(dma_dev, dma_addr)) {
+			__free_page(tf->page);
+			tf->page = NULL;
+			tbnet_free_buffers(ring);
+			return -ENOMEM;
+		}
+
		tf->dev = net->dev;
+		tf->frame.buffer_phy = dma_addr;
		tf->frame.callback = tbnet_tx_callback;
		tf->frame.sof = TBIP_PDF_FRAME_START;
		tf->frame.eof = TBIP_PDF_FRAME_END;
@@ -881,19 +891,6 @@ static int tbnet_stop(struct net_device *dev)
	return 0;
 }

-static bool tbnet_xmit_map(struct device *dma_dev, struct tbnet_frame *tf)
-{
-	dma_addr_t dma_addr;
-
-	dma_addr = dma_map_page(dma_dev, tf->page, 0, tbnet_frame_size(tf),
-				DMA_TO_DEVICE);
-	if (dma_mapping_error(dma_dev, dma_addr))
-		return false;
-
-	tf->frame.buffer_phy = dma_addr;
-	return true;
-}
-
 static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb,
				     struct tbnet_frame **frames, u32 frame_count)
 {
@@ -908,13 +905,14 @@ static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb,

	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		/* No need to calculate checksum so we just update the
-		 * total frame count and map the frames for DMA.
+		 * total frame count and sync the frames for DMA.
		 */
		for (i = 0; i < frame_count; i++) {
			hdr = page_address(frames[i]->page);
			hdr->frame_count = cpu_to_le32(frame_count);
-			if (!tbnet_xmit_map(dma_dev, frames[i]))
-				goto err_unmap;
+			dma_sync_single_for_device(dma_dev,
+				frames[i]->frame.buffer_phy,
+				tbnet_frame_size(frames[i]), DMA_TO_DEVICE);
		}

		return true;
@@ -983,21 +981,14 @@ static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb,
	*tucso = csum_fold(wsum);

	/* Checksum is finally calculated and we don't touch the memory
-	 * anymore, so DMA map the frames now.
+	 * anymore, so DMA sync the frames now.
	 */
	for (i = 0; i < frame_count; i++) {
-		if (!tbnet_xmit_map(dma_dev, frames[i]))
-			goto err_unmap;
+		dma_sync_single_for_device(dma_dev, frames[i]->frame.buffer_phy,
+			tbnet_frame_size(frames[i]), DMA_TO_DEVICE);
	}

	return true;
-
-err_unmap:
-	while (i--)
-		dma_unmap_page(dma_dev, frames[i]->frame.buffer_phy,
-			       tbnet_frame_size(frames[i]), DMA_TO_DEVICE);
-
-	return false;
 }

 static void *tbnet_kmap_frag(struct sk_buff *skb, unsigned int frag_num,
@@ -874,8 +874,8 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],

 static int __vxlan_fdb_delete(struct vxlan_dev *vxlan,
			       const unsigned char *addr, union vxlan_addr ip,
-			      __be16 port, __be32 src_vni, u32 vni, u32 ifindex,
-			      u16 vid)
+			      __be16 port, __be32 src_vni, __be32 vni,
+			      u32 ifindex, u16 vid)
 {
	struct vxlan_fdb *f;
	struct vxlan_rdst *rd = NULL;
@@ -494,18 +494,11 @@ int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
			break;
		}

-		data = kmalloc(xc.len, GFP_KERNEL);
-		if (!data) {
-			ret = -ENOMEM;
+		data = memdup_user(xc.data, xc.len);
+		if (IS_ERR(data)) {
+			ret = PTR_ERR(data);
			break;
		}

-		if(copy_from_user(data, xc.data, xc.len))
-		{
-			kfree(data);
-			ret = -ENOMEM;
-			break;
-		}
-
		printk("%s: Starting load of data Len: %d at 0x%p == 0x%p\n", dev->name, xc.len, xc.data, data);

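The lmc hunk swaps an open-coded kmalloc()+copy_from_user() for memdup_user(), which reports failure through an error-valued pointer rather than NULL. A self-contained userspace re-creation of that ERR_PTR/IS_ERR convention, simplified from the kernel's macros (memdup() below is a stand-in, not a real API):

```c
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_ERRNO 4095

/* Errno values are encoded in the top page of the address space. */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

/* Userspace stand-in for memdup_user(): duplicate a buffer, returning
 * an error pointer instead of NULL on failure. */
static void *memdup(const void *src, size_t len)
{
	void *p = malloc(len);

	if (!p)
		return ERR_PTR(-ENOMEM);
	memcpy(p, src, len);
	return p;
}

int main(void)
{
	const char msg[] = "firmware blob";
	void *data = memdup(msg, sizeof(msg));

	if (IS_ERR(data)) {
		fprintf(stderr, "copy failed: %ld\n", PTR_ERR(data));
		return 1;
	}
	printf("copied: %s\n", (char *)data);
	free(data);
	return 0;
}
```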
@@ -1113,7 +1113,7 @@ ath_chanctx_send_vif_ps_frame(struct ath_softc *sc, struct ath_vif *avp,
	if (!avp->assoc)
		return false;

-	skb = ieee80211_nullfunc_get(sc->hw, vif);
+	skb = ieee80211_nullfunc_get(sc->hw, vif, false);
	if (!skb)
		return false;

@@ -198,7 +198,7 @@ void __cw1200_cqm_bssloss_sm(struct cw1200_common *priv,

	priv->bss_loss_state++;

-	skb = ieee80211_nullfunc_get(priv->hw, priv->vif);
+	skb = ieee80211_nullfunc_get(priv->hw, priv->vif, false);
	WARN_ON(!skb);
	if (skb)
		cw1200_tx(priv->hw, NULL, skb);
@@ -2265,7 +2265,7 @@ static int cw1200_upload_null(struct cw1200_common *priv)
		.rate = 0xFF,
	};

-	frame.skb = ieee80211_nullfunc_get(priv->hw, priv->vif);
+	frame.skb = ieee80211_nullfunc_get(priv->hw, priv->vif, false);
	if (!frame.skb)
		return -ENOMEM;

@@ -566,7 +566,7 @@ static int wl1251_build_null_data(struct wl1251 *wl)
		size = sizeof(struct wl12xx_null_data_template);
		ptr = NULL;
	} else {
-		skb = ieee80211_nullfunc_get(wl->hw, wl->vif);
+		skb = ieee80211_nullfunc_get(wl->hw, wl->vif, false);
		if (!skb)
			goto out;
		size = skb->len;
@@ -1069,7 +1069,8 @@ int wl12xx_cmd_build_null_data(struct wl1271 *wl, struct wl12xx_vif *wlvif)
		ptr = NULL;
	} else {
		skb = ieee80211_nullfunc_get(wl->hw,
-					     wl12xx_wlvif_to_vif(wlvif));
+					     wl12xx_wlvif_to_vif(wlvif),
+					     false);
		if (!skb)
			goto out;
		size = skb->len;
|
||||||
struct sk_buff *skb = NULL;
|
struct sk_buff *skb = NULL;
|
||||||
int ret = -ENOMEM;
|
int ret = -ENOMEM;
|
||||||
|
|
||||||
skb = ieee80211_nullfunc_get(wl->hw, vif);
|
skb = ieee80211_nullfunc_get(wl->hw, vif, false);
|
||||||
if (!skb)
|
if (!skb)
|
||||||
goto out;
|
goto out;
|
||||||
|
|
||||||
|
|
|
@@ -87,6 +87,8 @@ struct netfront_cb {
 /* IRQ name is queue name with "-tx" or "-rx" appended */
 #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)

+static DECLARE_WAIT_QUEUE_HEAD(module_unload_q);
+
 struct netfront_stats {
	u64 packets;
	u64 bytes;
@@ -2020,10 +2022,12 @@ static void netback_changed(struct xenbus_device *dev,
		break;

	case XenbusStateClosed:
+		wake_up_all(&module_unload_q);
		if (dev->state == XenbusStateClosed)
			break;
		/* Missed the backend's CLOSING state -- fallthrough */
	case XenbusStateClosing:
+		wake_up_all(&module_unload_q);
		xenbus_frontend_closed(dev);
		break;
	}
@@ -2129,6 +2133,20 @@ static int xennet_remove(struct xenbus_device *dev)

	dev_dbg(&dev->dev, "%s\n", dev->nodename);

+	if (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed) {
+		xenbus_switch_state(dev, XenbusStateClosing);
+		wait_event(module_unload_q,
+			   xenbus_read_driver_state(dev->otherend) ==
+			   XenbusStateClosing);
+
+		xenbus_switch_state(dev, XenbusStateClosed);
+		wait_event(module_unload_q,
+			   xenbus_read_driver_state(dev->otherend) ==
+			   XenbusStateClosed ||
+			   xenbus_read_driver_state(dev->otherend) ==
+			   XenbusStateUnknown);
+	}
+
	xennet_disconnect_backend(info);

	unregister_netdev(info->netdev);
@@ -4470,18 +4470,24 @@ struct sk_buff *ieee80211_pspoll_get(struct ieee80211_hw *hw,
 * ieee80211_nullfunc_get - retrieve a nullfunc template
 * @hw: pointer obtained from ieee80211_alloc_hw().
 * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @qos_ok: QoS NDP is acceptable to the caller, this should be set
+ *	if at all possible
 *
 * Creates a Nullfunc template which can, for example, uploaded to
 * hardware. The template must be updated after association so that correct
 * BSSID and address is used.
 *
+ * If @qos_ndp is set and the association is to an AP with QoS/WMM, the
+ * returned packet will be QoS NDP.
+ *
 * Note: Caller (or hardware) is responsible for setting the
 * &IEEE80211_FCTL_PM bit as well as Duration and Sequence Control fields.
 *
 * Return: The nullfunc template. %NULL on error.
 */
 struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw,
-				       struct ieee80211_vif *vif);
+				       struct ieee80211_vif *vif,
+				       bool qos_ok);

 /**
 * ieee80211_probereq_get - retrieve a Probe Request template
@@ -48,31 +48,32 @@ static inline __wsum sctp_csum_update(const void *buff, int len, __wsum sum)
	/* This uses the crypto implementation of crc32c, which is either
	 * implemented w/ hardware support or resolves to __crc32c_le().
	 */
-	return crc32c(sum, buff, len);
+	return (__force __wsum)crc32c((__force __u32)sum, buff, len);
 }

 static inline __wsum sctp_csum_combine(__wsum csum, __wsum csum2,
					int offset, int len)
 {
-	return __crc32c_le_combine(csum, csum2, len);
+	return (__force __wsum)__crc32c_le_combine((__force __u32)csum,
+						   (__force __u32)csum2, len);
 }

 static inline __le32 sctp_compute_cksum(const struct sk_buff *skb,
					 unsigned int offset)
 {
	struct sctphdr *sh = sctp_hdr(skb);
-	__le32 ret, old = sh->checksum;
	const struct skb_checksum_ops ops = {
		.update = sctp_csum_update,
		.combine = sctp_csum_combine,
	};
+	__le32 old = sh->checksum;
+	__wsum new;

	sh->checksum = 0;
-	ret = cpu_to_le32(~__skb_checksum(skb, offset, skb->len - offset,
-					  ~(__u32)0, &ops));
+	new = ~__skb_checksum(skb, offset, skb->len - offset, ~(__wsum)0, &ops);
	sh->checksum = old;

-	return ret;
+	return cpu_to_le32((__force __u32)new);
 }

 #endif /* __sctp_checksum_h__ */
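The sctp hunk above only adds `__force` type annotations for sparse; the arithmetic is unchanged. For reference, a minimal bitwise software CRC-32C, equivalent to what crc32c() resolves to when no hardware support is present (0x82F63B78 is the reflected Castagnoli polynomial):

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Reflected (LSB-first) CRC-32C update, one bit at a time. */
static uint32_t crc32c_le(uint32_t crc, const unsigned char *buf, size_t len)
{
	while (len--) {
		crc ^= *buf++;
		for (int k = 0; k < 8; k++)
			crc = (crc >> 1) ^ (0x82F63B78u & (0u - (crc & 1)));
	}
	return crc;
}

int main(void)
{
	/* Standard check value: CRC-32C("123456789") == 0xE3069283,
	 * with ~0 as the seed and a final inversion, mirroring how
	 * SCTP seeds and folds the running sum. */
	const unsigned char msg[] = "123456789";
	uint32_t crc = crc32c_le(~0u, msg, 9) ^ ~0u;

	printf("%08X\n", crc);
	return 0;
}
```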
@@ -194,6 +194,11 @@ void sctp_remaddr_proc_exit(struct net *net);
 */
 int sctp_offload_init(void);

+/*
+ * sctp/stream_sched.c
+ */
+void sctp_sched_ops_init(void);
+
 /*
 * sctp/stream.c
 */
@@ -69,4 +69,9 @@ void sctp_sched_dequeue_common(struct sctp_outq *q, struct sctp_chunk *ch);
 int sctp_sched_init_sid(struct sctp_stream *stream, __u16 sid, gfp_t gfp);
 struct sctp_sched_ops *sctp_sched_ops_from_stream(struct sctp_stream *stream);

+void sctp_sched_ops_register(enum sctp_sched_type sched,
+			     struct sctp_sched_ops *sched_ops);
+void sctp_sched_ops_prio_init(void);
+void sctp_sched_ops_rr_init(void);
+
 #endif /* __sctp_stream_sched_h__ */
@@ -49,6 +49,7 @@ enum rxrpc_conn_trace {
	rxrpc_conn_put_client,
	rxrpc_conn_put_service,
	rxrpc_conn_queued,
+	rxrpc_conn_reap_service,
	rxrpc_conn_seen,
 };

@@ -138,10 +139,24 @@ enum rxrpc_rtt_rx_trace {

 enum rxrpc_timer_trace {
	rxrpc_timer_begin,
+	rxrpc_timer_exp_ack,
+	rxrpc_timer_exp_hard,
+	rxrpc_timer_exp_idle,
+	rxrpc_timer_exp_keepalive,
+	rxrpc_timer_exp_lost_ack,
+	rxrpc_timer_exp_normal,
+	rxrpc_timer_exp_ping,
+	rxrpc_timer_exp_resend,
	rxrpc_timer_expired,
	rxrpc_timer_init_for_reply,
	rxrpc_timer_init_for_send_reply,
+	rxrpc_timer_restart,
	rxrpc_timer_set_for_ack,
+	rxrpc_timer_set_for_hard,
+	rxrpc_timer_set_for_idle,
+	rxrpc_timer_set_for_keepalive,
+	rxrpc_timer_set_for_lost_ack,
+	rxrpc_timer_set_for_normal,
	rxrpc_timer_set_for_ping,
	rxrpc_timer_set_for_resend,
	rxrpc_timer_set_for_send,
@@ -150,6 +165,7 @@ enum rxrpc_timer_trace {
 enum rxrpc_propose_ack_trace {
	rxrpc_propose_ack_client_tx_end,
	rxrpc_propose_ack_input_data,
+	rxrpc_propose_ack_ping_for_keepalive,
	rxrpc_propose_ack_ping_for_lost_ack,
	rxrpc_propose_ack_ping_for_lost_reply,
	rxrpc_propose_ack_ping_for_params,
@@ -206,6 +222,7 @@ enum rxrpc_congest_change {
	EM(rxrpc_conn_put_client, "PTc") \
	EM(rxrpc_conn_put_service, "PTs") \
	EM(rxrpc_conn_queued, "QUE") \
+	EM(rxrpc_conn_reap_service, "RPs") \
	E_(rxrpc_conn_seen, "SEE")

 #define rxrpc_client_traces \
@@ -296,16 +313,31 @@ enum rxrpc_congest_change {
 #define rxrpc_timer_traces \
	EM(rxrpc_timer_begin, "Begin ") \
	EM(rxrpc_timer_expired, "*EXPR*") \
+	EM(rxrpc_timer_exp_ack, "ExpAck") \
+	EM(rxrpc_timer_exp_hard, "ExpHrd") \
+	EM(rxrpc_timer_exp_idle, "ExpIdl") \
+	EM(rxrpc_timer_exp_keepalive, "ExpKA ") \
+	EM(rxrpc_timer_exp_lost_ack, "ExpLoA") \
+	EM(rxrpc_timer_exp_normal, "ExpNml") \
+	EM(rxrpc_timer_exp_ping, "ExpPng") \
+	EM(rxrpc_timer_exp_resend, "ExpRsn") \
	EM(rxrpc_timer_init_for_reply, "IniRpl") \
	EM(rxrpc_timer_init_for_send_reply, "SndRpl") \
+	EM(rxrpc_timer_restart, "Restrt") \
	EM(rxrpc_timer_set_for_ack, "SetAck") \
+	EM(rxrpc_timer_set_for_hard, "SetHrd") \
+	EM(rxrpc_timer_set_for_idle, "SetIdl") \
+	EM(rxrpc_timer_set_for_keepalive, "KeepAl") \
+	EM(rxrpc_timer_set_for_lost_ack, "SetLoA") \
+	EM(rxrpc_timer_set_for_normal, "SetNml") \
	EM(rxrpc_timer_set_for_ping, "SetPng") \
	EM(rxrpc_timer_set_for_resend, "SetRTx") \
-	E_(rxrpc_timer_set_for_send, "SetTx ")
+	E_(rxrpc_timer_set_for_send, "SetSnd")

 #define rxrpc_propose_ack_traces \
	EM(rxrpc_propose_ack_client_tx_end, "ClTxEnd") \
	EM(rxrpc_propose_ack_input_data, "DataIn ") \
+	EM(rxrpc_propose_ack_ping_for_keepalive, "KeepAlv") \
	EM(rxrpc_propose_ack_ping_for_lost_ack, "LostAck") \
	EM(rxrpc_propose_ack_ping_for_lost_reply, "LostRpl") \
	EM(rxrpc_propose_ack_ping_for_params, "Params ") \
@@ -932,39 +964,47 @@ TRACE_EVENT(rxrpc_rtt_rx,

 TRACE_EVENT(rxrpc_timer,
	    TP_PROTO(struct rxrpc_call *call, enum rxrpc_timer_trace why,
-		     ktime_t now, unsigned long now_j),
+		     unsigned long now),

-	    TP_ARGS(call, why, now, now_j),
+	    TP_ARGS(call, why, now),

	    TP_STRUCT__entry(
		    __field(struct rxrpc_call *, call )
		    __field(enum rxrpc_timer_trace, why )
-		    __field_struct(ktime_t, now )
-		    __field_struct(ktime_t, expire_at )
-		    __field_struct(ktime_t, ack_at )
-		    __field_struct(ktime_t, resend_at )
-		    __field(unsigned long, now_j )
-		    __field(unsigned long, timer )
+		    __field(long, now )
+		    __field(long, ack_at )
+		    __field(long, ack_lost_at )
+		    __field(long, resend_at )
+		    __field(long, ping_at )
+		    __field(long, expect_rx_by )
+		    __field(long, expect_req_by )
+		    __field(long, expect_term_by )
+		    __field(long, timer )
		    ),

	    TP_fast_assign(
		    __entry->call = call;
		    __entry->why = why;
		    __entry->now = now;
-		    __entry->expire_at = call->expire_at;
-		    __entry->ack_at = call->ack_at;
+		    __entry->ack_at = call->ack_at;
+		    __entry->ack_lost_at = call->ack_lost_at;
		    __entry->resend_at = call->resend_at;
-		    __entry->now_j = now_j;
-		    __entry->timer = call->timer.expires;
+		    __entry->expect_rx_by = call->expect_rx_by;
+		    __entry->expect_req_by = call->expect_req_by;
+		    __entry->expect_term_by = call->expect_term_by;
+		    __entry->timer = call->timer.expires;
		    ),

-	    TP_printk("c=%p %s x=%lld a=%lld r=%lld t=%ld",
+	    TP_printk("c=%p %s a=%ld la=%ld r=%ld xr=%ld xq=%ld xt=%ld t=%ld",
		      __entry->call,
		      __print_symbolic(__entry->why, rxrpc_timer_traces),
-		      ktime_to_ns(ktime_sub(__entry->expire_at, __entry->now)),
-		      ktime_to_ns(ktime_sub(__entry->ack_at, __entry->now)),
-		      ktime_to_ns(ktime_sub(__entry->resend_at, __entry->now)),
-		      __entry->timer - __entry->now_j)
+		      __entry->ack_at - __entry->now,
+		      __entry->ack_lost_at - __entry->now,
+		      __entry->resend_at - __entry->now,
+		      __entry->expect_rx_by - __entry->now,
+		      __entry->expect_req_by - __entry->now,
+		      __entry->expect_term_by - __entry->now,
+		      __entry->timer - __entry->now)
	    );

 TRACE_EVENT(rxrpc_rx_lose,
@@ -1080,7 +1120,7 @@ TRACE_EVENT(rxrpc_congest,
		    memcpy(&__entry->sum, summary, sizeof(__entry->sum));
		    ),

-	    TP_printk("c=%p %08x %s %08x %s cw=%u ss=%u nr=%u,%u nw=%u,%u r=%u b=%u u=%u d=%u l=%x%s%s%s",
+	    TP_printk("c=%p r=%08x %s q=%08x %s cw=%u ss=%u nr=%u,%u nw=%u,%u r=%u b=%u u=%u d=%u l=%x%s%s%s",
		      __entry->call,
		      __entry->ack_serial,
		      __print_symbolic(__entry->sum.ack_reason, rxrpc_ack_names),
@@ -59,6 +59,7 @@ enum rxrpc_cmsg_type {
	RXRPC_EXCLUSIVE_CALL = 10, /* s-: Call should be on exclusive connection */
	RXRPC_UPGRADE_SERVICE = 11, /* s-: Request service upgrade for client call */
	RXRPC_TX_LENGTH = 12, /* s-: Total length of Tx data */
+	RXRPC_SET_CALL_TIMEOUT = 13, /* s-: Set one or more call timeouts */
	RXRPC__SUPPORTED
 };

@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
 /* AF_VSOCK sock_diag(7) interface for querying open sockets */

 #ifndef _UAPI__VM_SOCKETS_DIAG_H__
@@ -51,9 +51,7 @@ static struct dsa_switch_tree *dsa_tree_alloc(int index)
	INIT_LIST_HEAD(&dst->list);
	list_add_tail(&dsa_tree_list, &dst->list);

-	/* Initialize the reference counter to the number of switches, not 1 */
	kref_init(&dst->refcount);
-	refcount_set(&dst->refcount.refcount, 0);

	return dst;
 }
@@ -64,20 +62,23 @@ static void dsa_tree_free(struct dsa_switch_tree *dst)
 	kfree(dst);
 }

+static struct dsa_switch_tree *dsa_tree_get(struct dsa_switch_tree *dst)
+{
+	if (dst)
+		kref_get(&dst->refcount);
+
+	return dst;
+}
+
 static struct dsa_switch_tree *dsa_tree_touch(int index)
 {
 	struct dsa_switch_tree *dst;

 	dst = dsa_tree_find(index);
-	if (!dst)
-		dst = dsa_tree_alloc(index);
-
-	return dst;
-}
-
-static void dsa_tree_get(struct dsa_switch_tree *dst)
-{
-	kref_get(&dst->refcount);
+	if (dst)
+		return dsa_tree_get(dst);
+	else
+		return dsa_tree_alloc(index);
 }

 static void dsa_tree_release(struct kref *ref)
@@ -91,7 +92,8 @@ static void dsa_tree_release(struct kref *ref)

 static void dsa_tree_put(struct dsa_switch_tree *dst)
 {
-	kref_put(&dst->refcount, dsa_tree_release);
+	if (dst)
+		kref_put(&dst->refcount, dsa_tree_release);
 }

 static bool dsa_port_is_dsa(struct dsa_port *port)
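[Editor's annotation, not part of the patch] The DSA hunks above move to a NULL-tolerant get/put pair where the getter returns its argument, so call sites can chain lookup and reference-taking. A minimal stand-alone C sketch of the same discipline (all names illustrative, not kernel API):

#include <stdio.h>
#include <stdlib.h>

struct tree {
	int refcount;
};

/* Getter returns its argument so callers can write t = tree_get(find(i)); */
static struct tree *tree_get(struct tree *t)
{
	if (t)			/* NULL-tolerant, like dsa_tree_get() */
		t->refcount++;
	return t;
}

static void tree_put(struct tree *t)
{
	if (t && --t->refcount == 0)	/* NULL-tolerant, like dsa_tree_put() */
		free(t);
}

int main(void)
{
	struct tree *t = calloc(1, sizeof(*t));

	t->refcount = 1;	/* creation holds the first reference */
	tree_get(t);		/* second user */
	tree_put(t);
	tree_put(t);		/* last put frees */
	tree_put(NULL);		/* safe no-op */
	return 0;
}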
@@ -765,6 +767,7 @@ int dsa_register_switch(struct dsa_switch *ds)

 	mutex_lock(&dsa2_mutex);
 	err = dsa_switch_probe(ds);
+	dsa_tree_put(ds->dst);
 	mutex_unlock(&dsa2_mutex);

 	return err;
@@ -292,7 +292,6 @@ void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta,

 	mutex_lock(&sta->ampdu_mlme.mtx);
 	for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
-		___ieee80211_stop_tx_ba_session(sta, i, reason);
 		___ieee80211_stop_rx_ba_session(sta, i, WLAN_BACK_RECIPIENT,
 						WLAN_REASON_QSTA_LEAVE_QBSS,
 						reason != AGG_STOP_DESTROY_STA &&
@@ -300,6 +299,9 @@ void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta,
 	}
 	mutex_unlock(&sta->ampdu_mlme.mtx);

+	for (i = 0; i < IEEE80211_NUM_TIDS; i++)
+		___ieee80211_stop_tx_ba_session(sta, i, reason);
+
 	/* stopping might queue the work again - so cancel only afterwards */
 	cancel_work_sync(&sta->ampdu_mlme.work);
@@ -797,7 +797,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
 	struct mesh_path *mpath;
 	u8 ttl, flags, hopcount;
 	const u8 *orig_addr;
-	u32 orig_sn, metric, metric_txsta, interval;
+	u32 orig_sn, new_metric, orig_metric, last_hop_metric, interval;
 	bool root_is_gate;

 	ttl = rann->rann_ttl;
@@ -808,7 +808,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
 	interval = le32_to_cpu(rann->rann_interval);
 	hopcount = rann->rann_hopcount;
 	hopcount++;
-	metric = le32_to_cpu(rann->rann_metric);
+	orig_metric = le32_to_cpu(rann->rann_metric);

 	/* Ignore our own RANNs */
 	if (ether_addr_equal(orig_addr, sdata->vif.addr))
@@ -825,7 +825,10 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
 		return;
 	}

-	metric_txsta = airtime_link_metric_get(local, sta);
+	last_hop_metric = airtime_link_metric_get(local, sta);
+	new_metric = orig_metric + last_hop_metric;
+	if (new_metric < orig_metric)
+		new_metric = MAX_METRIC;

 	mpath = mesh_path_lookup(sdata, orig_addr);
 	if (!mpath) {
@@ -838,7 +841,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
 	}

 	if (!(SN_LT(mpath->sn, orig_sn)) &&
-	    !(mpath->sn == orig_sn && metric < mpath->rann_metric)) {
+	    !(mpath->sn == orig_sn && new_metric < mpath->rann_metric)) {
 		rcu_read_unlock();
 		return;
 	}
@@ -856,7 +859,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
 	}

 	mpath->sn = orig_sn;
-	mpath->rann_metric = metric + metric_txsta;
+	mpath->rann_metric = new_metric;
 	mpath->is_root = true;
 	/* Recording RANNs sender address to send individually
 	 * addressed PREQs destined for root mesh STA */
@@ -876,7 +879,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
 		mesh_path_sel_frame_tx(MPATH_RANN, flags, orig_addr,
 				       orig_sn, 0, NULL, 0, broadcast_addr,
 				       hopcount, ttl, interval,
-				       metric + metric_txsta, 0, sdata);
+				       new_metric, 0, sdata);
 	}

 	rcu_read_unlock();
@@ -895,7 +895,7 @@ void ieee80211_send_nullfunc(struct ieee80211_local *local,
 	struct ieee80211_hdr_3addr *nullfunc;
 	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;

-	skb = ieee80211_nullfunc_get(&local->hw, &sdata->vif);
+	skb = ieee80211_nullfunc_get(&local->hw, &sdata->vif, true);
 	if (!skb)
 		return;
@@ -4438,13 +4438,15 @@ struct sk_buff *ieee80211_pspoll_get(struct ieee80211_hw *hw,
 EXPORT_SYMBOL(ieee80211_pspoll_get);

 struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw,
-				       struct ieee80211_vif *vif)
+				       struct ieee80211_vif *vif,
+				       bool qos_ok)
 {
 	struct ieee80211_hdr_3addr *nullfunc;
 	struct ieee80211_sub_if_data *sdata;
 	struct ieee80211_if_managed *ifmgd;
 	struct ieee80211_local *local;
 	struct sk_buff *skb;
+	bool qos = false;

 	if (WARN_ON(vif->type != NL80211_IFTYPE_STATION))
 		return NULL;
@@ -4453,7 +4455,17 @@ struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw,
 	ifmgd = &sdata->u.mgd;
 	local = sdata->local;

-	skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*nullfunc));
+	if (qos_ok) {
+		struct sta_info *sta;
+
+		rcu_read_lock();
+		sta = sta_info_get(sdata, ifmgd->bssid);
+		qos = sta && sta->sta.wme;
+		rcu_read_unlock();
+	}
+
+	skb = dev_alloc_skb(local->hw.extra_tx_headroom +
+			    sizeof(*nullfunc) + 2);
 	if (!skb)
 		return NULL;

@@ -4463,6 +4475,19 @@ struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw,
 	nullfunc->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
 					      IEEE80211_STYPE_NULLFUNC |
 					      IEEE80211_FCTL_TODS);
+	if (qos) {
+		__le16 qos = cpu_to_le16(7);
+
+		BUILD_BUG_ON((IEEE80211_STYPE_QOS_NULLFUNC |
+			      IEEE80211_STYPE_NULLFUNC) !=
+			     IEEE80211_STYPE_QOS_NULLFUNC);
+		nullfunc->frame_control |=
+			cpu_to_le16(IEEE80211_STYPE_QOS_NULLFUNC);
+		skb->priority = 7;
+		skb_set_queue_mapping(skb, IEEE80211_AC_VO);
+		skb_put_data(skb, &qos, sizeof(qos));
+	}
+
 	memcpy(nullfunc->addr1, ifmgd->bssid, ETH_ALEN);
 	memcpy(nullfunc->addr2, vif->addr, ETH_ALEN);
 	memcpy(nullfunc->addr3, ifmgd->bssid, ETH_ALEN);
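[Editor's annotation, not part of the patch] The BUILD_BUG_ON in the hunk above encodes why the QoS upgrade can be a single OR: the QoS-nullfunc subtype must be a strict bit superset of the plain nullfunc subtype. A stand-alone C sketch of that invariant (constant values quoted from memory of ieee80211.h; treat them as illustrative):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define STYPE_NULLFUNC     0x0040	/* assumed, see note above */
#define STYPE_QOS_NULLFUNC 0x00C0	/* assumed, see note above */

int main(void)
{
	uint16_t fc = STYPE_NULLFUNC;

	/* The patch's compile-time check: OR-ing the QoS subtype into a
	 * plain nullfunc frame control must yield exactly the QoS subtype.
	 */
	assert((STYPE_QOS_NULLFUNC | STYPE_NULLFUNC) == STYPE_QOS_NULLFUNC);

	fc |= STYPE_QOS_NULLFUNC;	/* upgrade in place, no re-encode */
	printf("frame control now %#06x\n", fc);
	return 0;
}

The runtime side then only has to append the 2-byte QoS control field (TID 7, voice access class) that the enlarged skb allocation reserved.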
@@ -308,7 +308,7 @@ static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
 			     const struct dp_upcall_info *upcall_info,
 			     uint32_t cutlen)
 {
-	unsigned short gso_type = skb_shinfo(skb)->gso_type;
+	unsigned int gso_type = skb_shinfo(skb)->gso_type;
 	struct sw_flow_key later_key;
 	struct sk_buff *segs, *nskb;
 	int err;
@@ -2241,14 +2241,11 @@ int ovs_nla_put_mask(const struct sw_flow *flow, struct sk_buff *skb)

 #define MAX_ACTIONS_BUFSIZE	(32 * 1024)

-static struct sw_flow_actions *nla_alloc_flow_actions(int size, bool log)
+static struct sw_flow_actions *nla_alloc_flow_actions(int size)
 {
 	struct sw_flow_actions *sfa;

-	if (size > MAX_ACTIONS_BUFSIZE) {
-		OVS_NLERR(log, "Flow action size %u bytes exceeds max", size);
-		return ERR_PTR(-EINVAL);
-	}
+	WARN_ON_ONCE(size > MAX_ACTIONS_BUFSIZE);

 	sfa = kmalloc(sizeof(*sfa) + size, GFP_KERNEL);
 	if (!sfa)
@@ -2321,12 +2318,15 @@ static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa,
 		new_acts_size = ksize(*sfa) * 2;

 	if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
-		if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size)
+		if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size) {
+			OVS_NLERR(log, "Flow action size exceeds max %u",
+				  MAX_ACTIONS_BUFSIZE);
 			return ERR_PTR(-EMSGSIZE);
+		}
 		new_acts_size = MAX_ACTIONS_BUFSIZE;
 	}

-	acts = nla_alloc_flow_actions(new_acts_size, log);
+	acts = nla_alloc_flow_actions(new_acts_size);
 	if (IS_ERR(acts))
 		return (void *)acts;

@@ -3059,7 +3059,7 @@ int ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
 {
 	int err;

-	*sfa = nla_alloc_flow_actions(nla_len(attr), log);
+	*sfa = nla_alloc_flow_actions(min(nla_len(attr), MAX_ACTIONS_BUFSIZE));
 	if (IS_ERR(*sfa))
 		return PTR_ERR(*sfa);
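[Editor's annotation, not part of the patch] The flow_netlink hunks move validation to the trust boundary: the one entry point fed by userspace clamps the requested size, so the internal allocator can assert instead of carrying an error path. A stand-alone C sketch of the clamp-at-the-boundary pattern (plain C, not the OVS API):

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ACTIONS_BUFSIZE (32 * 1024)

static int min_int(int a, int b) { return a < b ? a : b; }

/* Internal allocator: size is trusted here, so a hard assert replaces
 * the old error return (mirrors the WARN_ON_ONCE in the patch).
 */
static void *alloc_actions(int size)
{
	assert(size <= MAX_ACTIONS_BUFSIZE);
	return malloc(size);
}

int main(void)
{
	int untrusted = 1 << 20;	/* e.g. nla_len() from userspace */
	void *p = alloc_actions(min_int(untrusted, MAX_ACTIONS_BUFSIZE));

	printf("allocated %p\n", p);
	free(p);
	return 0;
}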
@@ -1687,7 +1687,6 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
 		atomic_long_set(&rollover->num, 0);
 		atomic_long_set(&rollover->num_huge, 0);
 		atomic_long_set(&rollover->num_failed, 0);
-		po->rollover = rollover;
 	}

 	if (type_flags & PACKET_FANOUT_FLAG_UNIQUEID) {
@@ -1745,6 +1744,8 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
 		if (refcount_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
 			__dev_remove_pack(&po->prot_hook);
 			po->fanout = match;
+			po->rollover = rollover;
+			rollover = NULL;
 			refcount_set(&match->sk_ref, refcount_read(&match->sk_ref) + 1);
 			__fanout_link(sk, po);
 			err = 0;
@@ -1758,10 +1759,7 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
 	}

 out:
-	if (err && rollover) {
-		kfree_rcu(rollover, rcu);
-		po->rollover = NULL;
-	}
+	kfree(rollover);
 	mutex_unlock(&fanout_mutex);
 	return err;
 }
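[Editor's annotation, not part of the patch] fanout_add() now publishes the rollover state only on the success path and NULLs the local pointer at that moment, so the single kfree() at out: frees the object exactly when it was never handed over. A stand-alone C sketch of the ownership hand-off idiom (names illustrative):

#include <stdio.h>
#include <stdlib.h>

struct sock { void *rollover; };

static int try_join(struct sock *po, int ok, void **rollover)
{
	int err = -1;

	if (ok) {
		po->rollover = *rollover;	/* publish to the socket */
		*rollover = NULL;		/* caller no longer owns it */
		err = 0;
	}
	/* out: one cleanup frees only what was never published */
	free(*rollover);
	return err;
}

int main(void)
{
	struct sock po = { 0 };
	void *rollover = malloc(16);

	if (try_join(&po, 1, &rollover) == 0)
		printf("rollover published: %p\n", po.rollover);
	free(po.rollover);
	return 0;
}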
@@ -1785,11 +1783,6 @@ static struct packet_fanout *fanout_release(struct sock *sk)
 			list_del(&f->list);
 		else
 			f = NULL;
-
-		if (po->rollover) {
-			kfree_rcu(po->rollover, rcu);
-			po->rollover = NULL;
-		}
 	}
 	mutex_unlock(&fanout_mutex);
@@ -3029,6 +3022,7 @@ static int packet_release(struct socket *sock)
 	synchronize_net();

 	if (f) {
+		kfree(po->rollover);
 		fanout_release_data(f);
 		kfree(f);
 	}
@@ -3097,6 +3091,10 @@ static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
 	if (need_rehook) {
 		if (po->running) {
 			rcu_read_unlock();
+			/* prevents packet_notifier() from calling
+			 * register_prot_hook()
+			 */
+			po->num = 0;
 			__unregister_prot_hook(sk, true);
 			rcu_read_lock();
 			dev_curr = po->prot_hook.dev;
@@ -3105,6 +3103,7 @@ static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
 				 dev->ifindex);
 		}

+		BUG_ON(po->running);
 		po->num = proto;
 		po->prot_hook.type = proto;
|
||||||
void *data = &val;
|
void *data = &val;
|
||||||
union tpacket_stats_u st;
|
union tpacket_stats_u st;
|
||||||
struct tpacket_rollover_stats rstats;
|
struct tpacket_rollover_stats rstats;
|
||||||
struct packet_rollover *rollover;
|
|
||||||
|
|
||||||
if (level != SOL_PACKET)
|
if (level != SOL_PACKET)
|
||||||
return -ENOPROTOOPT;
|
return -ENOPROTOOPT;
|
||||||
|
@ -3922,18 +3920,13 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
|
||||||
0);
|
0);
|
||||||
break;
|
break;
|
||||||
case PACKET_ROLLOVER_STATS:
|
case PACKET_ROLLOVER_STATS:
|
||||||
rcu_read_lock();
|
if (!po->rollover)
|
||||||
rollover = rcu_dereference(po->rollover);
|
|
||||||
if (rollover) {
|
|
||||||
rstats.tp_all = atomic_long_read(&rollover->num);
|
|
||||||
rstats.tp_huge = atomic_long_read(&rollover->num_huge);
|
|
||||||
rstats.tp_failed = atomic_long_read(&rollover->num_failed);
|
|
||||||
data = &rstats;
|
|
||||||
lv = sizeof(rstats);
|
|
||||||
}
|
|
||||||
rcu_read_unlock();
|
|
||||||
if (!rollover)
|
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
rstats.tp_all = atomic_long_read(&po->rollover->num);
|
||||||
|
rstats.tp_huge = atomic_long_read(&po->rollover->num_huge);
|
||||||
|
rstats.tp_failed = atomic_long_read(&po->rollover->num_failed);
|
||||||
|
data = &rstats;
|
||||||
|
lv = sizeof(rstats);
|
||||||
break;
|
break;
|
||||||
case PACKET_TX_HAS_OFF:
|
case PACKET_TX_HAS_OFF:
|
||||||
val = po->tp_tx_has_off;
|
val = po->tp_tx_has_off;
|
||||||
|
|
|
@@ -95,7 +95,6 @@ struct packet_fanout {

 struct packet_rollover {
 	int			sock;
-	struct rcu_head		rcu;
 	atomic_long_t		num;
 	atomic_long_t		num_huge;
 	atomic_long_t		num_failed;
@@ -285,6 +285,7 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
 					   bool upgrade)
 {
 	struct rxrpc_conn_parameters cp;
+	struct rxrpc_call_params p;
 	struct rxrpc_call *call;
 	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
 	int ret;
@@ -302,6 +303,10 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
 	if (key && !key->payload.data[0])
 		key = NULL; /* a no-security key */

+	memset(&p, 0, sizeof(p));
+	p.user_call_ID = user_call_ID;
+	p.tx_total_len = tx_total_len;
+
 	memset(&cp, 0, sizeof(cp));
 	cp.local = rx->local;
 	cp.key = key;
@@ -309,8 +314,7 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
 	cp.exclusive = false;
 	cp.upgrade = upgrade;
 	cp.service_id = srx->srx_service;
-	call = rxrpc_new_client_call(rx, &cp, srx, user_call_ID, tx_total_len,
-				     gfp);
+	call = rxrpc_new_client_call(rx, &cp, srx, &p, gfp);
 	/* The socket has been unlocked. */
 	if (!IS_ERR(call)) {
 		call->notify_rx = notify_rx;
@@ -863,6 +867,19 @@ static int rxrpc_release_sock(struct sock *sk)
 	sock_orphan(sk);
 	sk->sk_shutdown = SHUTDOWN_MASK;

+	/* We want to kill off all connections from a service socket
+	 * as fast as possible because we can't share these; client
+	 * sockets, on the other hand, can share an endpoint.
+	 */
+	switch (sk->sk_state) {
+	case RXRPC_SERVER_BOUND:
+	case RXRPC_SERVER_BOUND2:
+	case RXRPC_SERVER_LISTENING:
+	case RXRPC_SERVER_LISTEN_DISABLED:
+		rx->local->service_closed = true;
+		break;
+	}
+
 	spin_lock_bh(&sk->sk_receive_queue.lock);
 	sk->sk_state = RXRPC_CLOSE;
 	spin_unlock_bh(&sk->sk_receive_queue.lock);
@@ -878,6 +895,8 @@ static int rxrpc_release_sock(struct sock *sk)
 	rxrpc_release_calls_on_socket(rx);
 	flush_workqueue(rxrpc_workqueue);
 	rxrpc_purge_queue(&sk->sk_receive_queue);
+	rxrpc_queue_work(&rx->local->rxnet->service_conn_reaper);
+	rxrpc_queue_work(&rx->local->rxnet->client_conn_reaper);

 	rxrpc_put_local(rx->local);
 	rx->local = NULL;
@@ -79,17 +79,20 @@ struct rxrpc_net {
 	struct list_head	conn_proc_list;	/* List of conns in this namespace for proc */
 	struct list_head	service_conns;	/* Service conns in this namespace */
 	rwlock_t		conn_lock;	/* Lock for ->conn_proc_list, ->service_conns */
-	struct delayed_work	service_conn_reaper;
+	struct work_struct	service_conn_reaper;
+	struct timer_list	service_conn_reap_timer;

 	unsigned int		nr_client_conns;
 	unsigned int		nr_active_client_conns;
 	bool			kill_all_client_conns;
+	bool			live;
 	spinlock_t		client_conn_cache_lock; /* Lock for ->*_client_conns */
 	spinlock_t		client_conn_discard_lock; /* Prevent multiple discarders */
 	struct list_head	waiting_client_conns;
 	struct list_head	active_client_conns;
 	struct list_head	idle_client_conns;
-	struct delayed_work	client_conn_reaper;
+	struct work_struct	client_conn_reaper;
+	struct timer_list	client_conn_reap_timer;

 	struct list_head	local_endpoints;
 	struct mutex		local_mutex;	/* Lock for ->local_endpoints */
@@ -265,6 +268,7 @@ struct rxrpc_local {
 	rwlock_t		services_lock;	/* lock for services list */
 	int			debug_id;	/* debug ID for printks */
 	bool			dead;
+	bool			service_closed;	/* Service socket closed */
 	struct sockaddr_rxrpc	srx;		/* local address */
 };
@@ -338,8 +342,17 @@ enum rxrpc_conn_flag {
 	RXRPC_CONN_DONT_REUSE,		/* Don't reuse this connection */
 	RXRPC_CONN_COUNTED,		/* Counted by rxrpc_nr_client_conns */
 	RXRPC_CONN_PROBING_FOR_UPGRADE,	/* Probing for service upgrade */
+	RXRPC_CONN_FINAL_ACK_0,		/* Need final ACK for channel 0 */
+	RXRPC_CONN_FINAL_ACK_1,		/* Need final ACK for channel 1 */
+	RXRPC_CONN_FINAL_ACK_2,		/* Need final ACK for channel 2 */
+	RXRPC_CONN_FINAL_ACK_3,		/* Need final ACK for channel 3 */
 };

+#define RXRPC_CONN_FINAL_ACK_MASK ((1UL << RXRPC_CONN_FINAL_ACK_0) |	\
+				   (1UL << RXRPC_CONN_FINAL_ACK_1) |	\
+				   (1UL << RXRPC_CONN_FINAL_ACK_2) |	\
+				   (1UL << RXRPC_CONN_FINAL_ACK_3))
+
 /*
  * Events that can be raised upon a connection.
  */
@@ -393,6 +406,7 @@ struct rxrpc_connection {
 #define RXRPC_ACTIVE_CHANS_MASK	((1 << RXRPC_MAXCALLS) - 1)
 	struct list_head	waiting_calls;	/* Calls waiting for channels */
 	struct rxrpc_channel {
+		unsigned long		final_ack_at;	/* Time at which to issue final ACK */
 		struct rxrpc_call __rcu	*call;		/* Active call */
 		u32			call_id;	/* ID of current call */
 		u32			call_counter;	/* Call ID counter */
@@ -404,6 +418,7 @@ struct rxrpc_connection {
 		};
 	} channels[RXRPC_MAXCALLS];

+	struct timer_list	timer;		/* Conn event timer */
 	struct work_struct	processor;	/* connection event processor */
 	union {
 		struct rb_node	client_node;	/* Node in local->client_conns */
@@ -457,9 +472,10 @@ enum rxrpc_call_flag {
 enum rxrpc_call_event {
 	RXRPC_CALL_EV_ACK,		/* need to generate ACK */
 	RXRPC_CALL_EV_ABORT,		/* need to generate abort */
-	RXRPC_CALL_EV_TIMER,		/* Timer expired */
 	RXRPC_CALL_EV_RESEND,		/* Tx resend required */
 	RXRPC_CALL_EV_PING,		/* Ping send required */
+	RXRPC_CALL_EV_EXPIRED,		/* Expiry occurred */
+	RXRPC_CALL_EV_ACK_LOST,		/* ACK may be lost, send ping */
 };

 /*
@@ -503,10 +519,16 @@ struct rxrpc_call {
 	struct rxrpc_peer	*peer;		/* Peer record for remote address */
 	struct rxrpc_sock __rcu	*socket;	/* socket responsible */
 	struct mutex		user_mutex;	/* User access mutex */
-	ktime_t			ack_at;		/* When deferred ACK needs to happen */
-	ktime_t			resend_at;	/* When next resend needs to happen */
-	ktime_t			ping_at;	/* When next to send a ping */
-	ktime_t			expire_at;	/* When the call times out */
+	unsigned long		ack_at;		/* When deferred ACK needs to happen */
+	unsigned long		ack_lost_at;	/* When ACK is figured as lost */
+	unsigned long		resend_at;	/* When next resend needs to happen */
+	unsigned long		ping_at;	/* When next to send a ping */
+	unsigned long		keepalive_at;	/* When next to send a keepalive ping */
+	unsigned long		expect_rx_by;	/* When we expect to get a packet by */
+	unsigned long		expect_req_by;	/* When we expect to get a request DATA packet by */
+	unsigned long		expect_term_by;	/* When we expect call termination by */
+	u32			next_rx_timo;	/* Timeout for next Rx packet (jif) */
+	u32			next_req_timo;	/* Timeout for next Rx request packet (jif) */
 	struct timer_list	timer;		/* Combined event timer */
 	struct work_struct	processor;	/* Event processor */
 	rxrpc_notify_rx_t	notify_rx;	/* kernel service Rx notification function */
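[Editor's annotation, not part of the patch] The switch from ktime_t to unsigned long means every deadline above is now a jiffies value compared with the kernel's wrap-safe time macros. A stand-alone C sketch of the time_after_eq() semantics these fields rely on:

#include <stdio.h>

typedef unsigned long jiffies_t;

/* Wrap-safe "a is at or after b": subtract and test the sign of the
 * difference instead of comparing raw counters, so a counter that has
 * wrapped past zero still orders correctly.
 */
static int time_after_eq(jiffies_t a, jiffies_t b)
{
	return (long)(a - b) >= 0;
}

int main(void)
{
	jiffies_t now = (jiffies_t)-5;	/* about to wrap */
	jiffies_t deadline = now + 10;	/* wraps past zero */

	printf("%d\n", time_after_eq(now, deadline));		/* 0: not yet */
	printf("%d\n", time_after_eq(now + 20, deadline));	/* 1: expired */
	return 0;
}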
@@ -609,6 +631,8 @@ struct rxrpc_call {
 	ktime_t			acks_latest_ts;	/* Timestamp of latest ACK received */
 	rxrpc_serial_t		acks_latest;	/* serial number of latest ACK received */
 	rxrpc_seq_t		acks_lowest_nak; /* Lowest NACK in the buffer (or ==tx_hard_ack) */
+	rxrpc_seq_t		acks_lost_top;	/* tx_top at the time lost-ack ping sent */
+	rxrpc_serial_t		acks_lost_ping;	/* Serial number of probe ACK */
 };

 /*
@@ -632,6 +656,35 @@ struct rxrpc_ack_summary {
 	u8			cumulative_acks;
 };

+/*
+ * sendmsg() cmsg-specified parameters.
+ */
+enum rxrpc_command {
+	RXRPC_CMD_SEND_DATA,		/* send data message */
+	RXRPC_CMD_SEND_ABORT,		/* request abort generation */
+	RXRPC_CMD_ACCEPT,		/* [server] accept incoming call */
+	RXRPC_CMD_REJECT_BUSY,		/* [server] reject a call as busy */
+};
+
+struct rxrpc_call_params {
+	s64			tx_total_len;	/* Total Tx data length (if send data) */
+	unsigned long		user_call_ID;	/* User's call ID */
+	struct {
+		u32		hard;		/* Maximum lifetime (sec) */
+		u32		idle;		/* Max time since last data packet (msec) */
+		u32		normal;		/* Max time since last call packet (msec) */
+	} timeouts;
+	u8			nr_timeouts;	/* Number of timeouts specified */
+};
+
+struct rxrpc_send_params {
+	struct rxrpc_call_params call;
+	u32			abort_code;	/* Abort code to Tx (if abort) */
+	enum rxrpc_command	command : 8;	/* The command to implement */
+	bool			exclusive;	/* Shared or exclusive call */
+	bool			upgrade;	/* If the connection is upgradeable */
+};
+
 #include <trace/events/rxrpc.h>

 /*
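[Editor's annotation, not part of the patch] rxrpc_call_params carries up to three per-call timeouts that userspace can now attach with an RXRPC_SET_CALL_TIMEOUT control message (added in the uapi hunk earlier). A hedged sketch of what such a sendmsg() call could look like; the cmsg payload layout is an assumption pieced together from the struct above, not taken from documentation, and the SOL_RXRPC value is quoted from memory:

#include <string.h>
#include <sys/socket.h>

#ifndef SOL_RXRPC
#define SOL_RXRPC 272			/* believed value from linux/socket.h */
#endif
#define RXRPC_SET_CALL_TIMEOUT 13	/* from the uapi hunk above */

/* Assumed payload: one to three u32s (hard, idle, normal), matching
 * p->timeouts and p->nr_timeouts in rxrpc_call_params.
 */
int set_call_timeout(int fd, unsigned int hard_sec)
{
	char buf[CMSG_SPACE(sizeof(unsigned int))];
	struct msghdr msg;
	struct cmsghdr *cm;

	memset(&msg, 0, sizeof(msg));
	memset(buf, 0, sizeof(buf));
	msg.msg_control = buf;
	msg.msg_controllen = sizeof(buf);

	cm = CMSG_FIRSTHDR(&msg);
	cm->cmsg_level = SOL_RXRPC;
	cm->cmsg_type = RXRPC_SET_CALL_TIMEOUT;
	cm->cmsg_len = CMSG_LEN(sizeof(unsigned int));
	memcpy(CMSG_DATA(cm), &hard_sec, sizeof(hard_sec));

	return (int)sendmsg(fd, &msg, 0);
}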
@@ -657,12 +710,19 @@ int rxrpc_reject_call(struct rxrpc_sock *);
 /*
  * call_event.c
  */
-void __rxrpc_set_timer(struct rxrpc_call *, enum rxrpc_timer_trace, ktime_t);
-void rxrpc_set_timer(struct rxrpc_call *, enum rxrpc_timer_trace, ktime_t);
 void rxrpc_propose_ACK(struct rxrpc_call *, u8, u16, u32, bool, bool,
 		       enum rxrpc_propose_ack_trace);
 void rxrpc_process_call(struct work_struct *);

+static inline void rxrpc_reduce_call_timer(struct rxrpc_call *call,
+					   unsigned long expire_at,
+					   unsigned long now,
+					   enum rxrpc_timer_trace why)
+{
+	trace_rxrpc_timer(call, why, now);
+	timer_reduce(&call->timer, expire_at);
+}
+
 /*
  * call_object.c
  */
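[Editor's annotation, not part of the patch] rxrpc_reduce_call_timer() is a thin wrapper around timer_reduce(): the deadline of the one combined call timer can only ever be pulled earlier, which lets many independent soft deadlines share a single hardware timer cheaply. A stand-alone C model of that reduce-only contract:

#include <stdio.h>

struct timer { long expires; int pending; };

/* Model of timer_reduce(): move the expiry earlier, never later.
 * Callers keep one timer armed at min(all soft deadlines).
 */
static void timer_reduce(struct timer *t, long expire_at)
{
	if (!t->pending || expire_at < t->expires) {
		t->expires = expire_at;
		t->pending = 1;
	}
}

int main(void)
{
	struct timer t = { 0, 0 };

	timer_reduce(&t, 1000);	/* arm */
	timer_reduce(&t, 400);	/* pulled earlier */
	timer_reduce(&t, 900);	/* ignored: later than current expiry */
	printf("expires at %ld\n", t.expires);	/* 400 */
	return 0;
}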
@@ -672,11 +732,11 @@ extern unsigned int rxrpc_max_call_lifetime;
 extern struct kmem_cache *rxrpc_call_jar;

 struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *, unsigned long);
-struct rxrpc_call *rxrpc_alloc_call(gfp_t);
+struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *, gfp_t);
 struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *,
 					 struct rxrpc_conn_parameters *,
 					 struct sockaddr_rxrpc *,
-					 unsigned long, s64, gfp_t);
+					 struct rxrpc_call_params *, gfp_t);
 int rxrpc_retry_client_call(struct rxrpc_sock *,
 			    struct rxrpc_call *,
 			    struct rxrpc_conn_parameters *,
@@ -803,8 +863,8 @@ static inline bool __rxrpc_abort_eproto(struct rxrpc_call *call,
  */
 extern unsigned int rxrpc_max_client_connections;
 extern unsigned int rxrpc_reap_client_connections;
-extern unsigned int rxrpc_conn_idle_client_expiry;
-extern unsigned int rxrpc_conn_idle_client_fast_expiry;
+extern unsigned long rxrpc_conn_idle_client_expiry;
+extern unsigned long rxrpc_conn_idle_client_fast_expiry;
 extern struct idr rxrpc_client_conn_ids;

 void rxrpc_destroy_client_conn_ids(void);
@@ -825,6 +885,7 @@ void rxrpc_process_connection(struct work_struct *);
  * conn_object.c
  */
 extern unsigned int rxrpc_connection_expiry;
+extern unsigned int rxrpc_closed_conn_expiry;

 struct rxrpc_connection *rxrpc_alloc_connection(gfp_t);
 struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *,
@@ -861,6 +922,12 @@ static inline void rxrpc_put_connection(struct rxrpc_connection *conn)
 		rxrpc_put_service_conn(conn);
 }

+static inline void rxrpc_reduce_conn_timer(struct rxrpc_connection *conn,
+					   unsigned long expire_at)
+{
+	timer_reduce(&conn->timer, expire_at);
+}
+
 /*
  * conn_service.c
  */
@@ -930,13 +997,13 @@ static inline void rxrpc_queue_local(struct rxrpc_local *local)
  * misc.c
  */
 extern unsigned int rxrpc_max_backlog __read_mostly;
-extern unsigned int rxrpc_requested_ack_delay;
-extern unsigned int rxrpc_soft_ack_delay;
-extern unsigned int rxrpc_idle_ack_delay;
+extern unsigned long rxrpc_requested_ack_delay;
+extern unsigned long rxrpc_soft_ack_delay;
+extern unsigned long rxrpc_idle_ack_delay;
 extern unsigned int rxrpc_rx_window_size;
 extern unsigned int rxrpc_rx_mtu;
 extern unsigned int rxrpc_rx_jumbo_max;
-extern unsigned int rxrpc_resend_timeout;
+extern unsigned long rxrpc_resend_timeout;

 extern const s8 rxrpc_ack_priority[];
@@ -954,7 +1021,7 @@ static inline struct rxrpc_net *rxrpc_net(struct net *net)
 /*
  * output.c
  */
-int rxrpc_send_ack_packet(struct rxrpc_call *, bool);
+int rxrpc_send_ack_packet(struct rxrpc_call *, bool, rxrpc_serial_t *);
 int rxrpc_send_abort_packet(struct rxrpc_call *);
 int rxrpc_send_data_packet(struct rxrpc_call *, struct sk_buff *, bool);
 void rxrpc_reject_packets(struct rxrpc_local *);
@@ -94,7 +94,7 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
 	/* Now it gets complicated, because calls get registered with the
 	 * socket here, particularly if a user ID is preassigned by the user.
 	 */
-	call = rxrpc_alloc_call(gfp);
+	call = rxrpc_alloc_call(rx, gfp);
 	if (!call)
 		return -ENOMEM;
 	call->flags |= (1 << RXRPC_CALL_IS_SERVICE);
@@ -21,80 +21,6 @@
 #include <net/af_rxrpc.h>
 #include "ar-internal.h"

-/*
- * Set the timer
- */
-void __rxrpc_set_timer(struct rxrpc_call *call, enum rxrpc_timer_trace why,
-		       ktime_t now)
-{
-	unsigned long t_j, now_j = jiffies;
-	ktime_t t;
-	bool queue = false;
-
-	if (call->state < RXRPC_CALL_COMPLETE) {
-		t = call->expire_at;
-		if (!ktime_after(t, now)) {
-			trace_rxrpc_timer(call, why, now, now_j);
-			queue = true;
-			goto out;
-		}
-
-		if (!ktime_after(call->resend_at, now)) {
-			call->resend_at = call->expire_at;
-			if (!test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
-				queue = true;
-		} else if (ktime_before(call->resend_at, t)) {
-			t = call->resend_at;
-		}
-
-		if (!ktime_after(call->ack_at, now)) {
-			call->ack_at = call->expire_at;
-			if (!test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events))
-				queue = true;
-		} else if (ktime_before(call->ack_at, t)) {
-			t = call->ack_at;
-		}
-
-		if (!ktime_after(call->ping_at, now)) {
-			call->ping_at = call->expire_at;
-			if (!test_and_set_bit(RXRPC_CALL_EV_PING, &call->events))
-				queue = true;
-		} else if (ktime_before(call->ping_at, t)) {
-			t = call->ping_at;
-		}
-
-		t_j = nsecs_to_jiffies(ktime_to_ns(ktime_sub(t, now)));
-		t_j += jiffies;
-
-		/* We have to make sure that the calculated jiffies value falls
-		 * at or after the nsec value, or we may loop ceaselessly
-		 * because the timer times out, but we haven't reached the nsec
-		 * timeout yet.
-		 */
-		t_j++;
-
-		if (call->timer.expires != t_j || !timer_pending(&call->timer)) {
-			mod_timer(&call->timer, t_j);
-			trace_rxrpc_timer(call, why, now, now_j);
-		}
-	}
-
-out:
-	if (queue)
-		rxrpc_queue_call(call);
-}
-
-/*
- * Set the timer
- */
-void rxrpc_set_timer(struct rxrpc_call *call, enum rxrpc_timer_trace why,
-		     ktime_t now)
-{
-	read_lock_bh(&call->state_lock);
-	__rxrpc_set_timer(call, why, now);
-	read_unlock_bh(&call->state_lock);
-}
-
 /*
  * Propose a PING ACK be sent.
  */
@@ -106,12 +32,13 @@ static void rxrpc_propose_ping(struct rxrpc_call *call,
 		    !test_and_set_bit(RXRPC_CALL_EV_PING, &call->events))
 			rxrpc_queue_call(call);
 	} else {
-		ktime_t now = ktime_get_real();
-		ktime_t ping_at = ktime_add_ms(now, rxrpc_idle_ack_delay);
+		unsigned long now = jiffies;
+		unsigned long ping_at = now + rxrpc_idle_ack_delay;

-		if (ktime_before(ping_at, call->ping_at)) {
-			call->ping_at = ping_at;
-			rxrpc_set_timer(call, rxrpc_timer_set_for_ping, now);
+		if (time_before(ping_at, call->ping_at)) {
+			WRITE_ONCE(call->ping_at, ping_at);
+			rxrpc_reduce_call_timer(call, ping_at, now,
+						rxrpc_timer_set_for_ping);
 		}
 	}
 }
@@ -125,8 +52,7 @@ static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
 			   enum rxrpc_propose_ack_trace why)
 {
 	enum rxrpc_propose_ack_outcome outcome = rxrpc_propose_ack_use;
-	unsigned int expiry = rxrpc_soft_ack_delay;
-	ktime_t now, ack_at;
+	unsigned long expiry = rxrpc_soft_ack_delay;
 	s8 prior = rxrpc_ack_priority[ack_reason];

 	/* Pings are handled specially because we don't want to accidentally
@@ -190,11 +116,18 @@ static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
 		    background)
 			rxrpc_queue_call(call);
 	} else {
-		now = ktime_get_real();
-		ack_at = ktime_add_ms(now, expiry);
-		if (ktime_before(ack_at, call->ack_at)) {
-			call->ack_at = ack_at;
-			rxrpc_set_timer(call, rxrpc_timer_set_for_ack, now);
+		unsigned long now = jiffies, ack_at;
+
+		if (call->peer->rtt_usage > 0)
+			ack_at = nsecs_to_jiffies(call->peer->rtt);
+		else
+			ack_at = expiry;
+
+		ack_at += now;
+		if (time_before(ack_at, call->ack_at)) {
+			WRITE_ONCE(call->ack_at, ack_at);
+			rxrpc_reduce_call_timer(call, ack_at, now,
+						rxrpc_timer_set_for_ack);
 		}
 	}
@@ -227,18 +160,28 @@ static void rxrpc_congestion_timeout(struct rxrpc_call *call)
 /*
  * Perform retransmission of NAK'd and unack'd packets.
  */
-static void rxrpc_resend(struct rxrpc_call *call, ktime_t now)
+static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
 {
 	struct rxrpc_skb_priv *sp;
 	struct sk_buff *skb;
+	unsigned long resend_at;
 	rxrpc_seq_t cursor, seq, top;
-	ktime_t max_age, oldest, ack_ts;
+	ktime_t now, max_age, oldest, ack_ts, timeout, min_timeo;
 	int ix;
 	u8 annotation, anno_type, retrans = 0, unacked = 0;

 	_enter("{%d,%d}", call->tx_hard_ack, call->tx_top);

-	max_age = ktime_sub_ms(now, rxrpc_resend_timeout);
+	if (call->peer->rtt_usage > 1)
+		timeout = ns_to_ktime(call->peer->rtt * 3 / 2);
+	else
+		timeout = ms_to_ktime(rxrpc_resend_timeout);
+	min_timeo = ns_to_ktime((1000000000 / HZ) * 4);
+	if (ktime_before(timeout, min_timeo))
+		timeout = min_timeo;
+
+	now = ktime_get_real();
+	max_age = ktime_sub(now, timeout);

 	spin_lock_bh(&call->lock);

@@ -282,7 +225,9 @@ static void rxrpc_resend(struct rxrpc_call *call, ktime_t now)
 			       ktime_to_ns(ktime_sub(skb->tstamp, max_age)));
 	}

-	call->resend_at = ktime_add_ms(oldest, rxrpc_resend_timeout);
+	resend_at = nsecs_to_jiffies(ktime_to_ns(ktime_sub(oldest, now)));
+	resend_at += jiffies + rxrpc_resend_timeout;
+	WRITE_ONCE(call->resend_at, resend_at);

 	if (unacked)
 		rxrpc_congestion_timeout(call);
@@ -292,14 +237,15 @@
 	 * retransmitting data.
 	 */
 	if (!retrans) {
-		rxrpc_set_timer(call, rxrpc_timer_set_for_resend, now);
+		rxrpc_reduce_call_timer(call, resend_at, now_j,
+					rxrpc_timer_set_for_resend);
 		spin_unlock_bh(&call->lock);
 		ack_ts = ktime_sub(now, call->acks_latest_ts);
 		if (ktime_to_ns(ack_ts) < call->peer->rtt)
 			goto out;
 		rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, false,
 				  rxrpc_propose_ack_ping_for_lost_ack);
-		rxrpc_send_ack_packet(call, true);
+		rxrpc_send_ack_packet(call, true, NULL);
 		goto out;
 	}
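[Editor's annotation, not part of the patch] The resend deadline above becomes RTT-driven: 1.5x the measured round-trip time once enough samples exist, a fixed fallback otherwise, floored at four timer ticks so a tiny RTT cannot spin the retransmit timer. A stand-alone C sketch of that arithmetic (units are nanoseconds; HZ and the fallback are illustrative parameters):

#include <stdio.h>

#define HZ 250ULL			/* illustrative tick rate */
#define NSEC_PER_SEC 1000000000ULL

static unsigned long long resend_timeout_ns(unsigned long long rtt_ns,
					    int rtt_samples,
					    unsigned long long fallback_ns)
{
	unsigned long long t = (rtt_samples > 1) ? rtt_ns * 3 / 2 : fallback_ns;
	unsigned long long min_t = (NSEC_PER_SEC / HZ) * 4;	/* 4-tick floor */

	return (t < min_t) ? min_t : t;
}

int main(void)
{
	/* tiny RTT gets clamped to the floor (16ms at HZ=250) */
	printf("%llu\n", resend_timeout_ns(1000000, 8, NSEC_PER_SEC));
	/* healthy 30ms RTT becomes a 45ms resend deadline */
	printf("%llu\n", resend_timeout_ns(30000000, 8, NSEC_PER_SEC));
	return 0;
}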
@@ -364,7 +310,8 @@ void rxrpc_process_call(struct work_struct *work)
 {
 	struct rxrpc_call *call =
 		container_of(work, struct rxrpc_call, processor);
-	ktime_t now;
+	rxrpc_serial_t *send_ack;
+	unsigned long now, next, t;

 	rxrpc_see_call(call);

@@ -384,22 +331,89 @@ void rxrpc_process_call(struct work_struct *work)
 		goto out_put;
 	}

-	now = ktime_get_real();
-	if (ktime_before(call->expire_at, now)) {
+	/* Work out if any timeouts tripped */
+	now = jiffies;
+	t = READ_ONCE(call->expect_rx_by);
+	if (time_after_eq(now, t)) {
+		trace_rxrpc_timer(call, rxrpc_timer_exp_normal, now);
+		set_bit(RXRPC_CALL_EV_EXPIRED, &call->events);
+	}
+
+	t = READ_ONCE(call->expect_req_by);
+	if (call->state == RXRPC_CALL_SERVER_RECV_REQUEST &&
+	    time_after_eq(now, t)) {
+		trace_rxrpc_timer(call, rxrpc_timer_exp_idle, now);
+		set_bit(RXRPC_CALL_EV_EXPIRED, &call->events);
+	}
+
+	t = READ_ONCE(call->expect_term_by);
+	if (time_after_eq(now, t)) {
+		trace_rxrpc_timer(call, rxrpc_timer_exp_hard, now);
+		set_bit(RXRPC_CALL_EV_EXPIRED, &call->events);
+	}
+
+	t = READ_ONCE(call->ack_at);
+	if (time_after_eq(now, t)) {
+		trace_rxrpc_timer(call, rxrpc_timer_exp_ack, now);
+		cmpxchg(&call->ack_at, t, now + MAX_JIFFY_OFFSET);
+		set_bit(RXRPC_CALL_EV_ACK, &call->events);
+	}
+
+	t = READ_ONCE(call->ack_lost_at);
+	if (time_after_eq(now, t)) {
+		trace_rxrpc_timer(call, rxrpc_timer_exp_lost_ack, now);
+		cmpxchg(&call->ack_lost_at, t, now + MAX_JIFFY_OFFSET);
+		set_bit(RXRPC_CALL_EV_ACK_LOST, &call->events);
+	}
+
+	t = READ_ONCE(call->keepalive_at);
+	if (time_after_eq(now, t)) {
+		trace_rxrpc_timer(call, rxrpc_timer_exp_keepalive, now);
+		cmpxchg(&call->keepalive_at, t, now + MAX_JIFFY_OFFSET);
+		rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, true,
+				  rxrpc_propose_ack_ping_for_keepalive);
+		set_bit(RXRPC_CALL_EV_PING, &call->events);
+	}
+
+	t = READ_ONCE(call->ping_at);
+	if (time_after_eq(now, t)) {
+		trace_rxrpc_timer(call, rxrpc_timer_exp_ping, now);
+		cmpxchg(&call->ping_at, t, now + MAX_JIFFY_OFFSET);
+		set_bit(RXRPC_CALL_EV_PING, &call->events);
+	}
+
+	t = READ_ONCE(call->resend_at);
+	if (time_after_eq(now, t)) {
+		trace_rxrpc_timer(call, rxrpc_timer_exp_resend, now);
+		cmpxchg(&call->resend_at, t, now + MAX_JIFFY_OFFSET);
+		set_bit(RXRPC_CALL_EV_RESEND, &call->events);
+	}
+
+	/* Process events */
+	if (test_and_clear_bit(RXRPC_CALL_EV_EXPIRED, &call->events)) {
 		rxrpc_abort_call("EXP", call, 0, RX_USER_ABORT, -ETIME);
 		set_bit(RXRPC_CALL_EV_ABORT, &call->events);
 		goto recheck_state;
 	}

-	if (test_and_clear_bit(RXRPC_CALL_EV_ACK, &call->events)) {
+	send_ack = NULL;
+	if (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events)) {
+		call->acks_lost_top = call->tx_top;
+		rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, false,
+				  rxrpc_propose_ack_ping_for_lost_ack);
+		send_ack = &call->acks_lost_ping;
+	}
+
+	if (test_and_clear_bit(RXRPC_CALL_EV_ACK, &call->events) ||
+	    send_ack) {
 		if (call->ackr_reason) {
-			rxrpc_send_ack_packet(call, false);
+			rxrpc_send_ack_packet(call, false, send_ack);
 			goto recheck_state;
 		}
 	}

 	if (test_and_clear_bit(RXRPC_CALL_EV_PING, &call->events)) {
-		rxrpc_send_ack_packet(call, true);
+		rxrpc_send_ack_packet(call, true, NULL);
 		goto recheck_state;
 	}

@@ -408,7 +422,24 @@ void rxrpc_process_call(struct work_struct *work)
 		goto recheck_state;
 	}

-	rxrpc_set_timer(call, rxrpc_timer_set_for_resend, now);
+	/* Make sure the timer is restarted */
+	next = call->expect_rx_by;
+
+#define set(T) { t = READ_ONCE(T); if (time_before(t, next)) next = t; }
+
+	set(call->expect_req_by);
+	set(call->expect_term_by);
+	set(call->ack_at);
+	set(call->ack_lost_at);
+	set(call->resend_at);
+	set(call->keepalive_at);
+	set(call->ping_at);
+
+	now = jiffies;
+	if (time_after_eq(now, next))
+		goto recheck_state;
+
+	rxrpc_reduce_call_timer(call, next, now, rxrpc_timer_restart);

 	/* other events may have been raised since we started checking */
 	if (call->events && call->state < RXRPC_CALL_COMPLETE) {
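[Editor's annotation, not part of the patch] Each expired deadline above is consumed with a cmpxchg() that pushes it to "now + MAX_JIFFY_OFFSET" (effectively never), so only one pass latches the expiry and a concurrent re-arm wins harmlessly. A stand-alone C sketch of that latch using C11 atomics:

#include <stdatomic.h>
#include <stdio.h>

#define MAX_JIFFY_OFFSET ((~0UL >> 1) - 1)	/* as in linux/jiffies.h */

int main(void)
{
	_Atomic unsigned long ack_at = 100;
	unsigned long now = 150, t = ack_at;
	int ev_ack = 0;

	if ((long)(now - t) >= 0) {		/* time_after_eq(now, t) */
		unsigned long expect = t;

		/* Fails harmlessly if another path already re-armed it. */
		if (atomic_compare_exchange_strong(&ack_at, &expect,
						   now + MAX_JIFFY_OFFSET))
			ev_ack = 1;		/* we latched the expiry */
	}
	printf("event=%d next=%lu\n", ev_ack, (unsigned long)ack_at);
	return 0;
}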
@@ -51,10 +51,14 @@ static void rxrpc_call_timer_expired(struct timer_list *t)

 	_enter("%d", call->debug_id);

-	if (call->state < RXRPC_CALL_COMPLETE)
-		rxrpc_set_timer(call, rxrpc_timer_expired, ktime_get_real());
+	if (call->state < RXRPC_CALL_COMPLETE) {
+		trace_rxrpc_timer(call, rxrpc_timer_expired, jiffies);
+		rxrpc_queue_call(call);
+	}
 }

+static struct lock_class_key rxrpc_call_user_mutex_lock_class_key;
+
 /*
  * find an extant server call
  * - called in process context with IRQs enabled
@@ -95,7 +99,7 @@ struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *rx,
 /*
  * allocate a new call
  */
-struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
+struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp)
 {
 	struct rxrpc_call *call;

@@ -114,6 +118,14 @@ struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
 		goto nomem_2;

 	mutex_init(&call->user_mutex);
+
+	/* Prevent lockdep reporting a deadlock false positive between the afs
+	 * filesystem and sys_sendmsg() via the mmap sem.
+	 */
+	if (rx->sk.sk_kern_sock)
+		lockdep_set_class(&call->user_mutex,
+				  &rxrpc_call_user_mutex_lock_class_key);
+
 	timer_setup(&call->timer, rxrpc_call_timer_expired, 0);
 	INIT_WORK(&call->processor, &rxrpc_process_call);
 	INIT_LIST_HEAD(&call->link);
@@ -128,6 +140,8 @@ struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
 	atomic_set(&call->usage, 1);
 	call->debug_id = atomic_inc_return(&rxrpc_debug_id);
 	call->tx_total_len = -1;
+	call->next_rx_timo = 20 * HZ;
+	call->next_req_timo = 1 * HZ;

 	memset(&call->sock_node, 0xed, sizeof(call->sock_node));
@@ -150,7 +164,8 @@ struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
 /*
  * Allocate a new client call.
  */
-static struct rxrpc_call *rxrpc_alloc_client_call(struct sockaddr_rxrpc *srx,
+static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
+						  struct sockaddr_rxrpc *srx,
 						  gfp_t gfp)
 {
 	struct rxrpc_call *call;
@@ -158,7 +173,7 @@ static struct rxrpc_call *rxrpc_alloc_client_call(struct sockaddr_rxrpc *srx,

 	_enter("");

-	call = rxrpc_alloc_call(gfp);
+	call = rxrpc_alloc_call(rx, gfp);
 	if (!call)
 		return ERR_PTR(-ENOMEM);
 	call->state = RXRPC_CALL_CLIENT_AWAIT_CONN;
@@ -177,15 +192,17 @@
  */
 static void rxrpc_start_call_timer(struct rxrpc_call *call)
 {
-	ktime_t now = ktime_get_real(), expire_at;
-
-	expire_at = ktime_add_ms(now, rxrpc_max_call_lifetime);
-	call->expire_at = expire_at;
-	call->ack_at = expire_at;
-	call->ping_at = expire_at;
-	call->resend_at = expire_at;
-	call->timer.expires = jiffies + LONG_MAX / 2;
-	rxrpc_set_timer(call, rxrpc_timer_begin, now);
+	unsigned long now = jiffies;
+	unsigned long j = now + MAX_JIFFY_OFFSET;
+
+	call->ack_at = j;
+	call->ack_lost_at = j;
+	call->resend_at = j;
+	call->ping_at = j;
+	call->expect_rx_by = j;
+	call->expect_req_by = j;
+	call->expect_term_by = j;
+	call->timer.expires = now;
 }

 /*
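[Editor's annotation, not part of the patch] rxrpc_start_call_timer() now parks every per-call deadline at "now + MAX_JIFFY_OFFSET", far enough ahead that time_before() treats it as never-expiring, and lets each subsystem pull its own deadline earlier via timer_reduce() when it actually needs one. A stand-alone C sketch of the parked-deadline scheme:

#include <stdio.h>

#define MAX_JIFFY_OFFSET ((~0UL >> 1) - 1)	/* as in linux/jiffies.h */

int main(void)
{
	unsigned long now = 1000;
	unsigned long never = now + MAX_JIFFY_OFFSET;
	unsigned long ack_at = never, resend_at = never;
	unsigned long next;

	/* first real deadline arrives: only that field moves */
	ack_at = now + 2;

	next = (ack_at < resend_at) ? ack_at : resend_at;
	printf("timer should fire at %lu\n", next);	/* 1002 */
	return 0;
}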
@@ -196,8 +213,7 @@ static void rxrpc_start_call_timer(struct rxrpc_call *call)
 struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
 					 struct rxrpc_conn_parameters *cp,
 					 struct sockaddr_rxrpc *srx,
-					 unsigned long user_call_ID,
-					 s64 tx_total_len,
+					 struct rxrpc_call_params *p,
 					 gfp_t gfp)
 	__releases(&rx->sk.sk_lock.slock)
 {
@@ -207,18 +223,18 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
 	const void *here = __builtin_return_address(0);
 	int ret;

-	_enter("%p,%lx", rx, user_call_ID);
+	_enter("%p,%lx", rx, p->user_call_ID);

-	call = rxrpc_alloc_client_call(srx, gfp);
+	call = rxrpc_alloc_client_call(rx, srx, gfp);
 	if (IS_ERR(call)) {
 		release_sock(&rx->sk);
 		_leave(" = %ld", PTR_ERR(call));
 		return call;
 	}

-	call->tx_total_len = tx_total_len;
+	call->tx_total_len = p->tx_total_len;
 	trace_rxrpc_call(call, rxrpc_call_new_client, atomic_read(&call->usage),
-			 here, (const void *)user_call_ID);
+			 here, (const void *)p->user_call_ID);

 	/* We need to protect a partially set up call against the user as we
 	 * will be acting outside the socket lock.
@@ -234,16 +250,16 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
 		parent = *pp;
 		xcall = rb_entry(parent, struct rxrpc_call, sock_node);

-		if (user_call_ID < xcall->user_call_ID)
+		if (p->user_call_ID < xcall->user_call_ID)
 			pp = &(*pp)->rb_left;
-		else if (user_call_ID > xcall->user_call_ID)
+		else if (p->user_call_ID > xcall->user_call_ID)
 			pp = &(*pp)->rb_right;
 		else
 			goto error_dup_user_ID;
 	}

 	rcu_assign_pointer(call->socket, rx);
-	call->user_call_ID = user_call_ID;
+	call->user_call_ID = p->user_call_ID;
 	__set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
 	rxrpc_get_call(call, rxrpc_call_got_userid);
 	rb_link_node(&call->sock_node, parent, pp);
@@ -85,8 +85,8 @@
 
 __read_mostly unsigned int rxrpc_max_client_connections = 1000;
 __read_mostly unsigned int rxrpc_reap_client_connections = 900;
-__read_mostly unsigned int rxrpc_conn_idle_client_expiry = 2 * 60 * HZ;
-__read_mostly unsigned int rxrpc_conn_idle_client_fast_expiry = 2 * HZ;
+__read_mostly unsigned long rxrpc_conn_idle_client_expiry = 2 * 60 * HZ;
+__read_mostly unsigned long rxrpc_conn_idle_client_fast_expiry = 2 * HZ;
 
 /*
  * We use machine-unique IDs for our client connections.
@@ -554,6 +554,11 @@ static void rxrpc_activate_one_channel(struct rxrpc_connection *conn,
 
 	trace_rxrpc_client(conn, channel, rxrpc_client_chan_activate);
 
+	/* Cancel the final ACK on the previous call if it hasn't been sent yet
+	 * as the DATA packet will implicitly ACK it.
+	 */
+	clear_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags);
+
 	write_lock_bh(&call->state_lock);
 	if (!test_bit(RXRPC_CALL_TX_LASTQ, &call->flags))
 		call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
@@ -686,7 +691,7 @@ int rxrpc_connect_call(struct rxrpc_call *call,
 
 	_enter("{%d,%lx},", call->debug_id, call->user_call_ID);
 
-	rxrpc_discard_expired_client_conns(&rxnet->client_conn_reaper.work);
+	rxrpc_discard_expired_client_conns(&rxnet->client_conn_reaper);
 	rxrpc_cull_active_client_conns(rxnet);
 
 	ret = rxrpc_get_client_conn(call, cp, srx, gfp);
@@ -751,6 +756,18 @@ void rxrpc_expose_client_call(struct rxrpc_call *call)
 	}
 }
 
+/*
+ * Set the reap timer.
+ */
+static void rxrpc_set_client_reap_timer(struct rxrpc_net *rxnet)
+{
+	unsigned long now = jiffies;
+	unsigned long reap_at = now + rxrpc_conn_idle_client_expiry;
+
+	if (rxnet->live)
+		timer_reduce(&rxnet->client_conn_reap_timer, reap_at);
+}
+
 /*
  * Disconnect a client call.
  */
@@ -813,6 +830,19 @@ void rxrpc_disconnect_client_call(struct rxrpc_call *call)
 		goto out_2;
 	}
 
+	/* Schedule the final ACK to be transmitted in a short while so that it
+	 * can be skipped if we find a follow-on call.  The first DATA packet
+	 * of the follow on call will implicitly ACK this call.
+	 */
+	if (test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
+		unsigned long final_ack_at = jiffies + 2;
+
+		WRITE_ONCE(chan->final_ack_at, final_ack_at);
+		smp_wmb(); /* vs rxrpc_process_delayed_final_acks() */
+		set_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags);
+		rxrpc_reduce_conn_timer(conn, final_ack_at);
+	}
+
 	/* Things are more complex and we need the cache lock.  We might be
 	 * able to simply idle the conn or it might now be lurking on the wait
 	 * list.  It might even get moved back to the active list whilst we're
@@ -878,9 +908,7 @@ void rxrpc_disconnect_client_call(struct rxrpc_call *call)
 		list_move_tail(&conn->cache_link, &rxnet->idle_client_conns);
 		if (rxnet->idle_client_conns.next == &conn->cache_link &&
 		    !rxnet->kill_all_client_conns)
-			queue_delayed_work(rxrpc_workqueue,
-					   &rxnet->client_conn_reaper,
-					   rxrpc_conn_idle_client_expiry);
+			rxrpc_set_client_reap_timer(rxnet);
 	} else {
 		trace_rxrpc_client(conn, channel, rxrpc_client_to_inactive);
 		conn->cache_state = RXRPC_CONN_CLIENT_INACTIVE;
@@ -1018,8 +1046,7 @@ void rxrpc_discard_expired_client_conns(struct work_struct *work)
 {
 	struct rxrpc_connection *conn;
 	struct rxrpc_net *rxnet =
-		container_of(to_delayed_work(work),
-			     struct rxrpc_net, client_conn_reaper);
+		container_of(work, struct rxrpc_net, client_conn_reaper);
 	unsigned long expiry, conn_expires_at, now;
 	unsigned int nr_conns;
 	bool did_discard = false;
@@ -1061,6 +1088,8 @@ void rxrpc_discard_expired_client_conns(struct work_struct *work)
 		expiry = rxrpc_conn_idle_client_expiry;
 		if (nr_conns > rxrpc_reap_client_connections)
 			expiry = rxrpc_conn_idle_client_fast_expiry;
+		if (conn->params.local->service_closed)
+			expiry = rxrpc_closed_conn_expiry * HZ;
 
 		conn_expires_at = conn->idle_timestamp + expiry;
 
@@ -1096,9 +1125,8 @@ void rxrpc_discard_expired_client_conns(struct work_struct *work)
 	 */
 	_debug("not yet");
 	if (!rxnet->kill_all_client_conns)
-		queue_delayed_work(rxrpc_workqueue,
-				   &rxnet->client_conn_reaper,
-				   conn_expires_at - now);
+		timer_reduce(&rxnet->client_conn_reap_timer,
+			     conn_expires_at);
 
 out:
 	spin_unlock(&rxnet->client_conn_cache_lock);
@@ -1118,9 +1146,9 @@ void rxrpc_destroy_all_client_connections(struct rxrpc_net *rxnet)
 	rxnet->kill_all_client_conns = true;
 	spin_unlock(&rxnet->client_conn_cache_lock);
 
-	cancel_delayed_work(&rxnet->client_conn_reaper);
+	del_timer_sync(&rxnet->client_conn_reap_timer);
 
-	if (!queue_delayed_work(rxrpc_workqueue, &rxnet->client_conn_reaper, 0))
+	if (!rxrpc_queue_work(&rxnet->client_conn_reaper))
 		_debug("destroy: queue failed");
 
 	_leave("");
@@ -24,9 +24,10 @@
  * Retransmit terminal ACK or ABORT of the previous call.
  */
 static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
-				       struct sk_buff *skb)
+				       struct sk_buff *skb,
+				       unsigned int channel)
 {
-	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+	struct rxrpc_skb_priv *sp = skb ? rxrpc_skb(skb) : NULL;
 	struct rxrpc_channel *chan;
 	struct msghdr msg;
 	struct kvec iov;
@@ -48,7 +49,7 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
 
 	_enter("%d", conn->debug_id);
 
-	chan = &conn->channels[sp->hdr.cid & RXRPC_CHANNELMASK];
+	chan = &conn->channels[channel];
 
 	/* If the last call got moved on whilst we were waiting to run, just
 	 * ignore this packet.
@@ -56,7 +57,7 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
 	call_id = READ_ONCE(chan->last_call);
 	/* Sync with __rxrpc_disconnect_call() */
 	smp_rmb();
-	if (call_id != sp->hdr.callNumber)
+	if (skb && call_id != sp->hdr.callNumber)
 		return;
 
 	msg.msg_name = &conn->params.peer->srx.transport;
@@ -65,9 +66,9 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
 	msg.msg_controllen = 0;
 	msg.msg_flags = 0;
 
-	pkt.whdr.epoch = htonl(sp->hdr.epoch);
-	pkt.whdr.cid = htonl(sp->hdr.cid);
-	pkt.whdr.callNumber = htonl(sp->hdr.callNumber);
+	pkt.whdr.epoch = htonl(conn->proto.epoch);
+	pkt.whdr.cid = htonl(conn->proto.cid);
+	pkt.whdr.callNumber = htonl(call_id);
 	pkt.whdr.seq = 0;
 	pkt.whdr.type = chan->last_type;
 	pkt.whdr.flags = conn->out_clientflag;
@@ -87,11 +88,11 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
 		mtu = conn->params.peer->if_mtu;
 		mtu -= conn->params.peer->hdrsize;
 		pkt.ack.bufferSpace = 0;
-		pkt.ack.maxSkew = htons(skb->priority);
-		pkt.ack.firstPacket = htonl(chan->last_seq);
-		pkt.ack.previousPacket = htonl(chan->last_seq - 1);
-		pkt.ack.serial = htonl(sp->hdr.serial);
-		pkt.ack.reason = RXRPC_ACK_DUPLICATE;
+		pkt.ack.maxSkew = htons(skb ? skb->priority : 0);
+		pkt.ack.firstPacket = htonl(chan->last_seq + 1);
+		pkt.ack.previousPacket = htonl(chan->last_seq);
+		pkt.ack.serial = htonl(skb ? sp->hdr.serial : 0);
+		pkt.ack.reason = skb ? RXRPC_ACK_DUPLICATE : RXRPC_ACK_IDLE;
 		pkt.ack.nAcks = 0;
 		pkt.info.rxMTU = htonl(rxrpc_rx_mtu);
 		pkt.info.maxMTU = htonl(mtu);
@@ -272,7 +273,8 @@ static int rxrpc_process_event(struct rxrpc_connection *conn,
 	switch (sp->hdr.type) {
 	case RXRPC_PACKET_TYPE_DATA:
 	case RXRPC_PACKET_TYPE_ACK:
-		rxrpc_conn_retransmit_call(conn, skb);
+		rxrpc_conn_retransmit_call(conn, skb,
+					   sp->hdr.cid & RXRPC_CHANNELMASK);
 		return 0;
 
 	case RXRPC_PACKET_TYPE_BUSY:
@@ -378,6 +380,48 @@ static void rxrpc_secure_connection(struct rxrpc_connection *conn)
 	_leave(" [aborted]");
 }
 
+/*
+ * Process delayed final ACKs that we haven't subsumed into a subsequent call.
+ */
+static void rxrpc_process_delayed_final_acks(struct rxrpc_connection *conn)
+{
+	unsigned long j = jiffies, next_j;
+	unsigned int channel;
+	bool set;
+
+again:
+	next_j = j + LONG_MAX;
+	set = false;
+	for (channel = 0; channel < RXRPC_MAXCALLS; channel++) {
+		struct rxrpc_channel *chan = &conn->channels[channel];
+		unsigned long ack_at;
+
+		if (!test_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags))
+			continue;
+
+		smp_rmb(); /* vs rxrpc_disconnect_client_call */
+		ack_at = READ_ONCE(chan->final_ack_at);
+
+		if (time_before(j, ack_at)) {
+			if (time_before(ack_at, next_j)) {
+				next_j = ack_at;
+				set = true;
+			}
+			continue;
+		}
+
+		if (test_and_clear_bit(RXRPC_CONN_FINAL_ACK_0 + channel,
+				       &conn->flags))
+			rxrpc_conn_retransmit_call(conn, NULL, channel);
+	}
+
+	j = jiffies;
+	if (time_before_eq(next_j, j))
+		goto again;
+	if (set)
+		rxrpc_reduce_conn_timer(conn, next_j);
+}
+
 /*
  * connection-level event processor
  */
@@ -394,6 +438,10 @@ void rxrpc_process_connection(struct work_struct *work)
 	if (test_and_clear_bit(RXRPC_CONN_EV_CHALLENGE, &conn->events))
 		rxrpc_secure_connection(conn);
 
+	/* Process delayed ACKs whose time has come. */
+	if (conn->flags & RXRPC_CONN_FINAL_ACK_MASK)
+		rxrpc_process_delayed_final_acks(conn);
+
 	/* go through the conn-level event packets, releasing the ref on this
 	 * connection that each one has when we've finished with it */
 	while ((skb = skb_dequeue(&conn->rx_queue))) {
@@ -20,10 +20,19 @@
 /*
  * Time till a connection expires after last use (in seconds).
  */
-unsigned int rxrpc_connection_expiry = 10 * 60;
+unsigned int __read_mostly rxrpc_connection_expiry = 10 * 60;
+unsigned int __read_mostly rxrpc_closed_conn_expiry = 10;
 
 static void rxrpc_destroy_connection(struct rcu_head *);
 
+static void rxrpc_connection_timer(struct timer_list *timer)
+{
+	struct rxrpc_connection *conn =
+		container_of(timer, struct rxrpc_connection, timer);
+
+	rxrpc_queue_conn(conn);
+}
+
 /*
  * allocate a new connection
  */
@@ -38,6 +47,7 @@ struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
 		INIT_LIST_HEAD(&conn->cache_link);
 		spin_lock_init(&conn->channel_lock);
 		INIT_LIST_HEAD(&conn->waiting_calls);
+		timer_setup(&conn->timer, &rxrpc_connection_timer, 0);
 		INIT_WORK(&conn->processor, &rxrpc_process_connection);
 		INIT_LIST_HEAD(&conn->proc_link);
 		INIT_LIST_HEAD(&conn->link);
@@ -300,22 +310,30 @@ rxrpc_get_connection_maybe(struct rxrpc_connection *conn)
 	return conn;
 }
 
+/*
+ * Set the service connection reap timer.
+ */
+static void rxrpc_set_service_reap_timer(struct rxrpc_net *rxnet,
+					 unsigned long reap_at)
+{
+	if (rxnet->live)
+		timer_reduce(&rxnet->service_conn_reap_timer, reap_at);
+}
+
 /*
  * Release a service connection
  */
 void rxrpc_put_service_conn(struct rxrpc_connection *conn)
 {
-	struct rxrpc_net *rxnet;
 	const void *here = __builtin_return_address(0);
 	int n;
 
 	n = atomic_dec_return(&conn->usage);
 	trace_rxrpc_conn(conn, rxrpc_conn_put_service, n, here);
 	ASSERTCMP(n, >=, 0);
-	if (n == 0) {
-		rxnet = conn->params.local->rxnet;
-		rxrpc_queue_delayed_work(&rxnet->service_conn_reaper, 0);
-	}
+	if (n == 1)
+		rxrpc_set_service_reap_timer(conn->params.local->rxnet,
+					     jiffies + rxrpc_connection_expiry);
 }
 
 /*
@@ -332,6 +350,7 @@ static void rxrpc_destroy_connection(struct rcu_head *rcu)
 
 	_net("DESTROY CONN %d", conn->debug_id);
 
+	del_timer_sync(&conn->timer);
 	rxrpc_purge_queue(&conn->rx_queue);
 
 	conn->security->clear(conn);
@@ -351,17 +370,15 @@ void rxrpc_service_connection_reaper(struct work_struct *work)
 {
 	struct rxrpc_connection *conn, *_p;
 	struct rxrpc_net *rxnet =
-		container_of(to_delayed_work(work),
-			     struct rxrpc_net, service_conn_reaper);
-	unsigned long reap_older_than, earliest, idle_timestamp, now;
+		container_of(work, struct rxrpc_net, service_conn_reaper);
+	unsigned long expire_at, earliest, idle_timestamp, now;
 
 	LIST_HEAD(graveyard);
 
 	_enter("");
 
 	now = jiffies;
-	reap_older_than = now - rxrpc_connection_expiry * HZ;
-	earliest = ULONG_MAX;
+	earliest = now + MAX_JIFFY_OFFSET;
 
 	write_lock(&rxnet->conn_lock);
 	list_for_each_entry_safe(conn, _p, &rxnet->service_conns, link) {
@@ -371,15 +388,21 @@ void rxrpc_service_connection_reaper(struct work_struct *work)
 		if (conn->state == RXRPC_CONN_SERVICE_PREALLOC)
 			continue;
 
-		idle_timestamp = READ_ONCE(conn->idle_timestamp);
-		_debug("reap CONN %d { u=%d,t=%ld }",
-		       conn->debug_id, atomic_read(&conn->usage),
-		       (long)reap_older_than - (long)idle_timestamp);
-
-		if (time_after(idle_timestamp, reap_older_than)) {
-			if (time_before(idle_timestamp, earliest))
-				earliest = idle_timestamp;
-			continue;
+		if (rxnet->live) {
+			idle_timestamp = READ_ONCE(conn->idle_timestamp);
+			expire_at = idle_timestamp + rxrpc_connection_expiry * HZ;
+			if (conn->params.local->service_closed)
+				expire_at = idle_timestamp + rxrpc_closed_conn_expiry * HZ;
+
+			_debug("reap CONN %d { u=%d,t=%ld }",
+			       conn->debug_id, atomic_read(&conn->usage),
+			       (long)expire_at - (long)now);
+
+			if (time_before(now, expire_at)) {
+				if (time_before(expire_at, earliest))
+					earliest = expire_at;
+				continue;
+			}
 		}
 
 		/* The usage count sits at 1 whilst the object is unused on the
@@ -387,6 +410,7 @@ void rxrpc_service_connection_reaper(struct work_struct *work)
 		 */
 		if (atomic_cmpxchg(&conn->usage, 1, 0) != 1)
 			continue;
+		trace_rxrpc_conn(conn, rxrpc_conn_reap_service, 0, 0);
 
 		if (rxrpc_conn_is_client(conn))
 			BUG();
@@ -397,11 +421,10 @@ void rxrpc_service_connection_reaper(struct work_struct *work)
 	}
 	write_unlock(&rxnet->conn_lock);
 
-	if (earliest != ULONG_MAX) {
-		_debug("reschedule reaper %ld", (long) earliest - now);
+	if (earliest != now + MAX_JIFFY_OFFSET) {
+		_debug("reschedule reaper %ld", (long)earliest - (long)now);
 		ASSERT(time_after(earliest, now));
-		rxrpc_queue_delayed_work(&rxnet->client_conn_reaper,
-					 earliest - now);
+		rxrpc_set_service_reap_timer(rxnet, earliest);
 	}
 
 	while (!list_empty(&graveyard)) {
@@ -429,9 +452,8 @@ void rxrpc_destroy_all_connections(struct rxrpc_net *rxnet)
 
 	rxrpc_destroy_all_client_connections(rxnet);
 
-	rxrpc_connection_expiry = 0;
-	cancel_delayed_work(&rxnet->client_conn_reaper);
-	rxrpc_queue_delayed_work(&rxnet->client_conn_reaper, 0);
+	del_timer_sync(&rxnet->service_conn_reap_timer);
+	rxrpc_queue_work(&rxnet->service_conn_reaper);
 	flush_workqueue(rxrpc_workqueue);
 
 	write_lock(&rxnet->conn_lock);
@@ -318,16 +318,18 @@ static bool rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun,
 static bool rxrpc_receiving_reply(struct rxrpc_call *call)
 {
 	struct rxrpc_ack_summary summary = { 0 };
+	unsigned long now, timo;
 	rxrpc_seq_t top = READ_ONCE(call->tx_top);
 
 	if (call->ackr_reason) {
 		spin_lock_bh(&call->lock);
 		call->ackr_reason = 0;
-		call->resend_at = call->expire_at;
-		call->ack_at = call->expire_at;
 		spin_unlock_bh(&call->lock);
-		rxrpc_set_timer(call, rxrpc_timer_init_for_reply,
-				ktime_get_real());
+		now = jiffies;
+		timo = now + MAX_JIFFY_OFFSET;
+		WRITE_ONCE(call->resend_at, timo);
+		WRITE_ONCE(call->ack_at, timo);
+		trace_rxrpc_timer(call, rxrpc_timer_init_for_reply, now);
 	}
 
 	if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags))
@@ -437,6 +439,19 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb,
 	if (state >= RXRPC_CALL_COMPLETE)
 		return;
 
+	if (call->state == RXRPC_CALL_SERVER_RECV_REQUEST) {
+		unsigned long timo = READ_ONCE(call->next_req_timo);
+		unsigned long now, expect_req_by;
+
+		if (timo) {
+			now = jiffies;
+			expect_req_by = now + timo;
+			WRITE_ONCE(call->expect_req_by, expect_req_by);
+			rxrpc_reduce_call_timer(call, expect_req_by, now,
+						rxrpc_timer_set_for_idle);
+		}
+	}
+
 	/* Received data implicitly ACKs all of the request packets we sent
 	 * when we're acting as a client.
 	 */
@@ -615,6 +630,43 @@ static void rxrpc_input_requested_ack(struct rxrpc_call *call,
 			   orig_serial, ack_serial, sent_at, resp_time);
 }
 
+/*
+ * Process the response to a ping that we sent to find out if we lost an ACK.
+ *
+ * If we got back a ping response that indicates a lower tx_top than what we
+ * had at the time of the ping transmission, we adjudge all the DATA packets
+ * sent between the response tx_top and the ping-time tx_top to have been lost.
+ */
+static void rxrpc_input_check_for_lost_ack(struct rxrpc_call *call)
+{
+	rxrpc_seq_t top, bottom, seq;
+	bool resend = false;
+
+	spin_lock_bh(&call->lock);
+
+	bottom = call->tx_hard_ack + 1;
+	top = call->acks_lost_top;
+	if (before(bottom, top)) {
+		for (seq = bottom; before_eq(seq, top); seq++) {
+			int ix = seq & RXRPC_RXTX_BUFF_MASK;
+			u8 annotation = call->rxtx_annotations[ix];
+			u8 anno_type = annotation & RXRPC_TX_ANNO_MASK;
+
+			if (anno_type != RXRPC_TX_ANNO_UNACK)
+				continue;
+			annotation &= ~RXRPC_TX_ANNO_MASK;
+			annotation |= RXRPC_TX_ANNO_RETRANS;
+			call->rxtx_annotations[ix] = annotation;
+			resend = true;
+		}
+	}
+
+	spin_unlock_bh(&call->lock);
+
+	if (resend && !test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
+		rxrpc_queue_call(call);
+}
+
 /*
  * Process a ping response.
  */
@@ -630,6 +682,9 @@ static void rxrpc_input_ping_response(struct rxrpc_call *call,
 	smp_rmb();
 	ping_serial = call->ping_serial;
 
+	if (orig_serial == call->acks_lost_ping)
+		rxrpc_input_check_for_lost_ack(call);
+
 	if (!test_bit(RXRPC_CALL_PINGING, &call->flags) ||
 	    before(orig_serial, ping_serial))
 		return;
@@ -908,9 +963,20 @@ static void rxrpc_input_call_packet(struct rxrpc_call *call,
 				    struct sk_buff *skb, u16 skew)
 {
 	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+	unsigned long timo;
 
 	_enter("%p,%p", call, skb);
 
+	timo = READ_ONCE(call->next_rx_timo);
+	if (timo) {
+		unsigned long now = jiffies, expect_rx_by;
+
+		expect_rx_by = jiffies + timo;
+		WRITE_ONCE(call->expect_rx_by, expect_rx_by);
+		rxrpc_reduce_call_timer(call, expect_rx_by, now,
+					rxrpc_timer_set_for_normal);
+	}
+
 	switch (sp->hdr.type) {
 	case RXRPC_PACKET_TYPE_DATA:
 		rxrpc_input_data(call, skb, skew);
@@ -20,34 +20,29 @@
  */
 unsigned int rxrpc_max_backlog __read_mostly = 10;
 
-/*
- * Maximum lifetime of a call (in mx).
- */
-unsigned int rxrpc_max_call_lifetime = 60 * 1000;
-
 /*
  * How long to wait before scheduling ACK generation after seeing a
- * packet with RXRPC_REQUEST_ACK set (in ms).
+ * packet with RXRPC_REQUEST_ACK set (in jiffies).
  */
-unsigned int rxrpc_requested_ack_delay = 1;
+unsigned long rxrpc_requested_ack_delay = 1;
 
 /*
- * How long to wait before scheduling an ACK with subtype DELAY (in ms).
+ * How long to wait before scheduling an ACK with subtype DELAY (in jiffies).
  *
  * We use this when we've received new data packets.  If those packets aren't
  * all consumed within this time we will send a DELAY ACK if an ACK was not
  * requested to let the sender know it doesn't need to resend.
  */
-unsigned int rxrpc_soft_ack_delay = 1 * 1000;
+unsigned long rxrpc_soft_ack_delay = HZ;
 
 /*
- * How long to wait before scheduling an ACK with subtype IDLE (in ms).
+ * How long to wait before scheduling an ACK with subtype IDLE (in jiffies).
  *
  * We use this when we've consumed some previously soft-ACK'd packets when
  * further packets aren't immediately received to decide when to send an IDLE
 * ACK let the other end know that it can free up its Tx buffer space.
 */
-unsigned int rxrpc_idle_ack_delay = 0.5 * 1000;
+unsigned long rxrpc_idle_ack_delay = HZ / 2;
 
 /*
  * Receive window size in packets.  This indicates the maximum number of
@@ -75,7 +70,7 @@ unsigned int rxrpc_rx_jumbo_max = 4;
 /*
  * Time till packet resend (in milliseconds).
  */
-unsigned int rxrpc_resend_timeout = 4 * 1000;
+unsigned long rxrpc_resend_timeout = 4 * HZ;
 
 const s8 rxrpc_ack_priority[] = {
 	[0] = 0,
@@ -14,6 +14,24 @@
 
 unsigned int rxrpc_net_id;
 
+static void rxrpc_client_conn_reap_timeout(struct timer_list *timer)
+{
+	struct rxrpc_net *rxnet =
+		container_of(timer, struct rxrpc_net, client_conn_reap_timer);
+
+	if (rxnet->live)
+		rxrpc_queue_work(&rxnet->client_conn_reaper);
+}
+
+static void rxrpc_service_conn_reap_timeout(struct timer_list *timer)
+{
+	struct rxrpc_net *rxnet =
+		container_of(timer, struct rxrpc_net, service_conn_reap_timer);
+
+	if (rxnet->live)
+		rxrpc_queue_work(&rxnet->service_conn_reaper);
+}
+
 /*
  * Initialise a per-network namespace record.
  */
@@ -22,6 +40,7 @@ static __net_init int rxrpc_init_net(struct net *net)
 	struct rxrpc_net *rxnet = rxrpc_net(net);
 	int ret;
 
+	rxnet->live = true;
 	get_random_bytes(&rxnet->epoch, sizeof(rxnet->epoch));
 	rxnet->epoch |= RXRPC_RANDOM_EPOCH;
 
@@ -31,8 +50,10 @@ static __net_init int rxrpc_init_net(struct net *net)
 	INIT_LIST_HEAD(&rxnet->conn_proc_list);
 	INIT_LIST_HEAD(&rxnet->service_conns);
 	rwlock_init(&rxnet->conn_lock);
-	INIT_DELAYED_WORK(&rxnet->service_conn_reaper,
-			  rxrpc_service_connection_reaper);
+	INIT_WORK(&rxnet->service_conn_reaper,
+		  rxrpc_service_connection_reaper);
+	timer_setup(&rxnet->service_conn_reap_timer,
+		    rxrpc_service_conn_reap_timeout, 0);
 
 	rxnet->nr_client_conns = 0;
 	rxnet->nr_active_client_conns = 0;
@@ -42,8 +63,10 @@ static __net_init int rxrpc_init_net(struct net *net)
 	INIT_LIST_HEAD(&rxnet->waiting_client_conns);
 	INIT_LIST_HEAD(&rxnet->active_client_conns);
 	INIT_LIST_HEAD(&rxnet->idle_client_conns);
-	INIT_DELAYED_WORK(&rxnet->client_conn_reaper,
-			  rxrpc_discard_expired_client_conns);
+	INIT_WORK(&rxnet->client_conn_reaper,
+		  rxrpc_discard_expired_client_conns);
+	timer_setup(&rxnet->client_conn_reap_timer,
+		    rxrpc_client_conn_reap_timeout, 0);
 
 	INIT_LIST_HEAD(&rxnet->local_endpoints);
 	mutex_init(&rxnet->local_mutex);
@@ -60,6 +83,7 @@ static __net_init int rxrpc_init_net(struct net *net)
 	return 0;
 
 err_proc:
+	rxnet->live = false;
 	return ret;
 }
 
@@ -70,6 +94,7 @@ static __net_exit void rxrpc_exit_net(struct net *net)
 {
 	struct rxrpc_net *rxnet = rxrpc_net(net);
 
+	rxnet->live = false;
 	rxrpc_destroy_all_calls(rxnet);
 	rxrpc_destroy_all_connections(rxnet);
 	rxrpc_destroy_all_locals(rxnet);
@@ -32,6 +32,24 @@ struct rxrpc_abort_buffer {
 	__be32 abort_code;
 };
 
+/*
+ * Arrange for a keepalive ping a certain time after we last transmitted.  This
+ * lets the far side know we're still interested in this call and helps keep
+ * the route through any intervening firewall open.
+ *
+ * Receiving a response to the ping will prevent the ->expect_rx_by timer from
+ * expiring.
+ */
+static void rxrpc_set_keepalive(struct rxrpc_call *call)
+{
+	unsigned long now = jiffies, keepalive_at = call->next_rx_timo / 6;
+
+	keepalive_at += now;
+	WRITE_ONCE(call->keepalive_at, keepalive_at);
+	rxrpc_reduce_call_timer(call, keepalive_at, now,
+				rxrpc_timer_set_for_keepalive);
+}
+
 /*
  * Fill out an ACK packet.
  */
@@ -95,7 +113,8 @@ static size_t rxrpc_fill_out_ack(struct rxrpc_connection *conn,
 /*
  * Send an ACK call packet.
  */
-int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping)
+int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
+			  rxrpc_serial_t *_serial)
 {
 	struct rxrpc_connection *conn = NULL;
 	struct rxrpc_ack_buffer *pkt;
@@ -165,6 +184,8 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping)
 			  ntohl(pkt->ack.firstPacket),
 			  ntohl(pkt->ack.serial),
 			  pkt->ack.reason, pkt->ack.nAcks);
+	if (_serial)
+		*_serial = serial;
 
 	if (ping) {
 		call->ping_serial = serial;
@@ -202,6 +223,8 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping)
 				call->ackr_seen = top;
 			spin_unlock_bh(&call->lock);
 		}
+
+		rxrpc_set_keepalive(call);
 	}
 
 out:
@@ -323,7 +346,8 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
 	 * ACKs if a DATA packet appears to have been lost.
 	 */
 	if (!(sp->hdr.flags & RXRPC_LAST_PACKET) &&
-	    (retrans ||
+	    (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events) ||
+	     retrans ||
 	     call->cong_mode == RXRPC_CALL_SLOW_START ||
 	     (call->peer->rtt_usage < 3 && sp->hdr.seq & 1) ||
 	     ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000),
@@ -370,8 +394,23 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
 	if (whdr.flags & RXRPC_REQUEST_ACK) {
 		call->peer->rtt_last_req = now;
 		trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_data, serial);
+		if (call->peer->rtt_usage > 1) {
+			unsigned long nowj = jiffies, ack_lost_at;
+
+			ack_lost_at = nsecs_to_jiffies(2 * call->peer->rtt);
+			if (ack_lost_at < 1)
+				ack_lost_at = 1;
+
+			ack_lost_at += nowj;
+			WRITE_ONCE(call->ack_lost_at, ack_lost_at);
+			rxrpc_reduce_call_timer(call, ack_lost_at, nowj,
+						rxrpc_timer_set_for_lost_ack);
+		}
 	}
 }
 
+	rxrpc_set_keepalive(call);
+
 	_leave(" = %d [%u]", ret, call->peer->maxdata);
 	return ret;
 
@@ -144,11 +144,13 @@ static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial)
 	trace_rxrpc_receive(call, rxrpc_receive_end, 0, call->rx_top);
 	ASSERTCMP(call->rx_hard_ack, ==, call->rx_top);
 
+#if 0 // TODO: May want to transmit final ACK under some circumstances anyway
 	if (call->state == RXRPC_CALL_CLIENT_RECV_REPLY) {
 		rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, 0, serial, true, false,
 				  rxrpc_propose_ack_terminal_ack);
-		rxrpc_send_ack_packet(call, false);
+		rxrpc_send_ack_packet(call, false, NULL);
 	}
+#endif
 
 	write_lock_bh(&call->state_lock);
 
@@ -161,7 +163,7 @@ static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial)
 	case RXRPC_CALL_SERVER_RECV_REQUEST:
 		call->tx_phase = true;
 		call->state = RXRPC_CALL_SERVER_ACK_REQUEST;
-		call->ack_at = call->expire_at;
+		call->expect_req_by = jiffies + MAX_JIFFY_OFFSET;
 		write_unlock_bh(&call->state_lock);
 		rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, 0, serial, false, true,
 				  rxrpc_propose_ack_processing_op);
@@ -217,10 +219,10 @@ static void rxrpc_rotate_rx_window(struct rxrpc_call *call)
 		    after_eq(top, call->ackr_seen + 2) ||
 		    (hard_ack == top && after(hard_ack, call->ackr_consumed)))
 			rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, 0, serial,
-					  true, false,
+					  true, true,
 					  rxrpc_propose_ack_rotate_rx);
-		if (call->ackr_reason)
-			rxrpc_send_ack_packet(call, false);
+		if (call->ackr_reason && call->ackr_reason != RXRPC_ACK_DELAY)
+			rxrpc_send_ack_packet(call, false, NULL);
 	}
 }
 
@@ -21,22 +21,6 @@
 #include <net/af_rxrpc.h>
 #include "ar-internal.h"
 
-enum rxrpc_command {
-	RXRPC_CMD_SEND_DATA,		/* send data message */
-	RXRPC_CMD_SEND_ABORT,		/* request abort generation */
-	RXRPC_CMD_ACCEPT,		/* [server] accept incoming call */
-	RXRPC_CMD_REJECT_BUSY,		/* [server] reject a call as busy */
-};
-
-struct rxrpc_send_params {
-	s64			tx_total_len;	/* Total Tx data length (if send data) */
-	unsigned long		user_call_ID;	/* User's call ID */
-	u32			abort_code;	/* Abort code to Tx (if abort) */
-	enum rxrpc_command	command : 8;	/* The command to implement */
-	bool			exclusive;	/* Shared or exclusive call */
-	bool			upgrade;	/* If the connection is upgradeable */
-};
-
 /*
  * Wait for space to appear in the Tx queue or a signal to occur.
  */
@@ -174,6 +158,7 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
 			       rxrpc_notify_end_tx_t notify_end_tx)
 {
 	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+	unsigned long now;
 	rxrpc_seq_t seq = sp->hdr.seq;
 	int ret, ix;
 	u8 annotation = RXRPC_TX_ANNO_UNACK;
@@ -213,11 +198,11 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
 			break;
 		case RXRPC_CALL_SERVER_ACK_REQUEST:
 			call->state = RXRPC_CALL_SERVER_SEND_REPLY;
-			call->ack_at = call->expire_at;
+			now = jiffies;
+			WRITE_ONCE(call->ack_at, now + MAX_JIFFY_OFFSET);
 			if (call->ackr_reason == RXRPC_ACK_DELAY)
 				call->ackr_reason = 0;
-			__rxrpc_set_timer(call, rxrpc_timer_init_for_send_reply,
-					  ktime_get_real());
+			trace_rxrpc_timer(call, rxrpc_timer_init_for_send_reply, now);
 			if (!last)
 				break;
 			/* Fall through */
@@ -239,14 +224,19 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
 			_debug("need instant resend %d", ret);
 			rxrpc_instant_resend(call, ix);
 		} else {
-			ktime_t now = ktime_get_real(), resend_at;
+			unsigned long now = jiffies, resend_at;
 
-			resend_at = ktime_add_ms(now, rxrpc_resend_timeout);
-
-			if (ktime_before(resend_at, call->resend_at)) {
-				call->resend_at = resend_at;
-				rxrpc_set_timer(call, rxrpc_timer_set_for_send, now);
-			}
+			if (call->peer->rtt_usage > 1)
+				resend_at = nsecs_to_jiffies(call->peer->rtt * 3 / 2);
+			else
+				resend_at = rxrpc_resend_timeout;
+			if (resend_at < 1)
+				resend_at = 1;
+
+			resend_at = now + rxrpc_resend_timeout;
+			WRITE_ONCE(call->resend_at, resend_at);
+			rxrpc_reduce_call_timer(call, resend_at, now,
+						rxrpc_timer_set_for_send);
 		}
 
 	rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
@@ -295,7 +285,7 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
 	do {
 		/* Check to see if there's a ping ACK to reply to. */
 		if (call->ackr_reason == RXRPC_ACK_PING_RESPONSE)
			rxrpc_send_ack_packet(call, false);
 hold
@ -21,6 +21,8 @@ static const unsigned int four = 4;
|
||||||
static const unsigned int thirtytwo = 32;
|
static const unsigned int thirtytwo = 32;
|
||||||
static const unsigned int n_65535 = 65535;
|
--- a/net/rxrpc/sysctl.c
+++ b/net/rxrpc/sysctl.c
 static const unsigned int n_65535 = 65535;
 static const unsigned int n_max_acks = RXRPC_RXTX_BUFF_SIZE - 1;
+static const unsigned long one_jiffy = 1;
+static const unsigned long max_jiffies = MAX_JIFFY_OFFSET;
 
 /*
  * RxRPC operating parameters.
@@ -29,64 +31,60 @@ static const unsigned int n_max_acks = RXRPC_RXTX_BUFF_SIZE - 1;
  * information on the individual parameters.
  */
 static struct ctl_table rxrpc_sysctl_table[] = {
-	/* Values measured in milliseconds */
+	/* Values measured in milliseconds but used in jiffies */
 	{
 		.procname	= "req_ack_delay",
 		.data		= &rxrpc_requested_ack_delay,
-		.maxlen		= sizeof(unsigned int),
+		.maxlen		= sizeof(unsigned long),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
-		.extra1		= (void *)&zero,
+		.proc_handler	= proc_doulongvec_ms_jiffies_minmax,
+		.extra1		= (void *)&one_jiffy,
+		.extra2		= (void *)&max_jiffies,
 	},
 	{
 		.procname	= "soft_ack_delay",
 		.data		= &rxrpc_soft_ack_delay,
-		.maxlen		= sizeof(unsigned int),
+		.maxlen		= sizeof(unsigned long),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
-		.extra1		= (void *)&one,
+		.proc_handler	= proc_doulongvec_ms_jiffies_minmax,
+		.extra1		= (void *)&one_jiffy,
+		.extra2		= (void *)&max_jiffies,
 	},
 	{
 		.procname	= "idle_ack_delay",
 		.data		= &rxrpc_idle_ack_delay,
-		.maxlen		= sizeof(unsigned int),
+		.maxlen		= sizeof(unsigned long),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
-		.extra1		= (void *)&one,
-	},
-	{
-		.procname	= "resend_timeout",
-		.data		= &rxrpc_resend_timeout,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
-		.extra1		= (void *)&one,
+		.proc_handler	= proc_doulongvec_ms_jiffies_minmax,
+		.extra1		= (void *)&one_jiffy,
+		.extra2		= (void *)&max_jiffies,
 	},
 	{
 		.procname	= "idle_conn_expiry",
 		.data		= &rxrpc_conn_idle_client_expiry,
-		.maxlen		= sizeof(unsigned int),
+		.maxlen		= sizeof(unsigned long),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec_ms_jiffies,
-		.extra1		= (void *)&one,
+		.proc_handler	= proc_doulongvec_ms_jiffies_minmax,
+		.extra1		= (void *)&one_jiffy,
+		.extra2		= (void *)&max_jiffies,
 	},
 	{
 		.procname	= "idle_conn_fast_expiry",
 		.data		= &rxrpc_conn_idle_client_fast_expiry,
-		.maxlen		= sizeof(unsigned int),
+		.maxlen		= sizeof(unsigned long),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec_ms_jiffies,
-		.extra1		= (void *)&one,
+		.proc_handler	= proc_doulongvec_ms_jiffies_minmax,
+		.extra1		= (void *)&one_jiffy,
+		.extra2		= (void *)&max_jiffies,
 	},
-
-	/* Values measured in seconds but used in jiffies */
 	{
-		.procname	= "max_call_lifetime",
-		.data		= &rxrpc_max_call_lifetime,
-		.maxlen		= sizeof(unsigned int),
+		.procname	= "resend_timeout",
+		.data		= &rxrpc_resend_timeout,
+		.maxlen		= sizeof(unsigned long),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
-		.extra1		= (void *)&one,
+		.proc_handler	= proc_doulongvec_ms_jiffies_minmax,
+		.extra1		= (void *)&one_jiffy,
+		.extra2		= (void *)&max_jiffies,
 	},
 
 	/* Non-time values */
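
For reference, the handler swap in this table changes both the storage type and the unit handling: proc_doulongvec_ms_jiffies_minmax presents the value to userspace in milliseconds, stores it as an unsigned long in jiffies, and clamps the stored value to the jiffies bounds passed via extra1/extra2. That is why maxlen must become sizeof(unsigned long) in the same hunk: the handler derives the number of array entries from maxlen / sizeof(unsigned long), so a stale sizeof(unsigned int) would leave the entry silently inert on 64-bit. A kernel-style sketch of such an entry (hypothetical names, not part of the patch, not a standalone program):

	static unsigned long demo_delay_j = HZ / 10;	/* stored in jiffies */
	static const unsigned long demo_min = 1;	/* at least one jiffy */
	static const unsigned long demo_max = MAX_JIFFY_OFFSET;

	static struct ctl_table demo_table[] = {
		{
			.procname	= "demo_delay",
			.data		= &demo_delay_j,
			.maxlen		= sizeof(unsigned long), /* must match the handler */
			.mode		= 0644,
			.proc_handler	= proc_doulongvec_ms_jiffies_minmax,
			.extra1		= (void *)&demo_min,	/* bounds are in jiffies */
			.extra2		= (void *)&demo_max,
		},
		{ }
	};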
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -336,7 +336,8 @@ static void tcf_block_put_final(struct work_struct *work)
 	struct tcf_chain *chain, *tmp;
 
 	rtnl_lock();
-	/* Only chain 0 should be still here. */
+
+	/* At this point, all the chains should have refcnt == 1. */
 	list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
 		tcf_chain_put(chain);
 	rtnl_unlock();
@@ -344,15 +345,21 @@ static void tcf_block_put_final(struct work_struct *work)
 }
 
 /* XXX: Standalone actions are not allowed to jump to any chain, and bound
- * actions should be all removed after flushing. However, filters are now
- * destroyed in tc filter workqueue with RTNL lock, they can not race here.
+ * actions should be all removed after flushing.
  */
 void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
 		       struct tcf_block_ext_info *ei)
 {
-	struct tcf_chain *chain, *tmp;
+	struct tcf_chain *chain;
 
-	list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
+	/* Hold a refcnt for all chains, except 0, so that they don't disappear
+	 * while we are iterating.
+	 */
+	list_for_each_entry(chain, &block->chain_list, list)
+		if (chain->index)
+			tcf_chain_hold(chain);
+
+	list_for_each_entry(chain, &block->chain_list, list)
 		tcf_chain_flush(chain);
 
 	tcf_block_offload_unbind(block, q, ei);
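
The rewritten tcf_block_put_ext() pins every chain except chain 0 before flushing, so a flush that drops the last reference to another chain can no longer free a list node the iterator has yet to visit; that is also why the _safe iterator and its tmp cursor can go away. A runnable miniature of the hold-then-flush idea (all names hypothetical, not the tc API):

	#include <stdio.h>

	struct chain {
		int index;
		int refcnt;
		struct chain *next;
	};

	static void chain_hold(struct chain *c) { c->refcnt++; }

	static void flush_all(struct chain *head)
	{
		struct chain *c;

		/* Pass 1: pin everything (tc excepts chain 0, which the
		 * block itself keeps alive).
		 */
		for (c = head; c; c = c->next)
			if (c->index)
				chain_hold(c);

		/* Pass 2: flushing one chain can now drop references held
		 * by its actions without freeing the next node.
		 */
		for (c = head; c; c = c->next)
			printf("flush chain %d (refcnt %d)\n", c->index, c->refcnt);
	}

	int main(void)
	{
		struct chain c1 = { 1, 1, NULL };
		struct chain c0 = { 0, 1, &c1 };

		flush_all(&c0);
		return 0;
	}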
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -258,11 +258,8 @@ static int cls_bpf_init(struct tcf_proto *tp)
 	return 0;
 }
 
-static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog)
+static void cls_bpf_free_parms(struct cls_bpf_prog *prog)
 {
-	tcf_exts_destroy(&prog->exts);
-	tcf_exts_put_net(&prog->exts);
-
 	if (cls_bpf_is_ebpf(prog))
 		bpf_prog_put(prog->filter);
 	else
@@ -270,6 +267,14 @@ static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog)
 
 	kfree(prog->bpf_name);
 	kfree(prog->bpf_ops);
+}
+
+static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog)
+{
+	tcf_exts_destroy(&prog->exts);
+	tcf_exts_put_net(&prog->exts);
+
+	cls_bpf_free_parms(prog);
 	kfree(prog);
 }
 
@@ -514,12 +519,8 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
 		goto errout_idr;
 
 	ret = cls_bpf_offload(tp, prog, oldprog);
-	if (ret) {
-		if (!oldprog)
-			idr_remove_ext(&head->handle_idr, prog->handle);
-		__cls_bpf_delete_prog(prog);
-		return ret;
-	}
+	if (ret)
+		goto errout_parms;
 
 	if (!tc_in_hw(prog->gen_flags))
 		prog->gen_flags |= TCA_CLS_FLAGS_NOT_IN_HW;
@@ -537,6 +538,8 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
 	*arg = prog;
 	return 0;
 
+errout_parms:
+	cls_bpf_free_parms(prog);
 errout_idr:
 	if (!oldprog)
 		idr_remove_ext(&head->handle_idr, prog->handle);
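
Splitting cls_bpf_free_parms() out of __cls_bpf_delete_prog() lets the offload-failure path release the program's parameters without touching the exts, whose net reference the failing path never took. The result is the standard goto-ladder unwind; a compilable miniature (stub names, not the kernel API):

	#include <stdio.h>

	static int alloc_parms(void) { return 0; }
	static int offload(void)     { return -1; }	/* simulate offload failure */
	static void free_parms(void) { puts("free_parms (cf. cls_bpf_free_parms)"); }

	static int change(void)
	{
		int ret;

		ret = alloc_parms();
		if (ret)
			return ret;

		ret = offload();
		if (ret)
			goto errout_parms;	/* undo only what already succeeded */

		return 0;

	errout_parms:
		free_parms();
		return ret;
	}

	int main(void)
	{
		return change() ? 1 : 0;
	}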
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -1158,9 +1158,13 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
 	if ((q->link.R_tab = qdisc_get_rtab(r, tb[TCA_CBQ_RTAB])) == NULL)
 		return -EINVAL;
 
+	err = tcf_block_get(&q->link.block, &q->link.filter_list, sch);
+	if (err)
+		goto put_rtab;
+
 	err = qdisc_class_hash_init(&q->clhash);
 	if (err < 0)
-		goto put_rtab;
+		goto put_block;
 
 	q->link.sibling = &q->link;
 	q->link.common.classid = sch->handle;
@@ -1194,6 +1198,9 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
 	cbq_addprio(q, &q->link);
 	return 0;
 
+put_block:
+	tcf_block_put(q->link.block);
+
 put_rtab:
 	qdisc_put_rtab(q->link.R_tab);
 	return err;
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -724,6 +724,7 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
 	int i;
 	int err;
 
+	q->sch = sch;
 	timer_setup(&q->perturb_timer, sfq_perturbation, TIMER_DEFERRABLE);
 
 	err = tcf_block_get(&q->block, &q->filter_list, sch);
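
The added q->sch = sch is what makes the timer_setup() conversion above safe: a new-style timer callback receives only the timer_list pointer and recovers its context with from_timer(), a container_of() wrapper, so the private data must carry its own back-pointer to the Qdisc before the timer can ever fire. A kernel-style sketch of the callback's view (trimmed and illustrative, not the full sch_sfq source, not standalone):

	struct sfq_sched_data {			/* trimmed */
		struct Qdisc *sch;		/* back-pointer set in sfq_init() */
		struct timer_list perturb_timer;
	};

	static void sfq_perturbation(struct timer_list *t)
	{
		/* from_timer(q, t, perturb_timer) expands to
		 * container_of(t, struct sfq_sched_data, perturb_timer),
		 * recovering the private struct from the embedded timer.
		 */
		struct sfq_sched_data *q = from_timer(q, t, perturb_timer);
		struct Qdisc *sch = q->sch;	/* NULL until sfq_init() sets it */

		/* ... perturb and reschedule under sch's lock ... */
	}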
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -1499,6 +1499,7 @@ static __init int sctp_init(void)
 	INIT_LIST_HEAD(&sctp_address_families);
 	sctp_v4_pf_init();
 	sctp_v6_pf_init();
+	sctp_sched_ops_init();
 
 	status = register_pernet_subsys(&sctp_defaults_ops);
 	if (status)
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -188,13 +188,13 @@ static void sctp_for_each_tx_datachunk(struct sctp_association *asoc,
 		list_for_each_entry(chunk, &t->transmitted, transmitted_list)
 			cb(chunk);
 
-	list_for_each_entry(chunk, &q->retransmit, list)
+	list_for_each_entry(chunk, &q->retransmit, transmitted_list)
 		cb(chunk);
 
-	list_for_each_entry(chunk, &q->sacked, list)
+	list_for_each_entry(chunk, &q->sacked, transmitted_list)
 		cb(chunk);
 
-	list_for_each_entry(chunk, &q->abandoned, list)
+	list_for_each_entry(chunk, &q->abandoned, transmitted_list)
 		cb(chunk);
 
 	list_for_each_entry(chunk, &q->out_chunk_list, list)
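
The three s/list/transmitted_list/ substitutions matter because list_for_each_entry(pos, head, member) computes each entry as container_of(node, type, member), subtracting offsetof(type, member) from the node pointer. Chunks sit on the retransmit, sacked and abandoned queues through their transmitted_list member, so walking those queues with the list member skews every recovered chunk pointer by the distance between the two members. A runnable illustration of that failure mode (toy struct, hypothetical names):

	#include <stdio.h>
	#include <stddef.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	/* Toy stand-in for a struct with two embedded list anchors, like
	 * sctp_chunk's "list" and "transmitted_list" members.
	 */
	struct chunk {
		long payload;
		char list_member[16];		/* stand-in for struct list_head list */
		char transmitted_member[16];	/* stand-in for transmitted_list */
	};

	int main(void)
	{
		struct chunk c = { .payload = 42 };
		void *node = &c.transmitted_member;	/* queued via transmitted_list */

		struct chunk *ok  = container_of(node, struct chunk, transmitted_member);
		struct chunk *bad = container_of(node, struct chunk, list_member);

		/* "bad" is skewed by the distance between the two members,
		 * exactly the corruption the substitution removes.
		 */
		printf("ok=%p bad=%p skew=%td bytes\n",
		       (void *)ok, (void *)bad, (char *)bad - (char *)ok);
		return 0;
	}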
--- a/net/sctp/stream.c
+++ b/net/sctp/stream.c
@@ -64,7 +64,7 @@ static void sctp_stream_outq_migrate(struct sctp_stream *stream,
 		 */
 
 		/* Mark as failed send. */
-		sctp_chunk_fail(ch, SCTP_ERROR_INV_STRM);
+		sctp_chunk_fail(ch, (__force __u32)SCTP_ERROR_INV_STRM);
 		if (asoc->peer.prsctp_capable &&
 		    SCTP_PR_PRIO_ENABLED(ch->sinfo.sinfo_flags))
 			asoc->sent_cnt_removable--;
@@ -254,6 +254,30 @@ static int sctp_send_reconf(struct sctp_association *asoc,
 	return retval;
 }
 
+static bool sctp_stream_outq_is_empty(struct sctp_stream *stream,
+				      __u16 str_nums, __be16 *str_list)
+{
+	struct sctp_association *asoc;
+	__u16 i;
+
+	asoc = container_of(stream, struct sctp_association, stream);
+	if (!asoc->outqueue.out_qlen)
+		return true;
+
+	if (!str_nums)
+		return false;
+
+	for (i = 0; i < str_nums; i++) {
+		__u16 sid = ntohs(str_list[i]);
+
+		if (stream->out[sid].ext &&
+		    !list_empty(&stream->out[sid].ext->outq))
+			return false;
+	}
+
+	return true;
+}
+
 int sctp_send_reset_streams(struct sctp_association *asoc,
 			    struct sctp_reset_streams *params)
 {
@@ -317,6 +341,11 @@ int sctp_send_reset_streams(struct sctp_association *asoc,
 	for (i = 0; i < str_nums; i++)
 		nstr_list[i] = htons(str_list[i]);
 
+	if (out && !sctp_stream_outq_is_empty(stream, str_nums, nstr_list)) {
+		retval = -EAGAIN;
+		goto out;
+	}
+
 	chunk = sctp_make_strreset_req(asoc, str_nums, nstr_list, out, in);
 
 	kfree(nstr_list);
@@ -377,6 +406,9 @@ int sctp_send_reset_assoc(struct sctp_association *asoc)
 	if (asoc->strreset_outstanding)
 		return -EINPROGRESS;
 
+	if (!sctp_outq_is_empty(&asoc->outqueue))
+		return -EAGAIN;
+
 	chunk = sctp_make_strreset_tsnreq(asoc);
 	if (!chunk)
 		return -ENOMEM;
@@ -563,7 +595,7 @@ struct sctp_chunk *sctp_process_strreset_outreq(
 		flags = SCTP_STREAM_RESET_INCOMING_SSN;
 	}
 
-	nums = (ntohs(param.p->length) - sizeof(*outreq)) / 2;
+	nums = (ntohs(param.p->length) - sizeof(*outreq)) / sizeof(__u16);
 	if (nums) {
 		str_p = outreq->list_of_streams;
 		for (i = 0; i < nums; i++) {
@@ -627,7 +659,7 @@ struct sctp_chunk *sctp_process_strreset_inreq(
 		goto out;
 	}
 
-	nums = (ntohs(param.p->length) - sizeof(*inreq)) / 2;
+	nums = (ntohs(param.p->length) - sizeof(*inreq)) / sizeof(__u16);
 	str_p = inreq->list_of_streams;
 	for (i = 0; i < nums; i++) {
 		if (ntohs(str_p[i]) >= stream->outcnt) {
@@ -636,6 +668,12 @@ struct sctp_chunk *sctp_process_strreset_inreq(
 		}
 	}
 
+	if (!sctp_stream_outq_is_empty(stream, nums, str_p)) {
+		result = SCTP_STRRESET_IN_PROGRESS;
+		asoc->strreset_inseq--;
+		goto err;
+	}
+
 	chunk = sctp_make_strreset_req(asoc, nums, str_p, 1, 0);
 	if (!chunk)
 		goto out;
@@ -687,12 +725,18 @@ struct sctp_chunk *sctp_process_strreset_tsnreq(
 		i = asoc->strreset_inseq - request_seq - 1;
 		result = asoc->strreset_result[i];
 		if (result == SCTP_STRRESET_PERFORMED) {
-			next_tsn = asoc->next_tsn;
+			next_tsn = asoc->ctsn_ack_point + 1;
 			init_tsn =
 				sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map) + 1;
 		}
 		goto err;
 	}
 
+	if (!sctp_outq_is_empty(&asoc->outqueue)) {
+		result = SCTP_STRRESET_IN_PROGRESS;
+		goto err;
+	}
+
 	asoc->strreset_inseq++;
 
 	if (!(asoc->strreset_enable & SCTP_ENABLE_RESET_ASSOC_REQ))
@@ -703,9 +747,10 @@ struct sctp_chunk *sctp_process_strreset_tsnreq(
 		goto out;
 	}
 
-	/* G3: The same processing as though a SACK chunk with no gap report
-	 * and a cumulative TSN ACK of the Sender's Next TSN minus 1 were
-	 * received MUST be performed.
+	/* G4: The same processing as though a FWD-TSN chunk (as defined in
+	 * [RFC3758]) with all streams affected and a new cumulative TSN
+	 * ACK of the Receiver's Next TSN minus 1 were received MUST be
+	 * performed.
 	 */
 	max_tsn_seen = sctp_tsnmap_get_max_tsn_seen(&asoc->peer.tsn_map);
 	sctp_ulpq_reasm_flushtsn(&asoc->ulpq, max_tsn_seen);
@@ -720,10 +765,9 @@ struct sctp_chunk *sctp_process_strreset_tsnreq(
 	sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL,
 			 init_tsn, GFP_ATOMIC);
 
-	/* G4: The same processing as though a FWD-TSN chunk (as defined in
-	 * [RFC3758]) with all streams affected and a new cumulative TSN
-	 * ACK of the Receiver's Next TSN minus 1 were received MUST be
-	 * performed.
+	/* G3: The same processing as though a SACK chunk with no gap report
+	 * and a cumulative TSN ACK of the Sender's Next TSN minus 1 were
+	 * received MUST be performed.
 	 */
 	sctp_outq_free(&asoc->outqueue);
 
@@ -927,7 +971,8 @@ struct sctp_chunk *sctp_process_strreset_resp(
 
 		outreq = (struct sctp_strreset_outreq *)req;
 		str_p = outreq->list_of_streams;
-		nums = (ntohs(outreq->param_hdr.length) - sizeof(*outreq)) / 2;
+		nums = (ntohs(outreq->param_hdr.length) - sizeof(*outreq)) /
+		       sizeof(__u16);
 
 		if (result == SCTP_STRRESET_PERFORMED) {
 			if (nums) {
@@ -956,7 +1001,8 @@ struct sctp_chunk *sctp_process_strreset_resp(
 
 		inreq = (struct sctp_strreset_inreq *)req;
 		str_p = inreq->list_of_streams;
-		nums = (ntohs(inreq->param_hdr.length) - sizeof(*inreq)) / 2;
+		nums = (ntohs(inreq->param_hdr.length) - sizeof(*inreq)) /
+		       sizeof(__u16);
 
 		*evp = sctp_ulpevent_make_stream_reset_event(asoc, flags,
 			nums, str_p, GFP_ATOMIC);
@@ -975,6 +1021,7 @@ struct sctp_chunk *sctp_process_strreset_resp(
 		if (result == SCTP_STRRESET_PERFORMED) {
 			__u32 mtsn = sctp_tsnmap_get_max_tsn_seen(
 						&asoc->peer.tsn_map);
+			LIST_HEAD(temp);
 
 			sctp_ulpq_reasm_flushtsn(&asoc->ulpq, mtsn);
 			sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
@@ -983,7 +1030,13 @@ struct sctp_chunk *sctp_process_strreset_resp(
 					 SCTP_TSN_MAP_INITIAL,
 					 stsn, GFP_ATOMIC);
 
+			/* Clean up sacked and abandoned queues only. As the
+			 * out_chunk_list may not be empty, splice it to temp,
+			 * then get it back after sctp_outq_free is done.
+			 */
+			list_splice_init(&asoc->outqueue.out_chunk_list, &temp);
 			sctp_outq_free(&asoc->outqueue);
+			list_splice_init(&temp, &asoc->outqueue.out_chunk_list);
 
 			asoc->next_tsn = rtsn;
 			asoc->ctsn_ack_point = asoc->next_tsn - 1;
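
Two details in the stream.c hunks are worth spelling out. The repeated "/ 2" to "/ sizeof(__u16)" rewrites do not change the computed value, since list_of_streams is an array of 16-bit stream numbers; they make the length arithmetic self-describing. And sctp_stream_outq_is_empty() plus the sctp_outq_is_empty() checks enforce that a reset is neither requested nor processed while affected data is still queued, answering -EAGAIN or SCTP_STRRESET_IN_PROGRESS instead. A runnable sketch of the TLV length arithmetic (illustrative layout, not the exact kernel struct):

	#include <stdio.h>
	#include <stdint.h>

	/* Illustrative layout: a parameter header followed by a flexible
	 * array of 16-bit stream numbers.
	 */
	struct outreq_like {
		uint16_t type;
		uint16_t length;	/* whole TLV, header included */
		uint32_t request_seq;
		uint32_t response_seq;
		uint32_t send_reset_at_tsn;
		/* uint16_t list_of_streams[]; */
	};

	int main(void)
	{
		/* A request carrying three stream numbers. */
		uint16_t param_len = sizeof(struct outreq_like) + 3 * sizeof(uint16_t);

		/* The patched computation: payload bytes divided by the
		 * element size rather than a bare "/ 2".
		 */
		unsigned int nums =
			(param_len - sizeof(struct outreq_like)) / sizeof(uint16_t);

		printf("nums = %u\n", nums);	/* prints 3 */
		return 0;
	}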
--- a/net/sctp/stream_sched.c
+++ b/net/sctp/stream_sched.c
@@ -119,16 +119,27 @@ static struct sctp_sched_ops sctp_sched_fcfs = {
 	.unsched_all = sctp_sched_fcfs_unsched_all,
 };
 
+static void sctp_sched_ops_fcfs_init(void)
+{
+	sctp_sched_ops_register(SCTP_SS_FCFS, &sctp_sched_fcfs);
+}
+
 /* API to other parts of the stack */
 
-extern struct sctp_sched_ops sctp_sched_prio;
-extern struct sctp_sched_ops sctp_sched_rr;
+static struct sctp_sched_ops *sctp_sched_ops[SCTP_SS_MAX + 1];
 
-static struct sctp_sched_ops *sctp_sched_ops[] = {
-	&sctp_sched_fcfs,
-	&sctp_sched_prio,
-	&sctp_sched_rr,
-};
+void sctp_sched_ops_register(enum sctp_sched_type sched,
+			     struct sctp_sched_ops *sched_ops)
+{
+	sctp_sched_ops[sched] = sched_ops;
+}
+
+void sctp_sched_ops_init(void)
+{
+	sctp_sched_ops_fcfs_init();
+	sctp_sched_ops_prio_init();
+	sctp_sched_ops_rr_init();
+}
 
 int sctp_sched_set_sched(struct sctp_association *asoc,
 			 enum sctp_sched_type sched)
--- a/net/sctp/stream_sched_prio.c
+++ b/net/sctp/stream_sched_prio.c
@@ -333,7 +333,7 @@ static void sctp_sched_prio_unsched_all(struct sctp_stream *stream)
 			sctp_sched_prio_unsched(soute);
 }
 
-struct sctp_sched_ops sctp_sched_prio = {
+static struct sctp_sched_ops sctp_sched_prio = {
 	.set = sctp_sched_prio_set,
 	.get = sctp_sched_prio_get,
 	.init = sctp_sched_prio_init,
@@ -345,3 +345,8 @@ struct sctp_sched_ops sctp_sched_prio = {
 	.sched_all = sctp_sched_prio_sched_all,
 	.unsched_all = sctp_sched_prio_unsched_all,
 };
+
+void sctp_sched_ops_prio_init(void)
+{
+	sctp_sched_ops_register(SCTP_SS_PRIO, &sctp_sched_prio);
+}
--- a/net/sctp/stream_sched_rr.c
+++ b/net/sctp/stream_sched_rr.c
@@ -187,7 +187,7 @@ static void sctp_sched_rr_unsched_all(struct sctp_stream *stream)
 		sctp_sched_rr_unsched(stream, soute);
 }
 
-struct sctp_sched_ops sctp_sched_rr = {
+static struct sctp_sched_ops sctp_sched_rr = {
 	.set = sctp_sched_rr_set,
 	.get = sctp_sched_rr_get,
 	.init = sctp_sched_rr_init,
@@ -199,3 +199,8 @@ struct sctp_sched_ops sctp_sched_rr = {
 	.sched_all = sctp_sched_rr_sched_all,
 	.unsched_all = sctp_sched_rr_unsched_all,
 };
+
+void sctp_sched_ops_rr_init(void)
+{
+	sctp_sched_ops_register(SCTP_SS_RR, &sctp_sched_rr);
+}
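
Across stream_sched.c, stream_sched_prio.c and stream_sched_rr.c the pattern is the same: each sctp_sched_ops instance becomes static to its own file and is published through sctp_sched_ops_register() at init time, replacing the extern declarations and the cross-file initializer array. A runnable miniature of that registration pattern (names local to this sketch):

	#include <stdio.h>

	enum sched_type { SS_FCFS, SS_PRIO, SS_RR, SS_MAX = SS_RR };

	struct sched_ops { const char *name; };

	static struct sched_ops *sched_ops[SS_MAX + 1];

	static void ops_register(enum sched_type t, struct sched_ops *ops)
	{
		sched_ops[t] = ops;
	}

	/* Each scheduler keeps its ops static and exposes only an init hook. */
	static struct sched_ops fcfs = { "fcfs" };
	static void fcfs_init(void) { ops_register(SS_FCFS, &fcfs); }

	static struct sched_ops prio = { "prio" };
	static void prio_init(void) { ops_register(SS_PRIO, &prio); }

	static struct sched_ops rr = { "rr" };
	static void rr_init(void) { ops_register(SS_RR, &rr); }

	static void ops_init(void)	/* analogue of sctp_sched_ops_init() */
	{
		fcfs_init();
		prio_init();
		rr_init();
	}

	int main(void)
	{
		ops_init();
		for (int t = SS_FCFS; t <= SS_MAX; t++)
			printf("%d -> %s\n", t, sched_ops[t]->name);
		return 0;
	}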
--- a/net/tipc/group.c
+++ b/net/tipc/group.c
@@ -497,6 +497,7 @@ void tipc_group_filter_msg(struct tipc_group *grp, struct sk_buff_head *inputq,
 	while ((skb = skb_peek(defq))) {
 		hdr = buf_msg(skb);
 		mtyp = msg_type(hdr);
+		blks = msg_blocks(hdr);
 		deliver = true;
 		ack = false;
 		update = false;
@@ -546,7 +547,6 @@ void tipc_group_filter_msg(struct tipc_group *grp, struct sk_buff_head *inputq,
 		if (!update)
 			continue;
 
-		blks = msg_blocks(hdr);
 		tipc_group_update_rcv_win(grp, blks, node, port, xmitq);
 	}
 	return;
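
Hoisting blks = msg_blocks(hdr) to the top of the loop matters because the deliver branch hands the skb, and with it hdr, to the input queue, after which another context may consume and free it; reading the header again at the bottom of the loop was a use after free. The safe shape is to capture what you need before ownership transfers; a runnable sketch (illustrative names, not the TIPC API):

	#include <stdio.h>
	#include <stdlib.h>

	struct msg { int blocks; };

	/* Ownership transfers here; the consumer may free the message at
	 * any time afterwards.
	 */
	static void deliver(struct msg *m)
	{
		free(m);
	}

	int main(void)
	{
		struct msg *hdr = malloc(sizeof(*hdr));

		if (!hdr)
			return 1;
		hdr->blocks = 7;

		int blks = hdr->blocks;	/* capture first, as the patch does */

		deliver(hdr);
		/* Touching hdr->blocks here would be the use after free the
		 * patch removes; the saved copy is fine.
		 */
		printf("blks=%d\n", blks);
		return 0;
	}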
--- a/net/vmw_vsock/vmci_transport.c
+++ b/net/vmw_vsock/vmci_transport.c
@@ -797,11 +797,13 @@ static void vmci_transport_handle_detach(struct sock *sk)
 
 		/* We should not be sending anymore since the peer won't be
 		 * there to receive, but we can still receive if there is data
-		 * left in our consume queue.
+		 * left in our consume queue. If the local endpoint is a host,
+		 * we can't call vsock_stream_has_data, since that may block,
+		 * but a host endpoint can't read data once the VM has
+		 * detached, so there is no available data in that case.
 		 */
-		if (vsock_stream_has_data(vsk) <= 0) {
-			sk->sk_state = TCP_CLOSE;
-
+		if (vsk->local_addr.svm_cid == VMADDR_CID_HOST ||
+		    vsock_stream_has_data(vsk) <= 0) {
 			if (sk->sk_state == TCP_SYN_SENT) {
 				/* The peer may detach from a queue pair while
 				 * we are still in the connecting state, i.e.,
@@ -811,10 +813,12 @@ static void vmci_transport_handle_detach(struct sock *sk)
 				 * event like a reset.
 				 */
 
+				sk->sk_state = TCP_CLOSE;
 				sk->sk_err = ECONNRESET;
 				sk->sk_error_report(sk);
 				return;
 			}
+			sk->sk_state = TCP_CLOSE;
 		}
 		sk->sk_state_change(sk);
 	}
@@ -2144,7 +2148,7 @@ module_exit(vmci_transport_exit);
 
 MODULE_AUTHOR("VMware, Inc.");
 MODULE_DESCRIPTION("VMCI transport for Virtual Sockets");
-MODULE_VERSION("1.0.4.0-k");
+MODULE_VERSION("1.0.5.0-k");
 MODULE_LICENSE("GPL v2");
 MODULE_ALIAS("vmware_vsock");
 MODULE_ALIAS_NETPROTO(PF_VSOCK);
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -20,6 +20,10 @@ config CFG80211
 	tristate "cfg80211 - wireless configuration API"
 	depends on RFKILL || !RFKILL
 	select FW_LOADER
+	# may need to update this when certificates are changed and are
+	# using a different algorithm, though right now they shouldn't
+	# (this is here rather than below to allow it to be a module)
+	select CRYPTO_SHA256 if CFG80211_USE_KERNEL_REGDB_KEYS
 	---help---
 	  cfg80211 is the Linux wireless LAN (802.11) configuration API.
 	  Enable this if you have a wireless device.
@@ -113,6 +117,9 @@ config CFG80211_EXTRA_REGDB_KEYDIR
 	  certificates like in the kernel sources (net/wireless/certs/)
 	  that shall be accepted for a signed regulatory database.
 
+	  Note that you need to also select the correct CRYPTO_<hash> modules
+	  for your certificates, and if cfg80211 is built-in they also must be.
+
 config CFG80211_REG_CELLULAR_HINTS
 	bool "cfg80211 regulatory support for cellular base station hints"
 	depends on CFG80211_CERTIFICATION_ONUS