Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

 1) If the user gives us a msg_namelen of 0, don't try to interpret
    anything pointed to by msg_name.  From Ani Sinha.

 2) Fix some bnx2i/bnx2fc randconfig compilation errors.

    The gist of the issue is that we have drivers that span both SCSI
    and networking.  And at the top of that chain of dependencies we
    have things like SCSI_FC_ATTRS and SCSI_NETLINK, which are
    selected.

    But since select is a sledgehammer and ignores dependencies,
    everything that selects SCSI_FC_ATTRS and/or SCSI_NETLINK also has
    to explicitly select their dependencies, and so on and so forth.

    Generally speaking, 'select' is supposed to be used only for leaf
    nodes, those which have no dependencies of their own.  This whole
    chain of dependencies in the SCSI layer violates that rather
    strongly.

    So just make SCSI_NETLINK depend upon its dependencies, and so on
    and so forth for the things selecting it (either directly or
    indirectly).
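
    As a minimal Kconfig sketch (hypothetical FOO and BAR symbols, not
    from this tree), the difference looks like this:

        config FOO                      # before: breaks randconfig
                tristate "Foo driver"
                select BAR              # forces BAR on, ignoring
                                        # BAR's own 'depends on' list

        config FOO                      # after: safe
                tristate "Foo driver"
                depends on BAR          # FOO is offered only when BAR
                                        # and its dependencies are set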

    From Anish Bhatt and Randy Dunlap.

 3) Fix generation of blackhole routes in IPSEC, from Steffen Klassert.

 4) Actually notice netdev feature changes in rtl_open() code, from
    Hayes Wang.

 5) Fix divide by zero in bond enslaving, from Nikolay Aleksandrov.
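
    The fix (visible in the bond_main.c hunk below) snapshots the
    slave count once, so the zero check and the modulo cannot observe
    two different values while slaves are being added or removed:

        int slave_cnt = ACCESS_ONCE(bond->slave_cnt);

        if (likely(slave_cnt))
                bond_xmit_slave_id(bond, skb,
                                   bond_xmit_hash(bond, skb) % slave_cnt);
        else
                dev_kfree_skb_any(skb);  /* no usable slaves: drop */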

 6) Missing memory barrier in sunvnet driver, from David Stevens.
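
    The read side needs the usual pairing (see the sunvnet hunk
    below): check the descriptor's READY state first, then issue rmb()
    before reading the payload fields the peer filled in:

        if (desc->hdr.state != VIO_DESC_READY)
                return 1;
        rmb();  /* don't read the payload before seeing READY */
        /* ... desc->size, desc->ncookies etc. are now safe ... */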

 7) Don't leave anycast addresses around when ipv6 interface is
    destroyed, from Sabrina Dubroca.

 8) Don't call efx_{arch}_filter_sync_rx_mode before addr_list_lock is
    initialized in SFC driver, from Edward Cree.

 9) Fix missing DMA error checking in 3c59x, from Neil Horman.
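
    The pattern the fix adds after every mapping (names as in the
    3c59x hunk below):

        dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data, skb->len,
                                  PCI_DMA_TODEVICE);
        if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
                goto out_dma_err;  /* never hand a bad address to HW */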

10) Openvswitch accidentally stopped emitting OVS_FLOW_CMD_NEW
    notifications; fix from Samuel Gauthier.

11) pch_gbe needs to select NET_PTP_CLASSIFY, otherwise we can get a
    build error.

12) Fix macvlan regression wherein we stopped emitting
    broadcast/multicast frames over software devices.  From Nicolas
    Dichtel.
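
    The regression came from bounding the broadcast queue by the
    device's tx_queue_len, which is typically 0 for software devices,
    so nothing was ever enqueued.  The fix (macvlan.c hunk below)
    caps the queue with a fixed constant instead:

        if (skb_queue_len(&port->bc_queue) < MACVLAN_BC_QUEUE_LEN) {
                __skb_queue_tail(&port->bc_queue, nskb);
                err = 0;
        }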

13) Fix infiniband bug due to unintended overflow of skb->cb[], from
    Eric Dumazet.  And add an assertion so this doesn't happen again.
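
    The assertion (from the ipoib hunk below) turns any future growth
    of struct ipoib_cb beyond the fixed 48-byte skb->cb[] area into a
    compile-time failure rather than silent memory corruption:

        static inline struct ipoib_cb *ipoib_skb_cb(const struct sk_buff *skb)
        {
                BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct ipoib_cb));
                return (struct ipoib_cb *)skb->cb;
        }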

14) dm9000_parse_dt() should return error pointers, not NULL.  From
    Tobias Klauser.
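
    With an error pointer the caller gets a real errno instead of an
    ambiguous NULL.  A minimal sketch of the calling convention (not
    the full dm9000 probe logic):

        pdata = dm9000_parse_dt(dev);
        if (IS_ERR(pdata))
                return PTR_ERR(pdata);  /* propagate e.g. -ENXIO */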

15) IP tunneling code uses this_cpu_ptr() in preemptible contexts, fix
    from Eric Dumazet.
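
    A sketch of the general rule (hypothetical per-cpu variable, not
    the exact ipv4 fix): this_cpu_ptr() is only meaningful with
    preemption disabled, because the task may otherwise migrate to
    another CPU between obtaining the pointer and dereferencing it.
    get_cpu_ptr()/put_cpu_ptr() bracket the access safely:

        struct my_stats *stats;

        stats = get_cpu_ptr(my_pcpu_stats);  /* disables preemption */
        stats->packets++;
        put_cpu_ptr(my_pcpu_stats);          /* re-enables it */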

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (87 commits)
  net: bcmgenet: call bcmgenet_dma_teardown in bcmgenet_fini_dma
  net: bcmgenet: fix TX reclaim accounting for fragments
  ipv4: do not use this_cpu_ptr() in preemptible context
  dm9000: Return an ERR_PTR() in all error conditions of dm9000_parse_dt()
  r8169: fix an if condition
  r8152: disable ALDPS
  ipoib: validate struct ipoib_cb size
  net: sched: shrink struct qdisc_skb_cb to 28 bytes
  tg3: Work around HW/FW limitations with vlan encapsulated frames
  macvlan: allow to enqueue broadcast pkt on virtual device
  pch_gbe: 'select' NET_PTP_CLASSIFY.
  scsi: Use 'depends' with LIBFC instead of 'select'.
  openvswitch: restore OVS_FLOW_CMD_NEW notifications
  genetlink: add function genl_has_listeners()
  lib: rhashtable: remove second linux/log2.h inclusion
  net: allow macvlans to move to net namespace
  3c59x: Fix bad offset spec in skb_frag_dma_map
  3c59x: Add dma error checking and recovery
  sparc: bpf_jit: fix support for ldx/stx mem and SKF_AD_VLAN_TAG
  can: at91_can: add missing prepare and unprepare of the clock
  ...
commit 98f75b8291 by Linus Torvalds, 2014-09-22 18:23:33 -07:00
85 changed files with 829 additions and 356 deletions


@ -462,9 +462,9 @@ JIT compiler
------------
The Linux kernel has a built-in BPF JIT compiler for x86_64, SPARC, PowerPC,
ARM and s390 and can be enabled through CONFIG_BPF_JIT. The JIT compiler is
transparently invoked for each attached filter from user space or for internal
kernel users if it has been previously enabled by root:
ARM, MIPS and s390 and can be enabled through CONFIG_BPF_JIT. The JIT compiler
is transparently invoked for each attached filter from user space or for
internal kernel users if it has been previously enabled by root:
echo 1 > /proc/sys/net/core/bpf_jit_enable


@ -234,12 +234,18 @@ do { BUILD_BUG_ON(FIELD_SIZEOF(STRUCT, FIELD) != sizeof(u8)); \
__emit_load8(BASE, STRUCT, FIELD, DEST); \
} while (0)
#define emit_ldmem(OFF, DEST) \
do { *prog++ = LD32I | RS1(FP) | S13(-(OFF)) | RD(DEST); \
#ifdef CONFIG_SPARC64
#define BIAS (STACK_BIAS - 4)
#else
#define BIAS (-4)
#endif
#define emit_ldmem(OFF, DEST) \
do { *prog++ = LD32I | RS1(SP) | S13(BIAS - (OFF)) | RD(DEST); \
} while (0)
#define emit_stmem(OFF, SRC) \
do { *prog++ = LD32I | RS1(FP) | S13(-(OFF)) | RD(SRC); \
#define emit_stmem(OFF, SRC) \
do { *prog++ = ST32I | RS1(SP) | S13(BIAS - (OFF)) | RD(SRC); \
} while (0)
#ifdef CONFIG_SMP
@ -615,10 +621,11 @@ void bpf_jit_compile(struct bpf_prog *fp)
case BPF_ANC | SKF_AD_VLAN_TAG:
case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
emit_skb_load16(vlan_tci, r_A);
if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) {
emit_andi(r_A, VLAN_VID_MASK, r_A);
if (code != (BPF_ANC | SKF_AD_VLAN_TAG)) {
emit_alu_K(SRL, 12);
emit_andi(r_A, 1, r_A);
} else {
emit_loadimm(VLAN_TAG_PRESENT, r_TMP);
emit_loadimm(~VLAN_TAG_PRESENT, r_TMP);
emit_and(r_A, r_TMP, r_A);
}
break;
@ -630,15 +637,19 @@ void bpf_jit_compile(struct bpf_prog *fp)
emit_loadimm(K, r_X);
break;
case BPF_LD | BPF_MEM:
seen |= SEEN_MEM;
emit_ldmem(K * 4, r_A);
break;
case BPF_LDX | BPF_MEM:
seen |= SEEN_MEM | SEEN_XREG;
emit_ldmem(K * 4, r_X);
break;
case BPF_ST:
seen |= SEEN_MEM;
emit_stmem(K * 4, r_A);
break;
case BPF_STX:
seen |= SEEN_MEM | SEEN_XREG;
emit_stmem(K * 4, r_X);
break;


@ -1680,7 +1680,7 @@ static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
goto unlock;
update_params.smac_index = new_smac_index;
if (mlx4_update_qp(ibdev->dev, &qp->mqp, MLX4_UPDATE_QP_SMAC,
if (mlx4_update_qp(ibdev->dev, qp->mqp.qpn, MLX4_UPDATE_QP_SMAC,
&update_params)) {
release_mac = new_smac;
goto unlock;


@ -1682,7 +1682,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
MLX4_IB_LINK_TYPE_ETH;
if (dev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
/* set QP to receive both tunneled & non-tunneled packets */
if (!(context->flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)))
if (!(context->flags & cpu_to_be32(1 << MLX4_RSS_QPC_FLAG_OFFSET)))
context->srqn = cpu_to_be32(7 << 28);
}
}


@ -131,6 +131,12 @@ struct ipoib_cb {
u8 hwaddr[INFINIBAND_ALEN];
};
static inline struct ipoib_cb *ipoib_skb_cb(const struct sk_buff *skb)
{
BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct ipoib_cb));
return (struct ipoib_cb *)skb->cb;
}
/* Used for all multicast joins (broadcast, IPv4 mcast and IPv6 mcast) */
struct ipoib_mcast {
struct ib_sa_mcmember_rec mcmember;


@ -716,7 +716,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_neigh *neigh;
struct ipoib_cb *cb = (struct ipoib_cb *) skb->cb;
struct ipoib_cb *cb = ipoib_skb_cb(skb);
struct ipoib_header *header;
unsigned long flags;
@ -813,7 +813,7 @@ static int ipoib_hard_header(struct sk_buff *skb,
const void *daddr, const void *saddr, unsigned len)
{
struct ipoib_header *header;
struct ipoib_cb *cb = (struct ipoib_cb *) skb->cb;
struct ipoib_cb *cb = ipoib_skb_cb(skb);
header = (struct ipoib_header *) skb_push(skb, sizeof *header);


@ -29,7 +29,7 @@ config FUSION_SPI
config FUSION_FC
tristate "Fusion MPT ScsiHost drivers for FC"
depends on PCI && SCSI
select SCSI_FC_ATTRS
depends on SCSI_FC_ATTRS
---help---
SCSI HOST support for Fiber Channel host adapters.


@ -175,7 +175,7 @@ MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to "
"the same MAC; 0 for none (default), "
"1 for active, 2 for follow");
module_param(all_slaves_active, int, 0);
MODULE_PARM_DESC(all_slaves_active, "Keep all frames received on an interface"
MODULE_PARM_DESC(all_slaves_active, "Keep all frames received on an interface "
"by setting active flag for all slaves; "
"0 for never (default), 1 for always.");
module_param(resend_igmp, int, 0);
@ -3659,8 +3659,14 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev
else
bond_xmit_slave_id(bond, skb, 0);
} else {
slave_id = bond_rr_gen_slave_id(bond);
bond_xmit_slave_id(bond, skb, slave_id % bond->slave_cnt);
int slave_cnt = ACCESS_ONCE(bond->slave_cnt);
if (likely(slave_cnt)) {
slave_id = bond_rr_gen_slave_id(bond);
bond_xmit_slave_id(bond, skb, slave_id % slave_cnt);
} else {
dev_kfree_skb_any(skb);
}
}
return NETDEV_TX_OK;
@ -3691,8 +3697,13 @@ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_d
static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
int slave_cnt = ACCESS_ONCE(bond->slave_cnt);
bond_xmit_slave_id(bond, skb, bond_xmit_hash(bond, skb) % bond->slave_cnt);
if (likely(slave_cnt))
bond_xmit_slave_id(bond, skb,
bond_xmit_hash(bond, skb) % slave_cnt);
else
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}


@ -1123,7 +1123,9 @@ static int at91_open(struct net_device *dev)
struct at91_priv *priv = netdev_priv(dev);
int err;
clk_enable(priv->clk);
err = clk_prepare_enable(priv->clk);
if (err)
return err;
/* check or determine and set bittime */
err = open_candev(dev);
@ -1149,7 +1151,7 @@ static int at91_open(struct net_device *dev)
out_close:
close_candev(dev);
out:
clk_disable(priv->clk);
clk_disable_unprepare(priv->clk);
return err;
}
@ -1166,7 +1168,7 @@ static int at91_close(struct net_device *dev)
at91_chip_stop(dev, CAN_STATE_STOPPED);
free_irq(dev->irq, dev);
clk_disable(priv->clk);
clk_disable_unprepare(priv->clk);
close_candev(dev);


@ -97,14 +97,14 @@ static void c_can_hw_raminit_ti(const struct c_can_priv *priv, bool enable)
ctrl |= CAN_RAMINIT_DONE_MASK(priv->instance);
writel(ctrl, priv->raminit_ctrlreg);
ctrl &= ~CAN_RAMINIT_DONE_MASK(priv->instance);
c_can_hw_raminit_wait_ti(priv, ctrl, mask);
c_can_hw_raminit_wait_ti(priv, mask, ctrl);
if (enable) {
/* Set start bit and wait for the done bit. */
ctrl |= CAN_RAMINIT_START_MASK(priv->instance);
writel(ctrl, priv->raminit_ctrlreg);
ctrl |= CAN_RAMINIT_DONE_MASK(priv->instance);
c_can_hw_raminit_wait_ti(priv, ctrl, mask);
c_can_hw_raminit_wait_ti(priv, mask, ctrl);
}
spin_unlock(&raminit_lock);
}


@ -62,7 +62,7 @@
#define FLEXCAN_MCR_BCC BIT(16)
#define FLEXCAN_MCR_LPRIO_EN BIT(13)
#define FLEXCAN_MCR_AEN BIT(12)
#define FLEXCAN_MCR_MAXMB(x) ((x) & 0x1f)
#define FLEXCAN_MCR_MAXMB(x) ((x) & 0x7f)
#define FLEXCAN_MCR_IDAM_A (0 << 8)
#define FLEXCAN_MCR_IDAM_B (1 << 8)
#define FLEXCAN_MCR_IDAM_C (2 << 8)
@ -125,7 +125,9 @@
FLEXCAN_ESR_BOFF_INT | FLEXCAN_ESR_ERR_INT)
/* FLEXCAN interrupt flag register (IFLAG) bits */
#define FLEXCAN_TX_BUF_ID 8
/* Errata ERR005829 step7: Reserve first valid MB */
#define FLEXCAN_TX_BUF_RESERVED 8
#define FLEXCAN_TX_BUF_ID 9
#define FLEXCAN_IFLAG_BUF(x) BIT(x)
#define FLEXCAN_IFLAG_RX_FIFO_OVERFLOW BIT(7)
#define FLEXCAN_IFLAG_RX_FIFO_WARN BIT(6)
@ -136,6 +138,17 @@
/* FLEXCAN message buffers */
#define FLEXCAN_MB_CNT_CODE(x) (((x) & 0xf) << 24)
#define FLEXCAN_MB_CODE_RX_INACTIVE (0x0 << 24)
#define FLEXCAN_MB_CODE_RX_EMPTY (0x4 << 24)
#define FLEXCAN_MB_CODE_RX_FULL (0x2 << 24)
#define FLEXCAN_MB_CODE_RX_OVERRRUN (0x6 << 24)
#define FLEXCAN_MB_CODE_RX_RANSWER (0xa << 24)
#define FLEXCAN_MB_CODE_TX_INACTIVE (0x8 << 24)
#define FLEXCAN_MB_CODE_TX_ABORT (0x9 << 24)
#define FLEXCAN_MB_CODE_TX_DATA (0xc << 24)
#define FLEXCAN_MB_CODE_TX_TANSWER (0xe << 24)
#define FLEXCAN_MB_CNT_SRR BIT(22)
#define FLEXCAN_MB_CNT_IDE BIT(21)
#define FLEXCAN_MB_CNT_RTR BIT(20)
@ -298,7 +311,7 @@ static int flexcan_chip_enable(struct flexcan_priv *priv)
flexcan_write(reg, &regs->mcr);
while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
usleep_range(10, 20);
udelay(10);
if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)
return -ETIMEDOUT;
@ -317,7 +330,7 @@ static int flexcan_chip_disable(struct flexcan_priv *priv)
flexcan_write(reg, &regs->mcr);
while (timeout-- && !(flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
usleep_range(10, 20);
udelay(10);
if (!(flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
return -ETIMEDOUT;
@ -336,7 +349,7 @@ static int flexcan_chip_freeze(struct flexcan_priv *priv)
flexcan_write(reg, &regs->mcr);
while (timeout-- && !(flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
usleep_range(100, 200);
udelay(100);
if (!(flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
return -ETIMEDOUT;
@ -355,7 +368,7 @@ static int flexcan_chip_unfreeze(struct flexcan_priv *priv)
flexcan_write(reg, &regs->mcr);
while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
usleep_range(10, 20);
udelay(10);
if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK)
return -ETIMEDOUT;
@ -370,7 +383,7 @@ static int flexcan_chip_softreset(struct flexcan_priv *priv)
flexcan_write(FLEXCAN_MCR_SOFTRST, &regs->mcr);
while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_SOFTRST))
usleep_range(10, 20);
udelay(10);
if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_SOFTRST)
return -ETIMEDOUT;
@ -428,6 +441,14 @@ static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev)
flexcan_write(can_id, &regs->cantxfg[FLEXCAN_TX_BUF_ID].can_id);
flexcan_write(ctrl, &regs->cantxfg[FLEXCAN_TX_BUF_ID].can_ctrl);
/* Errata ERR005829 step8:
* Write twice INACTIVE(0x8) code to first MB.
*/
flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
&regs->cantxfg[FLEXCAN_TX_BUF_RESERVED].can_ctrl);
flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
&regs->cantxfg[FLEXCAN_TX_BUF_RESERVED].can_ctrl);
return NETDEV_TX_OK;
}
@ -744,6 +765,9 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
stats->tx_bytes += can_get_echo_skb(dev, 0);
stats->tx_packets++;
can_led_event(dev, CAN_LED_EVENT_TX);
/* after sending a RTR frame mailbox is in RX mode */
flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
&regs->cantxfg[FLEXCAN_TX_BUF_ID].can_ctrl);
flexcan_write((1 << FLEXCAN_TX_BUF_ID), &regs->iflag1);
netif_wake_queue(dev);
}
@ -801,6 +825,7 @@ static int flexcan_chip_start(struct net_device *dev)
struct flexcan_regs __iomem *regs = priv->base;
int err;
u32 reg_mcr, reg_ctrl;
int i;
/* enable module */
err = flexcan_chip_enable(priv);
@ -867,8 +892,18 @@ static int flexcan_chip_start(struct net_device *dev)
netdev_dbg(dev, "%s: writing ctrl=0x%08x", __func__, reg_ctrl);
flexcan_write(reg_ctrl, &regs->ctrl);
/* Abort any pending TX, mark Mailbox as INACTIVE */
flexcan_write(FLEXCAN_MB_CNT_CODE(0x4),
/* clear and invalidate all mailboxes first */
for (i = FLEXCAN_TX_BUF_ID; i < ARRAY_SIZE(regs->cantxfg); i++) {
flexcan_write(FLEXCAN_MB_CODE_RX_INACTIVE,
&regs->cantxfg[i].can_ctrl);
}
/* Errata ERR005829: mark first TX mailbox as INACTIVE */
flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
&regs->cantxfg[FLEXCAN_TX_BUF_RESERVED].can_ctrl);
/* mark TX mailbox as INACTIVE */
flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
&regs->cantxfg[FLEXCAN_TX_BUF_ID].can_ctrl);
/* acceptance mask/acceptance code (accept everything) */


@ -70,6 +70,8 @@ struct peak_pci_chan {
#define PEAK_PC_104P_DEVICE_ID 0x0006 /* PCAN-PC/104+ cards */
#define PEAK_PCI_104E_DEVICE_ID 0x0007 /* PCAN-PCI/104 Express cards */
#define PEAK_MPCIE_DEVICE_ID 0x0008 /* The miniPCIe slot cards */
#define PEAK_PCIE_OEM_ID 0x0009 /* PCAN-PCI Express OEM */
#define PEAK_PCIEC34_DEVICE_ID 0x000A /* PCAN-PCI Express 34 (one channel) */
#define PEAK_PCI_CHAN_MAX 4
@ -87,6 +89,7 @@ static const struct pci_device_id peak_pci_tbl[] = {
{PEAK_PCI_VENDOR_ID, PEAK_CPCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
#ifdef CONFIG_CAN_PEAK_PCIEC
{PEAK_PCI_VENDOR_ID, PEAK_PCIEC_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
{PEAK_PCI_VENDOR_ID, PEAK_PCIEC34_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
#endif
{0,}
};
@ -653,7 +656,8 @@ static int peak_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* This must be done *before* register_sja1000dev() but
* *after* devices linkage
*/
if (pdev->device == PEAK_PCIEC_DEVICE_ID) {
if (pdev->device == PEAK_PCIEC_DEVICE_ID ||
pdev->device == PEAK_PCIEC34_DEVICE_ID) {
err = peak_pciec_probe(pdev, dev);
if (err) {
dev_err(&pdev->dev,


@ -2129,6 +2129,7 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
int entry = vp->cur_tx % TX_RING_SIZE;
struct boom_tx_desc *prev_entry = &vp->tx_ring[(vp->cur_tx-1) % TX_RING_SIZE];
unsigned long flags;
dma_addr_t dma_addr;
if (vortex_debug > 6) {
pr_debug("boomerang_start_xmit()\n");
@ -2163,24 +2164,48 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum | AddUDPChksum);
if (!skb_shinfo(skb)->nr_frags) {
vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data,
skb->len, PCI_DMA_TODEVICE));
dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data, skb->len,
PCI_DMA_TODEVICE);
if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
goto out_dma_err;
vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr);
vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len | LAST_FRAG);
} else {
int i;
vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data,
skb_headlen(skb), PCI_DMA_TODEVICE));
dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data,
skb_headlen(skb), PCI_DMA_TODEVICE);
if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
goto out_dma_err;
vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr);
vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb_headlen(skb));
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
dma_addr = skb_frag_dma_map(&VORTEX_PCI(vp)->dev, frag,
0,
frag->size,
DMA_TO_DEVICE);
if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr)) {
for(i = i-1; i >= 0; i--)
dma_unmap_page(&VORTEX_PCI(vp)->dev,
le32_to_cpu(vp->tx_ring[entry].frag[i+1].addr),
le32_to_cpu(vp->tx_ring[entry].frag[i+1].length),
DMA_TO_DEVICE);
pci_unmap_single(VORTEX_PCI(vp),
le32_to_cpu(vp->tx_ring[entry].frag[0].addr),
le32_to_cpu(vp->tx_ring[entry].frag[0].length),
PCI_DMA_TODEVICE);
goto out_dma_err;
}
vp->tx_ring[entry].frag[i+1].addr =
cpu_to_le32(skb_frag_dma_map(
&VORTEX_PCI(vp)->dev,
frag,
frag->page_offset, frag->size, DMA_TO_DEVICE));
cpu_to_le32(dma_addr);
if (i == skb_shinfo(skb)->nr_frags-1)
vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(skb_frag_size(frag)|LAST_FRAG);
@ -2189,7 +2214,10 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
#else
vp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE));
dma_addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE));
if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
goto out_dma_err;
vp->tx_ring[entry].addr = cpu_to_le32(dma_addr);
vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG);
vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded);
#endif
@ -2217,7 +2245,11 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb_tx_timestamp(skb);
iowrite16(DownUnstall, ioaddr + EL3_CMD);
spin_unlock_irqrestore(&vp->lock, flags);
out:
return NETDEV_TX_OK;
out_dma_err:
dev_err(&VORTEX_PCI(vp)->dev, "Error mapping dma buffer\n");
goto out;
}
/* The interrupt handler does all of the Rx thread work and cleans up


@ -29,6 +29,17 @@
#define DRV_NAME "arc_emac"
#define DRV_VERSION "1.0"
/**
* arc_emac_tx_avail - Return the number of available slots in the tx ring.
* @priv: Pointer to ARC EMAC private data structure.
*
* returns: the number of slots available for transmission in the tx ring.
*/
static inline int arc_emac_tx_avail(struct arc_emac_priv *priv)
{
return (priv->txbd_dirty + TX_BD_NUM - priv->txbd_curr - 1) % TX_BD_NUM;
}
/**
* arc_emac_adjust_link - Adjust the PHY link duplex.
* @ndev: Pointer to the net_device structure.
@ -180,10 +191,15 @@ static void arc_emac_tx_clean(struct net_device *ndev)
txbd->info = 0;
*txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM;
if (netif_queue_stopped(ndev))
netif_wake_queue(ndev);
}
/* Ensure that txbd_dirty is visible to tx() before checking
* for queue stopped.
*/
smp_mb();
if (netif_queue_stopped(ndev) && arc_emac_tx_avail(priv))
netif_wake_queue(ndev);
}
/**
@ -298,7 +314,7 @@ static int arc_emac_poll(struct napi_struct *napi, int budget)
work_done = arc_emac_rx(ndev, budget);
if (work_done < budget) {
napi_complete(napi);
arc_reg_or(priv, R_ENABLE, RXINT_MASK);
arc_reg_or(priv, R_ENABLE, RXINT_MASK | TXINT_MASK);
}
return work_done;
@ -327,9 +343,9 @@ static irqreturn_t arc_emac_intr(int irq, void *dev_instance)
/* Reset all flags except "MDIO complete" */
arc_reg_set(priv, R_STATUS, status);
if (status & RXINT_MASK) {
if (status & (RXINT_MASK | TXINT_MASK)) {
if (likely(napi_schedule_prep(&priv->napi))) {
arc_reg_clr(priv, R_ENABLE, RXINT_MASK);
arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK);
__napi_schedule(&priv->napi);
}
}
@ -440,7 +456,7 @@ static int arc_emac_open(struct net_device *ndev)
arc_reg_set(priv, R_TX_RING, (unsigned int)priv->txbd_dma);
/* Enable interrupts */
arc_reg_set(priv, R_ENABLE, RXINT_MASK | ERR_MASK);
arc_reg_set(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK);
/* Set CONTROL */
arc_reg_set(priv, R_CTRL,
@ -511,7 +527,7 @@ static int arc_emac_stop(struct net_device *ndev)
netif_stop_queue(ndev);
/* Disable interrupts */
arc_reg_clr(priv, R_ENABLE, RXINT_MASK | ERR_MASK);
arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK);
/* Disable EMAC */
arc_reg_clr(priv, R_CTRL, EN_MASK);
@ -574,11 +590,9 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
len = max_t(unsigned int, ETH_ZLEN, skb->len);
/* EMAC still holds this buffer in its possession.
* CPU must not modify this buffer descriptor
*/
if (unlikely((le32_to_cpu(*info) & OWN_MASK) == FOR_EMAC)) {
if (unlikely(!arc_emac_tx_avail(priv))) {
netif_stop_queue(ndev);
netdev_err(ndev, "BUG! Tx Ring full when queue awake!\n");
return NETDEV_TX_BUSY;
}
@ -607,12 +621,19 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
/* Increment index to point to the next BD */
*txbd_curr = (*txbd_curr + 1) % TX_BD_NUM;
/* Get "info" of the next BD */
info = &priv->txbd[*txbd_curr].info;
/* Ensure that tx_clean() sees the new txbd_curr before
* checking the queue status. This prevents an unneeded wake
* of the queue in tx_clean().
*/
smp_mb();
/* Check if Tx BD ring is full - next BD is still owned by EMAC */
if (unlikely((le32_to_cpu(*info) & OWN_MASK) == FOR_EMAC))
if (!arc_emac_tx_avail(priv)) {
netif_stop_queue(ndev);
/* Refresh tx_dirty */
smp_mb();
if (arc_emac_tx_avail(priv))
netif_start_queue(ndev);
}
arc_reg_set(priv, R_STATUS, TXPL_MASK);


@ -1697,7 +1697,7 @@ static struct rtnl_link_stats64 *b44_get_stats64(struct net_device *dev,
hwstat->tx_underruns +
hwstat->tx_excessive_cols +
hwstat->tx_late_cols);
nstat->multicast = hwstat->tx_multicast_pkts;
nstat->multicast = hwstat->rx_multicast_pkts;
nstat->collisions = hwstat->tx_total_cols;
nstat->rx_length_errors = (hwstat->rx_oversize_pkts +


@ -534,6 +534,25 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
while ((processed < to_process) && (processed < budget)) {
cb = &priv->rx_cbs[priv->rx_read_ptr];
skb = cb->skb;
processed++;
priv->rx_read_ptr++;
if (priv->rx_read_ptr == priv->num_rx_bds)
priv->rx_read_ptr = 0;
/* We do not have a backing SKB, so we do not have a corresponding
* DMA mapping for this incoming packet since
* bcm_sysport_rx_refill always either has both skb and mapping
* or none.
*/
if (unlikely(!skb)) {
netif_err(priv, rx_err, ndev, "out of memory!\n");
ndev->stats.rx_dropped++;
ndev->stats.rx_errors++;
goto refill;
}
dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
RX_BUF_LENGTH, DMA_FROM_DEVICE);
@ -543,23 +562,11 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) &
DESC_STATUS_MASK;
processed++;
priv->rx_read_ptr++;
if (priv->rx_read_ptr == priv->num_rx_bds)
priv->rx_read_ptr = 0;
netif_dbg(priv, rx_status, ndev,
"p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n",
p_index, priv->rx_c_index, priv->rx_read_ptr,
len, status);
if (unlikely(!skb)) {
netif_err(priv, rx_err, ndev, "out of memory!\n");
ndev->stats.rx_dropped++;
ndev->stats.rx_errors++;
goto refill;
}
if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) {
netif_err(priv, rx_status, ndev, "fragmented packet!\n");
ndev->stats.rx_dropped++;


@ -875,6 +875,7 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
int last_tx_cn, last_c_index, num_tx_bds;
struct enet_cb *tx_cb_ptr;
struct netdev_queue *txq;
unsigned int bds_compl;
unsigned int c_index;
/* Compute how many buffers are transmitted since last xmit call */
@ -899,7 +900,9 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
/* Reclaim transmitted buffers */
while (last_tx_cn-- > 0) {
tx_cb_ptr = ring->cbs + last_c_index;
bds_compl = 0;
if (tx_cb_ptr->skb) {
bds_compl = skb_shinfo(tx_cb_ptr->skb)->nr_frags + 1;
dev->stats.tx_bytes += tx_cb_ptr->skb->len;
dma_unmap_single(&dev->dev,
dma_unmap_addr(tx_cb_ptr, dma_addr),
@ -916,7 +919,7 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0);
}
dev->stats.tx_packets++;
ring->free_bds += 1;
ring->free_bds += bds_compl;
last_c_index++;
last_c_index &= (num_tx_bds - 1);
@ -1274,12 +1277,29 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
while ((rxpktprocessed < rxpkttoprocess) &&
(rxpktprocessed < budget)) {
cb = &priv->rx_cbs[priv->rx_read_ptr];
skb = cb->skb;
rxpktprocessed++;
priv->rx_read_ptr++;
priv->rx_read_ptr &= (priv->num_rx_bds - 1);
/* We do not have a backing SKB, so we do not have a
* corresponding DMA mapping for this incoming packet since
* bcmgenet_rx_refill always either has both skb and mapping or
* none.
*/
if (unlikely(!skb)) {
dev->stats.rx_dropped++;
dev->stats.rx_errors++;
goto refill;
}
/* Unmap the packet contents such that we can use the
* RSV from the 64 bytes descriptor when enabled and save
* a 32-bits register read
*/
cb = &priv->rx_cbs[priv->rx_read_ptr];
skb = cb->skb;
dma_unmap_single(&dev->dev, dma_unmap_addr(cb, dma_addr),
priv->rx_buf_len, DMA_FROM_DEVICE);
@ -1307,18 +1327,6 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
__func__, p_index, priv->rx_c_index,
priv->rx_read_ptr, dma_length_status);
rxpktprocessed++;
priv->rx_read_ptr++;
priv->rx_read_ptr &= (priv->num_rx_bds - 1);
/* out of memory, just drop packets at the hardware level */
if (unlikely(!skb)) {
dev->stats.rx_dropped++;
dev->stats.rx_errors++;
goto refill;
}
if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
netif_err(priv, rx_status, dev,
"dropping fragmented packet!\n");
@ -1736,13 +1744,63 @@ static void bcmgenet_init_multiq(struct net_device *dev)
bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
}
static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
{
int ret = 0;
int timeout = 0;
u32 reg;
/* Disable TDMA to stop add more frames in TX DMA */
reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
reg &= ~DMA_EN;
bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
/* Check TDMA status register to confirm TDMA is disabled */
while (timeout++ < DMA_TIMEOUT_VAL) {
reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
if (reg & DMA_DISABLED)
break;
udelay(1);
}
if (timeout == DMA_TIMEOUT_VAL) {
netdev_warn(priv->dev, "Timed out while disabling TX DMA\n");
ret = -ETIMEDOUT;
}
/* Wait 10ms for packet drain in both tx and rx dma */
usleep_range(10000, 20000);
/* Disable RDMA */
reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
reg &= ~DMA_EN;
bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
timeout = 0;
/* Check RDMA status register to confirm RDMA is disabled */
while (timeout++ < DMA_TIMEOUT_VAL) {
reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
if (reg & DMA_DISABLED)
break;
udelay(1);
}
if (timeout == DMA_TIMEOUT_VAL) {
netdev_warn(priv->dev, "Timed out while disabling RX DMA\n");
ret = -ETIMEDOUT;
}
return ret;
}
static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
{
int i;
/* disable DMA */
bcmgenet_rdma_writel(priv, 0, DMA_CTRL);
bcmgenet_tdma_writel(priv, 0, DMA_CTRL);
bcmgenet_dma_teardown(priv);
for (i = 0; i < priv->num_tx_bds; i++) {
if (priv->tx_cbs[i].skb != NULL) {
@ -2101,57 +2159,6 @@ static int bcmgenet_open(struct net_device *dev)
return ret;
}
static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
{
int ret = 0;
int timeout = 0;
u32 reg;
/* Disable TDMA to stop add more frames in TX DMA */
reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
reg &= ~DMA_EN;
bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
/* Check TDMA status register to confirm TDMA is disabled */
while (timeout++ < DMA_TIMEOUT_VAL) {
reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
if (reg & DMA_DISABLED)
break;
udelay(1);
}
if (timeout == DMA_TIMEOUT_VAL) {
netdev_warn(priv->dev, "Timed out while disabling TX DMA\n");
ret = -ETIMEDOUT;
}
/* Wait 10ms for packet drain in both tx and rx dma */
usleep_range(10000, 20000);
/* Disable RDMA */
reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
reg &= ~DMA_EN;
bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
timeout = 0;
/* Check RDMA status register to confirm RDMA is disabled */
while (timeout++ < DMA_TIMEOUT_VAL) {
reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
if (reg & DMA_DISABLED)
break;
udelay(1);
}
if (timeout == DMA_TIMEOUT_VAL) {
netdev_warn(priv->dev, "Timed out while disabling RX DMA\n");
ret = -ETIMEDOUT;
}
return ret;
}
static void bcmgenet_netif_stop(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);


@ -7914,8 +7914,6 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
entry = tnapi->tx_prod;
base_flags = 0;
if (skb->ip_summed == CHECKSUM_PARTIAL)
base_flags |= TXD_FLAG_TCPUDP_CSUM;
mss = skb_shinfo(skb)->gso_size;
if (mss) {
@ -7929,6 +7927,13 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
/* HW/FW can not correctly segment packets that have been
* vlan encapsulated.
*/
if (skb->protocol == htons(ETH_P_8021Q) ||
skb->protocol == htons(ETH_P_8021AD))
return tg3_tso_bug(tp, tnapi, txq, skb);
if (!skb_is_gso_v6(skb)) {
if (unlikely((ETH_HLEN + hdr_len) > 80) &&
tg3_flag(tp, TSO_BUG))
@ -7979,6 +7984,17 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
base_flags |= tsflags << 12;
}
}
} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
/* HW/FW can not correctly checksum packets that have been
* vlan encapsulated.
*/
if (skb->protocol == htons(ETH_P_8021Q) ||
skb->protocol == htons(ETH_P_8021AD)) {
if (skb_checksum_help(skb))
goto drop;
} else {
base_flags |= TXD_FLAG_TCPUDP_CSUM;
}
}
if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&


@ -6478,6 +6478,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
struct port_info *pi;
bool highdma = false;
struct adapter *adapter = NULL;
void __iomem *regs;
printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
@ -6494,19 +6495,35 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_release_regions;
}
regs = pci_ioremap_bar(pdev, 0);
if (!regs) {
dev_err(&pdev->dev, "cannot map device registers\n");
err = -ENOMEM;
goto out_disable_device;
}
/* We control everything through one PF */
func = SOURCEPF_GET(readl(regs + PL_WHOAMI));
if (func != ent->driver_data) {
iounmap(regs);
pci_disable_device(pdev);
pci_save_state(pdev); /* to restore SR-IOV later */
goto sriov;
}
if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
highdma = true;
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
if (err) {
dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
"coherent allocations\n");
goto out_disable_device;
goto out_unmap_bar0;
}
} else {
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "no usable DMA configuration\n");
goto out_disable_device;
goto out_unmap_bar0;
}
}
@ -6518,7 +6535,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
if (!adapter) {
err = -ENOMEM;
goto out_disable_device;
goto out_unmap_bar0;
}
adapter->workq = create_singlethread_workqueue("cxgb4");
@ -6530,20 +6547,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* PCI device has been enabled */
adapter->flags |= DEV_ENABLED;
adapter->regs = pci_ioremap_bar(pdev, 0);
if (!adapter->regs) {
dev_err(&pdev->dev, "cannot map device registers\n");
err = -ENOMEM;
goto out_free_adapter;
}
/* We control everything through one PF */
func = SOURCEPF_GET(readl(adapter->regs + PL_WHOAMI));
if (func != ent->driver_data) {
pci_save_state(pdev); /* to restore SR-IOV later */
goto sriov;
}
adapter->regs = regs;
adapter->pdev = pdev;
adapter->pdev_dev = &pdev->dev;
adapter->mbox = func;
@ -6560,7 +6564,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
err = t4_prep_adapter(adapter);
if (err)
goto out_unmap_bar0;
goto out_free_adapter;
if (!is_t4(adapter->params.chip)) {
s_qpp = QUEUESPERPAGEPF1 * adapter->fn;
@ -6577,14 +6582,14 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_err(&pdev->dev,
"Incorrect number of egress queues per page\n");
err = -EINVAL;
goto out_unmap_bar0;
goto out_free_adapter;
}
adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
pci_resource_len(pdev, 2));
if (!adapter->bar2) {
dev_err(&pdev->dev, "cannot map device bar2 region\n");
err = -ENOMEM;
goto out_unmap_bar0;
goto out_free_adapter;
}
}
@ -6722,13 +6727,13 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
out_unmap_bar:
if (!is_t4(adapter->params.chip))
iounmap(adapter->bar2);
out_unmap_bar0:
iounmap(adapter->regs);
out_free_adapter:
if (adapter->workq)
destroy_workqueue(adapter->workq);
kfree(adapter);
out_unmap_bar0:
iounmap(regs);
out_disable_device:
pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);


@ -1399,7 +1399,7 @@ static struct dm9000_plat_data *dm9000_parse_dt(struct device *dev)
const void *mac_addr;
if (!IS_ENABLED(CONFIG_OF) || !np)
return NULL;
return ERR_PTR(-ENXIO);
pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)


@ -2389,6 +2389,22 @@ struct mlx4_slaves_pport mlx4_phys_to_slaves_pport_actv(
}
EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport_actv);
static int mlx4_slaves_closest_port(struct mlx4_dev *dev, int slave, int port)
{
struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
int min_port = find_first_bit(actv_ports.ports, dev->caps.num_ports)
+ 1;
int max_port = min_port +
bitmap_weight(actv_ports.ports, dev->caps.num_ports);
if (port < min_port)
port = min_port;
else if (port >= max_port)
port = max_port - 1;
return port;
}
int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@ -2402,6 +2418,7 @@ int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac)
if (slave < 0)
return -EINVAL;
port = mlx4_slaves_closest_port(dev, slave, port);
s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
s_info->mac = mac;
mlx4_info(dev, "default mac on vf %d port %d to %llX will take effect only after vf restart\n",
@ -2428,6 +2445,7 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
if (slave < 0)
return -EINVAL;
port = mlx4_slaves_closest_port(dev, slave, port);
vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
if ((0 == vlan) && (0 == qos))
@ -2455,6 +2473,7 @@ bool mlx4_get_slave_default_vlan(struct mlx4_dev *dev, int port, int slave,
struct mlx4_priv *priv;
priv = mlx4_priv(dev);
port = mlx4_slaves_closest_port(dev, slave, port);
vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
if (MLX4_VGT != vp_oper->state.default_vlan) {
@ -2482,6 +2501,7 @@ int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting)
if (slave < 0)
return -EINVAL;
port = mlx4_slaves_closest_port(dev, slave, port);
s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
s_info->spoofchk = setting;
@ -2535,6 +2555,7 @@ int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_stat
if (slave < 0)
return -EINVAL;
port = mlx4_slaves_closest_port(dev, slave, port);
switch (link_state) {
case IFLA_VF_LINK_STATE_AUTO:
/* get current link state */


@ -487,6 +487,9 @@ static int mlx4_en_set_pauseparam(struct net_device *dev,
struct mlx4_en_dev *mdev = priv->mdev;
int err;
if (pause->autoneg)
return -EINVAL;
priv->prof->tx_pause = pause->tx_pause != 0;
priv->prof->rx_pause = pause->rx_pause != 0;
err = mlx4_SET_PORT_general(mdev->dev, priv->port,


@ -390,13 +390,14 @@ int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, gfp_t gfp)
EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
#define MLX4_UPDATE_QP_SUPPORTED_ATTRS MLX4_UPDATE_QP_SMAC
int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp,
int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
enum mlx4_update_qp_attr attr,
struct mlx4_update_qp_params *params)
{
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_update_qp_context *cmd;
u64 pri_addr_path_mask = 0;
u64 qp_mask = 0;
int err = 0;
mailbox = mlx4_alloc_cmd_mailbox(dev);
@ -413,9 +414,16 @@ int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp,
cmd->qp_context.pri_path.grh_mylmc = params->smac_index;
}
cmd->primary_addr_path_mask = cpu_to_be64(pri_addr_path_mask);
if (attr & MLX4_UPDATE_QP_VSD) {
qp_mask |= 1ULL << MLX4_UPD_QP_MASK_VSD;
if (params->flags & MLX4_UPDATE_QP_PARAMS_FLAGS_VSD_ENABLE)
cmd->qp_context.param3 |= cpu_to_be32(MLX4_STRIP_VLAN);
}
err = mlx4_cmd(dev, mailbox->dma, qp->qpn & 0xffffff, 0,
cmd->primary_addr_path_mask = cpu_to_be64(pri_addr_path_mask);
cmd->qp_mask = cpu_to_be64(qp_mask);
err = mlx4_cmd(dev, mailbox->dma, qpn & 0xffffff, 0,
MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);


@ -702,11 +702,13 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
struct mlx4_qp_context *qpc = inbox->buf + 8;
struct mlx4_vport_oper_state *vp_oper;
struct mlx4_priv *priv;
u32 qp_type;
int port;
port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
priv = mlx4_priv(dev);
vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
if (MLX4_VGT != vp_oper->state.default_vlan) {
/* the reserved QPs (special, proxy, tunnel)
@ -715,8 +717,20 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
if (mlx4_is_qp_reserved(dev, qpn))
return 0;
/* force strip vlan by clear vsd */
qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
/* force strip vlan by clear vsd, MLX QP refers to Raw Ethernet */
if (qp_type == MLX4_QP_ST_UD ||
(qp_type == MLX4_QP_ST_MLX && mlx4_is_eth(dev, port))) {
if (dev->caps.bmme_flags & MLX4_BMME_FLAG_VSD_INIT2RTR) {
*(__be32 *)inbox->buf =
cpu_to_be32(be32_to_cpu(*(__be32 *)inbox->buf) |
MLX4_QP_OPTPAR_VLAN_STRIPPING);
qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
} else {
struct mlx4_update_qp_params params = {.flags = 0};
mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
}
}
if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
@ -3998,13 +4012,17 @@ int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
}
port = (rqp->sched_queue >> 6 & 1) + 1;
smac_index = cmd->qp_context.pri_path.grh_mylmc;
err = mac_find_smac_ix_in_slave(dev, slave, port,
smac_index, &mac);
if (err) {
mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
qpn, smac_index);
goto err_mac;
if (pri_addr_path_mask & (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)) {
smac_index = cmd->qp_context.pri_path.grh_mylmc;
err = mac_find_smac_ix_in_slave(dev, slave, port,
smac_index, &mac);
if (err) {
mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
qpn, smac_index);
goto err_mac;
}
}
err = mlx4_cmd(dev, inbox->dma,
@ -4818,7 +4836,7 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
upd_context = mailbox->buf;
upd_context->qp_mask = cpu_to_be64(MLX4_UPD_QP_MASK_VSD);
upd_context->qp_mask = cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_VSD);
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(qp, tmp, qp_list, com.list) {


@ -290,9 +290,11 @@ static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
/* Read the hardware TX timestamp if one was recorded */
if (unlikely(re.s.tstamp)) {
struct skb_shared_hwtstamps ts;
u64 ns;
memset(&ts, 0, sizeof(ts));
/* Read the timestamp */
u64 ns = cvmx_read_csr(CVMX_MIXX_TSTAMP(p->port));
ns = cvmx_read_csr(CVMX_MIXX_TSTAMP(p->port));
/* Remove the timestamp from the FIFO */
cvmx_write_csr(CVMX_MIXX_TSCTL(p->port), 0);
/* Tell the kernel about the timestamp */


@ -7,6 +7,7 @@ config PCH_GBE
depends on PCI && (X86_32 || COMPILE_TEST)
select MII
select PTP_1588_CLOCK_PCH
select NET_PTP_CLASSIFY
---help---
This is a gigabit ethernet driver for EG20T PCH.
EG20T PCH is the platform controller hub that is used in Intel's


@ -1783,33 +1783,31 @@ static void __rtl8169_set_features(struct net_device *dev,
netdev_features_t features)
{
struct rtl8169_private *tp = netdev_priv(dev);
netdev_features_t changed = features ^ dev->features;
void __iomem *ioaddr = tp->mmio_addr;
u32 rx_config;
if (!(changed & (NETIF_F_RXALL | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_RX)))
return;
rx_config = RTL_R32(RxConfig);
if (features & NETIF_F_RXALL)
rx_config |= (AcceptErr | AcceptRunt);
else
rx_config &= ~(AcceptErr | AcceptRunt);
if (changed & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX)) {
if (features & NETIF_F_RXCSUM)
tp->cp_cmd |= RxChkSum;
else
tp->cp_cmd &= ~RxChkSum;
RTL_W32(RxConfig, rx_config);
if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
tp->cp_cmd |= RxVlan;
else
tp->cp_cmd &= ~RxVlan;
if (features & NETIF_F_RXCSUM)
tp->cp_cmd |= RxChkSum;
else
tp->cp_cmd &= ~RxChkSum;
RTL_W16(CPlusCmd, tp->cp_cmd);
RTL_R16(CPlusCmd);
}
if (changed & NETIF_F_RXALL) {
int tmp = (RTL_R32(RxConfig) & ~(AcceptErr | AcceptRunt));
if (features & NETIF_F_RXALL)
tmp |= (AcceptErr | AcceptRunt);
RTL_W32(RxConfig, tmp);
}
if (features & NETIF_F_HW_VLAN_CTAG_RX)
tp->cp_cmd |= RxVlan;
else
tp->cp_cmd &= ~RxVlan;
tp->cp_cmd |= RTL_R16(CPlusCmd) & ~(RxVlan | RxChkSum);
RTL_W16(CPlusCmd, tp->cp_cmd);
RTL_R16(CPlusCmd);
}
static int rtl8169_set_features(struct net_device *dev,
@ -1817,8 +1815,11 @@ static int rtl8169_set_features(struct net_device *dev,
{
struct rtl8169_private *tp = netdev_priv(dev);
features &= NETIF_F_RXALL | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX;
rtl_lock_work(tp);
__rtl8169_set_features(dev, features);
if (features ^ dev->features)
__rtl8169_set_features(dev, features);
rtl_unlock_work(tp);
return 0;
@ -7118,8 +7119,7 @@ static void rtl_hw_initialize(struct rtl8169_private *tp)
}
}
static int
rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
const unsigned int region = cfg->region;
@ -7194,7 +7194,7 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_mwi_2;
}
tp->cp_cmd = RxChkSum;
tp->cp_cmd = 0;
if ((sizeof(dma_addr_t) > 4) &&
!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) {
@ -7235,13 +7235,6 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
/*
* Pretend we are using VLANs; This bypasses a nasty bug where
* Interrupts stop flowing on high load on 8110SCd controllers.
*/
if (tp->mac_version == RTL_GIGA_MAC_VER_05)
tp->cp_cmd |= RxVlan;
rtl_init_mdio_ops(tp);
rtl_init_pll_power_ops(tp);
rtl_init_jumbo_ops(tp);
@ -7302,8 +7295,14 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
NETIF_F_HIGHDMA;
tp->cp_cmd |= RxChkSum | RxVlan;
/*
* Pretend we are using VLANs; This bypasses a nasty bug where
* Interrupts stop flowing on high load on 8110SCd controllers.
*/
if (tp->mac_version == RTL_GIGA_MAC_VER_05)
/* 8110SCd requires hardware Rx VLAN - disallow toggling */
/* Disallow toggling */
dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
if (tp->txd_version == RTL_TD_0)


@ -2933,6 +2933,9 @@ void efx_farch_filter_sync_rx_mode(struct efx_nic *efx)
u32 crc;
int bit;
if (!efx_dev_registered(efx))
return;
netif_addr_lock_bh(net_dev);
efx->unicast_filter = !(net_dev->flags & IFF_PROMISC);


@ -350,14 +350,17 @@ static int vnet_walk_rx_one(struct vnet_port *port,
if (IS_ERR(desc))
return PTR_ERR(desc);
if (desc->hdr.state != VIO_DESC_READY)
return 1;
rmb();
viodbg(DATA, "vio_walk_rx_one desc[%02x:%02x:%08x:%08x:%llx:%llx]\n",
desc->hdr.state, desc->hdr.ack,
desc->size, desc->ncookies,
desc->cookies[0].cookie_addr,
desc->cookies[0].cookie_size);
if (desc->hdr.state != VIO_DESC_READY)
return 1;
err = vnet_rx_one(port, desc->size, desc->cookies, desc->ncookies);
if (err == -ECONNRESET)
return err;


@ -699,6 +699,28 @@ static void cpsw_rx_handler(void *token, int len, int status)
cpsw_dual_emac_src_port_detect(status, priv, ndev, skb);
if (unlikely(status < 0) || unlikely(!netif_running(ndev))) {
bool ndev_status = false;
struct cpsw_slave *slave = priv->slaves;
int n;
if (priv->data.dual_emac) {
/* In dual emac mode check for all interfaces */
for (n = priv->data.slaves; n; n--, slave++)
if (netif_running(slave->ndev))
ndev_status = true;
}
if (ndev_status && (status >= 0)) {
/* The packet received is for an interface which
* is already down while the other interface is up
* and running.  Instead of freeing the skb, which
* would reduce the number of rx descriptors in the
* DMA engine, requeue it back to cpdma.
*/
new_skb = skb;
goto requeue;
}
/* the interface is going down, skbs are purged */
dev_kfree_skb_any(skb);
return;
@ -717,6 +739,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
new_skb = skb;
}
requeue:
ret = cpdma_chan_submit(priv->rxch, new_skb, new_skb->data,
skb_tailroom(new_skb), 0);
if (WARN_ON(ret < 0))
@ -2311,10 +2334,19 @@ static int cpsw_suspend(struct device *dev)
struct net_device *ndev = platform_get_drvdata(pdev);
struct cpsw_priv *priv = netdev_priv(ndev);
if (netif_running(ndev))
cpsw_ndo_stop(ndev);
if (priv->data.dual_emac) {
int i;
for_each_slave(priv, soft_reset_slave);
for (i = 0; i < priv->data.slaves; i++) {
if (netif_running(priv->slaves[i].ndev))
cpsw_ndo_stop(priv->slaves[i].ndev);
soft_reset_slave(priv->slaves + i);
}
} else {
if (netif_running(ndev))
cpsw_ndo_stop(ndev);
for_each_slave(priv, soft_reset_slave);
}
pm_runtime_put_sync(&pdev->dev);
@ -2328,14 +2360,24 @@ static int cpsw_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct net_device *ndev = platform_get_drvdata(pdev);
struct cpsw_priv *priv = netdev_priv(ndev);
pm_runtime_get_sync(&pdev->dev);
/* Select default pin state */
pinctrl_pm_select_default_state(&pdev->dev);
if (netif_running(ndev))
cpsw_ndo_open(ndev);
if (priv->data.dual_emac) {
int i;
for (i = 0; i < priv->data.slaves; i++) {
if (netif_running(priv->slaves[i].ndev))
cpsw_ndo_open(priv->slaves[i].ndev);
}
} else {
if (netif_running(ndev))
cpsw_ndo_open(ndev);
}
return 0;
}


@ -36,6 +36,7 @@
#include <linux/netpoll.h>
#define MACVLAN_HASH_SIZE (1 << BITS_PER_BYTE)
#define MACVLAN_BC_QUEUE_LEN 1000
struct macvlan_port {
struct net_device *dev;
@ -248,7 +249,7 @@ static void macvlan_broadcast_enqueue(struct macvlan_port *port,
goto err;
spin_lock(&port->bc_queue.lock);
if (skb_queue_len(&port->bc_queue) < skb->dev->tx_queue_len) {
if (skb_queue_len(&port->bc_queue) < MACVLAN_BC_QUEUE_LEN) {
__skb_queue_tail(&port->bc_queue, nskb);
err = 0;
}
@ -806,6 +807,7 @@ static netdev_features_t macvlan_fix_features(struct net_device *dev,
features,
mask);
features |= ALWAYS_ON_FEATURES;
features &= ~NETIF_F_NETNS_LOCAL;
return features;
}


@ -592,8 +592,7 @@ static struct phy_driver ksphy_driver[] = {
.phy_id = PHY_ID_KSZ9031,
.phy_id_mask = 0x00fffff0,
.name = "Micrel KSZ9031 Gigabit PHY",
.features = (PHY_GBIT_FEATURES | SUPPORTED_Pause
| SUPPORTED_Asym_Pause),
.features = (PHY_GBIT_FEATURES | SUPPORTED_Pause),
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
.config_init = ksz9031_config_init,
.config_aneg = genphy_config_aneg,


@ -2019,7 +2019,7 @@ static int rtl8153_enable(struct r8152 *tp)
return rtl_enable(tp);
}
static void rtl8152_disable(struct r8152 *tp)
static void rtl_disable(struct r8152 *tp)
{
u32 ocp_data;
int i;
@ -2232,6 +2232,13 @@ static inline void r8152b_enable_aldps(struct r8152 *tp)
LINKENA | DIS_SDSAVE);
}
static void rtl8152_disable(struct r8152 *tp)
{
r8152b_disable_aldps(tp);
rtl_disable(tp);
r8152b_enable_aldps(tp);
}
static void r8152b_hw_phy_cfg(struct r8152 *tp)
{
u16 data;
@ -2242,11 +2249,8 @@ static void r8152b_hw_phy_cfg(struct r8152 *tp)
r8152_mdio_write(tp, MII_BMCR, data);
}
r8152b_disable_aldps(tp);
rtl_clear_bp(tp);
r8152b_enable_aldps(tp);
set_bit(PHY_RESET, &tp->flags);
}
@ -2255,9 +2259,6 @@ static void r8152b_exit_oob(struct r8152 *tp)
u32 ocp_data;
int i;
if (test_bit(RTL8152_UNPLUG, &tp->flags))
return;
ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
ocp_data &= ~RCR_ACPT_ALL;
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
@ -2347,7 +2348,7 @@ static void r8152b_enter_oob(struct r8152 *tp)
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1, RXFIFO_THR2_OOB);
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL2, RXFIFO_THR3_OOB);
rtl8152_disable(tp);
rtl_disable(tp);
for (i = 0; i < 1000; i++) {
ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
@ -2485,9 +2486,6 @@ static void r8153_first_init(struct r8152 *tp)
u32 ocp_data;
int i;
if (test_bit(RTL8152_UNPLUG, &tp->flags))
return;
rxdy_gated_en(tp, true);
r8153_teredo_off(tp);
@ -2560,7 +2558,7 @@ static void r8153_enter_oob(struct r8152 *tp)
ocp_data &= ~NOW_IS_OOB;
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
rtl8152_disable(tp);
rtl_disable(tp);
for (i = 0; i < 1000; i++) {
ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
@ -2624,6 +2622,13 @@ static void r8153_enable_aldps(struct r8152 *tp)
ocp_reg_write(tp, OCP_POWER_CFG, data);
}
static void rtl8153_disable(struct r8152 *tp)
{
r8153_disable_aldps(tp);
rtl_disable(tp);
r8153_enable_aldps(tp);
}
static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex)
{
u16 bmcr, anar, gbcr;
@ -2714,6 +2719,16 @@ static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex)
return ret;
}
static void rtl8152_up(struct r8152 *tp)
{
if (test_bit(RTL8152_UNPLUG, &tp->flags))
return;
r8152b_disable_aldps(tp);
r8152b_exit_oob(tp);
r8152b_enable_aldps(tp);
}
static void rtl8152_down(struct r8152 *tp)
{
if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
@ -2727,6 +2742,16 @@ static void rtl8152_down(struct r8152 *tp)
r8152b_enable_aldps(tp);
}
static void rtl8153_up(struct r8152 *tp)
{
if (test_bit(RTL8152_UNPLUG, &tp->flags))
return;
r8153_disable_aldps(tp);
r8153_first_init(tp);
r8153_enable_aldps(tp);
}
static void rtl8153_down(struct r8152 *tp)
{
if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
@ -2946,6 +2971,8 @@ static void r8152b_init(struct r8152 *tp)
if (test_bit(RTL8152_UNPLUG, &tp->flags))
return;
r8152b_disable_aldps(tp);
if (tp->version == RTL_VER_01) {
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE);
ocp_data &= ~LED_MODE_MASK;
@ -2984,6 +3011,7 @@ static void r8153_init(struct r8152 *tp)
if (test_bit(RTL8152_UNPLUG, &tp->flags))
return;
r8153_disable_aldps(tp);
r8153_u1u2en(tp, false);
for (i = 0; i < 500; i++) {
@ -3392,7 +3420,7 @@ static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id)
ops->init = r8152b_init;
ops->enable = rtl8152_enable;
ops->disable = rtl8152_disable;
ops->up = r8152b_exit_oob;
ops->up = rtl8152_up;
ops->down = rtl8152_down;
ops->unload = rtl8152_unload;
ret = 0;
@ -3400,8 +3428,8 @@ static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id)
case PRODUCT_ID_RTL8153:
ops->init = r8153_init;
ops->enable = rtl8153_enable;
ops->disable = rtl8152_disable;
ops->up = r8153_first_init;
ops->disable = rtl8153_disable;
ops->up = rtl8153_up;
ops->down = rtl8153_down;
ops->unload = rtl8153_unload;
ret = 0;
@ -3416,8 +3444,8 @@ static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id)
case PRODUCT_ID_SAMSUNG:
ops->init = r8153_init;
ops->enable = rtl8153_enable;
ops->disable = rtl8152_disable;
ops->up = r8153_first_init;
ops->disable = rtl8153_disable;
ops->up = rtl8153_up;
ops->down = rtl8153_down;
ops->unload = rtl8153_unload;
ret = 0;


@ -57,7 +57,7 @@ int ath9k_cmn_beacon_config_sta(struct ath_hw *ah,
struct ath9k_beacon_state *bs)
{
struct ath_common *common = ath9k_hw_common(ah);
int dtim_intval, sleepduration;
int dtim_intval;
u64 tsf;
/* No need to configure beacon if we are not associated */
@ -75,7 +75,6 @@ int ath9k_cmn_beacon_config_sta(struct ath_hw *ah,
* last beacon we received (which may be none).
*/
dtim_intval = conf->intval * conf->dtim_period;
sleepduration = ah->hw->conf.listen_interval * conf->intval;
/*
* Pull nexttbtt forward to reflect the current
@ -113,7 +112,7 @@ int ath9k_cmn_beacon_config_sta(struct ath_hw *ah,
*/
bs->bs_sleepduration = TU_TO_USEC(roundup(IEEE80211_MS_TO_TU(100),
sleepduration));
conf->intval));
if (bs->bs_sleepduration > bs->bs_dtimperiod)
bs->bs_sleepduration = bs->bs_dtimperiod;


@ -978,7 +978,7 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
struct ath_hw *ah = common->ah;
struct ath_htc_rx_status *rxstatus;
struct ath_rx_status rx_stats;
bool decrypt_error;
bool decrypt_error = false;
if (skb->len < HTC_RX_FRAME_HEADER_SIZE) {
ath_err(common, "Corrupted RX frame, dropping (len: %d)\n",


@ -27,10 +27,17 @@ config BRCMFMAC
one of the bus interface support. If you choose to build a module,
it'll be called brcmfmac.ko.
config BRCMFMAC_PROTO_BCDC
bool
config BRCMFMAC_PROTO_MSGBUF
bool
config BRCMFMAC_SDIO
bool "SDIO bus interface support for FullMAC driver"
depends on (MMC = y || MMC = BRCMFMAC)
depends on BRCMFMAC
select BRCMFMAC_PROTO_BCDC
select FW_LOADER
default y
---help---
@ -42,6 +49,7 @@ config BRCMFMAC_USB
bool "USB bus interface support for FullMAC driver"
depends on (USB = y || USB = BRCMFMAC)
depends on BRCMFMAC
select BRCMFMAC_PROTO_BCDC
select FW_LOADER
---help---
This option enables the USB bus interface support for Broadcom
@ -52,6 +60,8 @@ config BRCMFMAC_PCIE
bool "PCIE bus interface support for FullMAC driver"
depends on BRCMFMAC
depends on PCI
depends on HAS_DMA
select BRCMFMAC_PROTO_MSGBUF
select FW_LOADER
---help---
This option enables the PCIE bus interface support for Broadcom


@ -30,16 +30,18 @@ brcmfmac-objs += \
fwsignal.o \
p2p.o \
proto.o \
bcdc.o \
commonring.o \
flowring.o \
msgbuf.o \
dhd_common.o \
dhd_linux.o \
firmware.o \
feature.o \
btcoex.o \
vendor.o
brcmfmac-$(CONFIG_BRCMFMAC_PROTO_BCDC) += \
bcdc.o
brcmfmac-$(CONFIG_BRCMFMAC_PROTO_MSGBUF) += \
commonring.o \
flowring.o \
msgbuf.o
brcmfmac-$(CONFIG_BRCMFMAC_SDIO) += \
dhd_sdio.o \
bcmsdh.o


@ -16,9 +16,12 @@
#ifndef BRCMFMAC_BCDC_H
#define BRCMFMAC_BCDC_H
#ifdef CONFIG_BRCMFMAC_PROTO_BCDC
int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr);
void brcmf_proto_bcdc_detach(struct brcmf_pub *drvr);
#else
static inline int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr) { return 0; }
static inline void brcmf_proto_bcdc_detach(struct brcmf_pub *drvr) {}
#endif
#endif /* BRCMFMAC_BCDC_H */


@ -185,7 +185,13 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr,
ifevent->action, ifevent->ifidx, ifevent->bssidx,
ifevent->flags, ifevent->role);
if (ifevent->flags & BRCMF_E_IF_FLAG_NOIF) {
/* The P2P Device interface event must not be ignored
* contrary to what firmware tells us. The only way to
* distinguish the P2P Device is by looking at the ifidx
* and bssidx received.
*/
if (!(ifevent->ifidx == 0 && ifevent->bssidx == 1) &&
(ifevent->flags & BRCMF_E_IF_FLAG_NOIF)) {
brcmf_dbg(EVENT, "event can be ignored\n");
return;
}
@ -210,12 +216,12 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr,
return;
}
if (ifevent->action == BRCMF_E_IF_CHANGE)
if (ifp && ifevent->action == BRCMF_E_IF_CHANGE)
brcmf_fws_reset_interface(ifp);
err = brcmf_fweh_call_event_handler(ifp, emsg->event_code, emsg, data);
if (ifevent->action == BRCMF_E_IF_DEL) {
if (ifp && ifevent->action == BRCMF_E_IF_DEL) {
brcmf_fws_del_interface(ifp);
brcmf_del_if(drvr, ifevent->bssidx);
}


@ -172,6 +172,8 @@ enum brcmf_fweh_event_code {
#define BRCMF_E_IF_ROLE_STA 0
#define BRCMF_E_IF_ROLE_AP 1
#define BRCMF_E_IF_ROLE_WDS 2
#define BRCMF_E_IF_ROLE_P2P_GO 3
#define BRCMF_E_IF_ROLE_P2P_CLIENT 4
/**
* definitions for event packet validation.


@@ -15,6 +15,7 @@
#ifndef BRCMFMAC_MSGBUF_H
#define BRCMFMAC_MSGBUF_H
+#ifdef CONFIG_BRCMFMAC_PROTO_MSGBUF
#define BRCMF_H2D_MSGRING_CONTROL_SUBMIT_MAX_ITEM 20
#define BRCMF_H2D_MSGRING_RXPOST_SUBMIT_MAX_ITEM 256
@@ -32,9 +33,15 @@
int brcmf_proto_msgbuf_rx_trigger(struct device *dev);
+void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u8 flowid);
int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr);
void brcmf_proto_msgbuf_detach(struct brcmf_pub *drvr);
-void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u8 flowid);
+#else
+static inline int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr)
+{
+	return 0;
+}
+static inline void brcmf_proto_msgbuf_detach(struct brcmf_pub *drvr) {}
+#endif
#endif /* BRCMFMAC_MSGBUF_H */


@@ -497,8 +497,11 @@ brcmf_configure_arp_offload(struct brcmf_if *ifp, bool enable)
static void
brcmf_cfg80211_update_proto_addr_mode(struct wireless_dev *wdev)
{
-	struct net_device *ndev = wdev->netdev;
-	struct brcmf_if *ifp = netdev_priv(ndev);
+	struct brcmf_cfg80211_vif *vif;
+	struct brcmf_if *ifp;
+
+	vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev);
+	ifp = vif->ifp;
if ((wdev->iftype == NL80211_IFTYPE_ADHOC) ||
(wdev->iftype == NL80211_IFTYPE_AP) ||
@@ -5143,6 +5146,7 @@ static int brcmf_enable_bw40_2g(struct brcmf_cfg80211_info *cfg)
ch.band = BRCMU_CHAN_BAND_2G;
ch.bw = BRCMU_CHAN_BW_40;
ch.sb = BRCMU_CHAN_SB_NONE;
ch.chnum = 0;
cfg->d11inf.encchspec(&ch);
@@ -5176,6 +5180,7 @@ static int brcmf_enable_bw40_2g(struct brcmf_cfg80211_info *cfg)
brcmf_update_bw40_channel_flag(&band->channels[j], &ch);
}
kfree(pbuf);
}
return err;
}


@@ -40,7 +40,7 @@
#include "commands.h"
#include "power.h"
-static bool force_cam;
+static bool force_cam = true;
module_param(force_cam, bool, 0644);
MODULE_PARM_DESC(force_cam, "force continuously aware mode (no power saving at all)");


@@ -83,6 +83,8 @@
#define IWL7260_TX_POWER_VERSION 0xffff /* meaningless */
#define IWL3160_NVM_VERSION 0x709
#define IWL3160_TX_POWER_VERSION 0xffff /* meaningless */
#define IWL3165_NVM_VERSION 0x709
#define IWL3165_TX_POWER_VERSION 0xffff /* meaningless */
#define IWL7265_NVM_VERSION 0x0a1d
#define IWL7265_TX_POWER_VERSION 0xffff /* meaningless */
@@ -92,6 +94,9 @@
#define IWL3160_FW_PRE "iwlwifi-3160-"
#define IWL3160_MODULE_FIRMWARE(api) IWL3160_FW_PRE __stringify(api) ".ucode"
#define IWL3165_FW_PRE "iwlwifi-3165-"
#define IWL3165_MODULE_FIRMWARE(api) IWL3165_FW_PRE __stringify(api) ".ucode"
#define IWL7265_FW_PRE "iwlwifi-7265-"
#define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode"
@@ -213,6 +218,16 @@ static const struct iwl_pwr_tx_backoff iwl7265_pwr_tx_backoffs[] = {
{0},
};
const struct iwl_cfg iwl3165_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 3165",
.fw_name_pre = IWL3165_FW_PRE,
IWL_DEVICE_7000,
.ht_params = &iwl7000_ht_params,
.nvm_ver = IWL3165_NVM_VERSION,
.nvm_calib_ver = IWL3165_TX_POWER_VERSION,
.pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
};
const struct iwl_cfg iwl7265_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 7265",
.fw_name_pre = IWL7265_FW_PRE,
@@ -245,4 +260,5 @@ const struct iwl_cfg iwl7265_n_cfg = {
MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL3160_UCODE_API_OK));
MODULE_FIRMWARE(IWL3165_MODULE_FIRMWARE(IWL3160_UCODE_API_OK));
MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));


@@ -120,6 +120,8 @@ enum iwl_led_mode {
#define IWL_LONG_WD_TIMEOUT 10000
#define IWL_MAX_WD_TIMEOUT 120000
#define IWL_DEFAULT_MAX_TX_POWER 22
/* Antenna presence definitions */
#define ANT_NONE 0x0
#define ANT_A BIT(0)
@@ -335,6 +337,7 @@ extern const struct iwl_cfg iwl7260_n_cfg;
extern const struct iwl_cfg iwl3160_2ac_cfg;
extern const struct iwl_cfg iwl3160_2n_cfg;
extern const struct iwl_cfg iwl3160_n_cfg;
extern const struct iwl_cfg iwl3165_2ac_cfg;
extern const struct iwl_cfg iwl7265_2ac_cfg;
extern const struct iwl_cfg iwl7265_2n_cfg;
extern const struct iwl_cfg iwl7265_n_cfg;


@@ -146,8 +146,6 @@ static const u8 iwl_nvm_channels_family_8000[] = {
#define LAST_2GHZ_HT_PLUS	9
#define LAST_5GHZ_HT		161
-#define DEFAULT_MAX_TX_POWER 16
/* rate data (static) */
static struct ieee80211_rate iwl_cfg80211_rates[] = {
{ .bitrate = 1 * 10, .hw_value = 0, .hw_value_short = 0, },
@@ -295,7 +293,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
		 * Default value - highest tx power value. max_power
		 * is not used in mvm, and is used for backwards compatibility
		 */
-		channel->max_power = DEFAULT_MAX_TX_POWER;
+		channel->max_power = IWL_DEFAULT_MAX_TX_POWER;
is_5ghz = channel->band == IEEE80211_BAND_5GHZ;
IWL_DEBUG_EEPROM(dev,
"Ch. %d [%sGHz] %s%s%s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n",


@@ -585,8 +585,6 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
lockdep_assert_held(&mvm->mutex);
if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) {
u32 mode;
switch (mvm->bt_force_ant_mode) {
case BT_FORCE_ANT_BT:
mode = BT_COEX_BT;
@@ -756,7 +754,8 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
	struct iwl_bt_iterator_data *data = _data;
	struct iwl_mvm *mvm = data->mvm;
	struct ieee80211_chanctx_conf *chanctx_conf;
-	enum ieee80211_smps_mode smps_mode;
+	/* default smps_mode is AUTOMATIC - only used for client modes */
+	enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_AUTOMATIC;
u32 bt_activity_grading;
int ave_rssi;
@@ -764,8 +763,6 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
	switch (vif->type) {
	case NL80211_IFTYPE_STATION:
-		/* default smps_mode for BSS / P2P client is AUTOMATIC */
-		smps_mode = IEEE80211_SMPS_AUTOMATIC;
break;
case NL80211_IFTYPE_AP:
if (!mvmvif->ap_ibss_active)
@@ -797,7 +794,7 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
	else if (bt_activity_grading >= BT_LOW_TRAFFIC)
		smps_mode = IEEE80211_SMPS_DYNAMIC;
-	/* relax SMPS contraints for next association */
+	/* relax SMPS constraints for next association */
if (!vif->bss_conf.assoc)
smps_mode = IEEE80211_SMPS_AUTOMATIC;


@@ -74,8 +74,7 @@ static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm,
	switch (param) {
	case MVM_DEBUGFS_PM_KEEP_ALIVE: {
-		struct ieee80211_hw *hw = mvm->hw;
-		int dtimper = hw->conf.ps_dtim_period ?: 1;
+		int dtimper = vif->bss_conf.dtim_period ?: 1;
int dtimper_msec = dtimper * vif->bss_conf.beacon_int;
IWL_DEBUG_POWER(mvm, "debugfs: set keep_alive= %d sec\n", val);


@@ -1563,14 +1563,14 @@ enum iwl_sf_scenario {
/**
 * Smart Fifo configuration command.
- * @state: smart fifo state, types listed in iwl_sf_sate.
+ * @state: smart fifo state, types listed in enum %iwl_sf_sate.
* @watermark: Minimum allowed availabe free space in RXF for transient state.
* @long_delay_timeouts: aging and idle timer values for each scenario
* in long delay state.
* @full_on_timeouts: timer values for each scenario in full on state.
*/
struct iwl_sf_cfg_cmd {
-	enum iwl_sf_state state;
+	__le32 state;
__le32 watermark[SF_TRANSIENT_STATES_NUMBER];
__le32 long_delay_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES];
__le32 full_on_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES];


@@ -721,11 +721,6 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
	    !force_assoc_off) {
		u32 dtim_offs;
-		/* Allow beacons to pass through as long as we are not
-		 * associated, or we do not have dtim period information.
-		 */
-		cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON);
/*
* The DTIM count counts down, so when it is N that means N
* more beacon intervals happen until the DTIM TBTT. Therefore
@@ -759,6 +754,11 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
		ctxt_sta->is_assoc = cpu_to_le32(1);
	} else {
		ctxt_sta->is_assoc = cpu_to_le32(0);
+
+		/* Allow beacons to pass through as long as we are not
+		 * associated, or we do not have dtim period information.
+		 */
+		cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON);
}
ctxt_sta->bi = cpu_to_le32(vif->bss_conf.beacon_int);


@@ -396,12 +396,14 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
	else
		hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
-	/* TODO: enable that only for firmwares that don't crash */
-	/* hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN; */
-	hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
-	hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
-	/* we create the 802.11 header and zero length SSID IE. */
-	hw->wiphy->max_sched_scan_ie_len = SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
+	if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 10) {
+		hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
+		hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
+		hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
+		/* we create the 802.11 header and zero length SSID IE. */
+		hw->wiphy->max_sched_scan_ie_len =
+			SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
+	}
hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
NL80211_FEATURE_LOW_PRIORITY_SCAN |
@@ -1524,11 +1526,6 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
		 */
		iwl_mvm_remove_time_event(mvm, mvmvif,
					  &mvmvif->time_event_data);
-	} else if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS |
-			      BSS_CHANGED_QOS)) {
-		ret = iwl_mvm_power_update_mac(mvm);
-		if (ret)
-			IWL_ERR(mvm, "failed to update power mode\n");
}
if (changes & BSS_CHANGED_BEACON_INFO) {
@@ -1536,6 +1533,12 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
		WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
	}
+	if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | BSS_CHANGED_QOS)) {
+		ret = iwl_mvm_power_update_mac(mvm);
+		if (ret)
+			IWL_ERR(mvm, "failed to update power mode\n");
+	}
if (changes & BSS_CHANGED_TXPOWER) {
IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n",
bss_conf->txpower);


@@ -281,7 +281,6 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
				    struct ieee80211_vif *vif,
				    struct iwl_mac_power_cmd *cmd)
{
-	struct ieee80211_hw *hw = mvm->hw;
struct ieee80211_chanctx_conf *chanctx_conf;
struct ieee80211_channel *chan;
int dtimper, dtimper_msec;
@@ -292,7 +291,7 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
	cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
							    mvmvif->color));
-	dtimper = hw->conf.ps_dtim_period ?: 1;
+	dtimper = vif->bss_conf.dtim_period;
/*
* Regardless of power management state the driver must set
@@ -885,7 +884,7 @@ int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm,
	iwl_mvm_power_build_cmd(mvm, vif, &cmd);
	if (enable) {
		/* configure skip over dtim up to 300 msec */
-		int dtimper = mvm->hw->conf.ps_dtim_period ?: 1;
+		int dtimper = vif->bss_conf.dtim_period ?: 1;
int dtimper_msec = dtimper * vif->bss_conf.beacon_int;
if (WARN_ON(!dtimper_msec))


@@ -149,13 +149,13 @@ static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
		le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_ENERGY_ANT_ABC_IDX]);
	energy_a = (val & IWL_RX_INFO_ENERGY_ANT_A_MSK) >>
			IWL_RX_INFO_ENERGY_ANT_A_POS;
-	energy_a = energy_a ? -energy_a : -256;
+	energy_a = energy_a ? -energy_a : S8_MIN;
	energy_b = (val & IWL_RX_INFO_ENERGY_ANT_B_MSK) >>
			IWL_RX_INFO_ENERGY_ANT_B_POS;
-	energy_b = energy_b ? -energy_b : -256;
+	energy_b = energy_b ? -energy_b : S8_MIN;
	energy_c = (val & IWL_RX_INFO_ENERGY_ANT_C_MSK) >>
			IWL_RX_INFO_ENERGY_ANT_C_POS;
-	energy_c = energy_c ? -energy_c : -256;
+	energy_c = energy_c ? -energy_c : S8_MIN;
max_energy = max(energy_a, energy_b);
max_energy = max(max_energy, energy_c);
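
Why S8_MIN and not -256: the result is eventually reported through an 8-bit signed field (mac80211's rx status signal is an s8), where -256 truncates to 0 and would read as a strong signal rather than "no measurement". A standalone demonstration of the truncation:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int8_t bad  = (int8_t)-256;	/* low byte of 0xFFFFFF00 is 0x00 */
	int8_t good = (int8_t)-128;	/* S8_MIN: the real representable floor */

	printf("-256 as s8: %d\n", bad);	/* prints 0 */
	printf("-128 as s8: %d\n", good);	/* prints -128 */
	return 0;
}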


@@ -172,7 +172,7 @@ static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id,
			     enum iwl_sf_state new_state)
{
	struct iwl_sf_cfg_cmd sf_cmd = {
-		.state = new_state,
+		.state = cpu_to_le32(new_state),
};
struct ieee80211_sta *sta;
int ret = 0;
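
The struct change above (enum field replaced by __le32) is the usual cure for this bug class: the firmware reads the field as little-endian, so the host must convert explicitly rather than store a native-endian enum. A userspace sketch of the same idea, with htole32() standing in for cpu_to_le32():

#include <endian.h>
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint32_t state = 2;			/* e.g. an iwl_sf_state value */
	uint32_t wire  = htole32(state);	/* htole32 ~ cpu_to_le32 */

	printf("host %" PRIu32 " -> wire 0x%08" PRIx32 "\n", state, wire);
	return 0;
}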


@@ -168,10 +168,14 @@ static void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm,
	/*
	 * for data packets, rate info comes from the table inside the fw. This
-	 * table is controlled by LINK_QUALITY commands
+	 * table is controlled by LINK_QUALITY commands. Exclude ctrl port
+	 * frames like EAPOLs which should be treated as mgmt frames. This
+	 * avoids them being sent initially in high rates which increases the
+	 * chances for completion of the 4-Way handshake.
	 */
-	if (ieee80211_is_data(fc) && sta) {
+	if (ieee80211_is_data(fc) && sta &&
+	    !(info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO)) {
tx_cmd->initial_rate_index = 0;
tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
return;


@@ -352,11 +352,17 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x08B3, 0x8060, iwl3160_2n_cfg)},
{IWL_PCI_DEVICE(0x08B3, 0x8062, iwl3160_n_cfg)},
{IWL_PCI_DEVICE(0x08B4, 0x8270, iwl3160_2ac_cfg)},
{IWL_PCI_DEVICE(0x08B4, 0x8370, iwl3160_2ac_cfg)},
{IWL_PCI_DEVICE(0x08B4, 0x8272, iwl3160_2ac_cfg)},
{IWL_PCI_DEVICE(0x08B3, 0x8470, iwl3160_2ac_cfg)},
{IWL_PCI_DEVICE(0x08B3, 0x8570, iwl3160_2ac_cfg)},
{IWL_PCI_DEVICE(0x08B3, 0x1070, iwl3160_2ac_cfg)},
{IWL_PCI_DEVICE(0x08B3, 0x1170, iwl3160_2ac_cfg)},
/* 3165 Series */
{IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)},
{IWL_PCI_DEVICE(0x3165, 0x4210, iwl3165_2ac_cfg)},
/* 7265 Series */
{IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5110, iwl7265_2ac_cfg)},
@@ -378,6 +384,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x095B, 0x5202, iwl7265_n_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x9010, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x9012, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x900A, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)},


@@ -43,7 +43,7 @@ config SCSI_DMA
config SCSI_NETLINK
	bool
	default	n
-	select NET
+	depends on NET
config SCSI_PROC_FS
bool "legacy /proc/scsi/ support"
@@ -257,7 +257,7 @@ config SCSI_SPI_ATTRS
config SCSI_FC_ATTRS
	tristate "FiberChannel Transport Attributes"
-	depends on SCSI
+	depends on SCSI && NET
select SCSI_NETLINK
help
If you wish to export transport-specific information about
@@ -585,28 +585,28 @@ config HYPERV_STORAGE
config LIBFC
	tristate "LibFC module"
-	select SCSI_FC_ATTRS
+	depends on SCSI_FC_ATTRS
select CRC32
---help---
Fibre Channel library module
config LIBFCOE
tristate "LibFCoE module"
-	select LIBFC
+	depends on LIBFC
---help---
Library for Fibre Channel over Ethernet module
config FCOE
tristate "FCoE module"
depends on PCI
-	select LIBFCOE
+	depends on LIBFCOE
---help---
Fibre Channel over Ethernet module
config FCOE_FNIC
tristate "Cisco FNIC Driver"
depends on PCI && X86
-	select LIBFCOE
+	depends on LIBFCOE
help
This is support for the Cisco PCI-Express FCoE HBA.
@@ -816,7 +816,7 @@ config SCSI_IBMVSCSI
config SCSI_IBMVFC
	tristate "IBM Virtual FC support"
	depends on PPC_PSERIES && SCSI
-	select SCSI_FC_ATTRS
+	depends on SCSI_FC_ATTRS
help
This is the IBM POWER Virtual FC Client
@@ -1266,7 +1266,7 @@ source "drivers/scsi/qla4xxx/Kconfig"
config SCSI_LPFC
	tristate "Emulex LightPulse Fibre Channel Support"
	depends on PCI && SCSI
-	select SCSI_FC_ATTRS
+	depends on SCSI_FC_ATTRS
select CRC_T10DIF
help
This lpfc driver supports the Emulex LightPulse
@@ -1676,7 +1676,7 @@ config SCSI_SUNESP
config ZFCP
	tristate "FCP host bus adapter driver for IBM eServer zSeries"
	depends on S390 && QDIO && SCSI
-	select SCSI_FC_ATTRS
+	depends on SCSI_FC_ATTRS
help
If you want to access SCSI devices attached to your IBM eServer
zSeries by means of Fibre Channel interfaces say Y.
@@ -1704,7 +1704,7 @@ config SCSI_PM8001
config SCSI_BFA_FC
	tristate "Brocade BFA Fibre Channel Support"
	depends on PCI && SCSI
-	select SCSI_FC_ATTRS
+	depends on SCSI_FC_ATTRS
help
This bfa driver supports all Brocade PCIe FC/FCOE host adapters.


@@ -1,11 +1,12 @@
config SCSI_BNX2X_FCOE
	tristate "QLogic NetXtreme II FCoE support"
	depends on PCI
+	depends on (IPV6 || IPV6=n)
+	depends on LIBFC
+	depends on LIBFCOE
	select NETDEVICES
	select ETHERNET
	select NET_VENDOR_BROADCOM
-	select LIBFC
-	select LIBFCOE
select CNIC
---help---
This driver supports FCoE offload for the QLogic NetXtreme II


@@ -2,6 +2,7 @@ config SCSI_BNX2_ISCSI
tristate "QLogic NetXtreme II iSCSI support"
depends on NET
depends on PCI
depends on (IPV6 || IPV6=n)
select SCSI_ISCSI_ATTRS
select NETDEVICES
select ETHERNET


@@ -1,7 +1,7 @@
config SCSI_CHELSIO_FCOE
	tristate "Chelsio Communications FCoE support"
	depends on PCI && SCSI
-	select SCSI_FC_ATTRS
+	depends on SCSI_FC_ATTRS
select FW_LOADER
help
This driver supports FCoE Offload functionality over


@@ -1,7 +1,7 @@
config SCSI_QLA_FC
	tristate "QLogic QLA2XXX Fibre Channel Support"
	depends on PCI && SCSI
-	select SCSI_FC_ATTRS
+	depends on SCSI_FC_ATTRS
select FW_LOADER
---help---
This qla2xxx driver supports all QLogic Fibre Channel
@@ -31,7 +31,7 @@ config SCSI_QLA_FC
config TCM_QLA2XXX
	tristate "TCM_QLA2XXX fabric module for Qlogic 2xxx series target mode HBAs"
	depends on SCSI_QLA_FC && TARGET_CORE
-	select LIBFC
+	depends on LIBFC
select BTREE
default n
---help---


@@ -209,6 +209,7 @@ enum {
MLX4_BMME_FLAG_TYPE_2_WIN = 1 << 9,
MLX4_BMME_FLAG_RESERVED_LKEY = 1 << 10,
MLX4_BMME_FLAG_FAST_REG_WR = 1 << 11,
MLX4_BMME_FLAG_VSD_INIT2RTR = 1 << 28,
};
enum mlx4_event {


@@ -56,7 +56,8 @@ enum mlx4_qp_optpar {
	MLX4_QP_OPTPAR_RNR_RETRY	= 1 << 13,
	MLX4_QP_OPTPAR_ACK_TIMEOUT	= 1 << 14,
	MLX4_QP_OPTPAR_SCHED_QUEUE	= 1 << 16,
-	MLX4_QP_OPTPAR_COUNTER_INDEX	= 1 << 20
+	MLX4_QP_OPTPAR_COUNTER_INDEX	= 1 << 20,
+	MLX4_QP_OPTPAR_VLAN_STRIPPING	= 1 << 21,
MLX4_QP_OPTPAR_VLAN_STRIPPING = 1 << 21,
};
enum mlx4_qp_state {
@@ -423,13 +424,20 @@ struct mlx4_wqe_inline_seg {
enum mlx4_update_qp_attr {
MLX4_UPDATE_QP_SMAC = 1 << 0,
MLX4_UPDATE_QP_VSD = 1 << 2,
MLX4_UPDATE_QP_SUPPORTED_ATTRS = (1 << 2) - 1
};
enum mlx4_update_qp_params_flags {
MLX4_UPDATE_QP_PARAMS_FLAGS_VSD_ENABLE = 1 << 0,
};
struct mlx4_update_qp_params {
u8 smac_index;
u32 flags;
};
-int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp,
+int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
enum mlx4_update_qp_attr attr,
struct mlx4_update_qp_params *params);
int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,


@@ -204,6 +204,7 @@ void ipv6_sock_ac_close(struct sock *sk);
int ipv6_dev_ac_inc(struct net_device *dev, const struct in6_addr *addr);
int __ipv6_dev_ac_dec(struct inet6_dev *idev, const struct in6_addr *addr);
void ipv6_ac_destroy_dev(struct inet6_dev *idev);
bool ipv6_chk_acast_addr(struct net *net, struct net_device *dev,
const struct in6_addr *addr);
bool ipv6_chk_acast_addr_src(struct net *net, struct net_device *dev,


@@ -480,6 +480,7 @@ void dst_init(void);
/* Flags for xfrm_lookup flags argument. */
enum {
XFRM_LOOKUP_ICMP = 1 << 0,
XFRM_LOOKUP_QUEUE = 1 << 1,
};
struct flowi;
@@ -490,7 +491,16 @@ static inline struct dst_entry *xfrm_lookup(struct net *net,
					    int flags)
{
	return dst_orig;
}
static inline struct dst_entry *xfrm_lookup_route(struct net *net,
struct dst_entry *dst_orig,
const struct flowi *fl,
struct sock *sk,
int flags)
{
return dst_orig;
}
static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
{
@@ -502,6 +512,10 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
const struct flowi *fl, struct sock *sk,
int flags);
struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
const struct flowi *fl, struct sock *sk,
int flags);
/* skb attached with this dst needs transformation if dst->xfrm is valid */
static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
{


@@ -394,4 +394,12 @@ static inline int genl_set_err(struct genl_family *family, struct net *net,
return netlink_set_err(net->genl_sock, portid, group, code);
}
static inline int genl_has_listeners(struct genl_family *family,
struct sock *sk, unsigned int group)
{
if (WARN_ON_ONCE(group >= family->n_mcgrps))
return -EINVAL;
group = family->mcgrp_offset + group;
return netlink_has_listeners(sk, group);
}
#endif /* __NET_GENERIC_NETLINK_H */
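
genl_has_listeners() translates a family-relative group index into the global netlink group id before asking netlink_has_listeners(), which is exactly what the openvswitch caller further down had gotten wrong. A toy userspace model of that translation (the offset value here is made up):

#include <stdio.h>

struct genl_family_model {
	unsigned int n_mcgrps;
	unsigned int mcgrp_offset;
};

static int has_listeners(const struct genl_family_model *f, unsigned int group,
			 unsigned long global_mask)
{
	if (group >= f->n_mcgrps)
		return -1;		/* mirrors the WARN_ON_ONCE() */
	group += f->mcgrp_offset;	/* family-relative -> global id */
	return (global_mask >> group) & 1;
}

int main(void)
{
	struct genl_family_model fam = { .n_mcgrps = 1, .mcgrp_offset = 17 };
	unsigned long mask = 1UL << 17;	/* someone joined global group 17 */

	printf("%d\n", has_listeners(&fam, 0, mask));	/* prints 1 */
	return 0;
}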


@@ -231,7 +231,8 @@ struct qdisc_skb_cb {
	unsigned int		pkt_len;
	u16			slave_dev_queue_mapping;
	u16			_pad;
-	unsigned char		data[24];
+#define QDISC_CB_PRIV_LEN 20
+	unsigned char		data[QDISC_CB_PRIV_LEN];
};
static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
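
The new layout is 4 + 2 + 2 + 20 = 28 bytes, which matters because skb->cb is 48 bytes and IPoIB (see "ipoib: validate struct ipoib_cb size" in this same pull) stacks a 20-byte hardware address behind qdisc_skb_cb. A userspace mirror of the struct that checks the arithmetic at compile time (assumes the common 4-byte int ABI):

#include <assert.h>
#include <stdint.h>

#define QDISC_CB_PRIV_LEN 20

struct qdisc_skb_cb_mirror {
	unsigned int	pkt_len;			/* 4 (assumed ABI) */
	uint16_t	slave_dev_queue_mapping;	/* 2 */
	uint16_t	_pad;				/* 2 */
	unsigned char	data[QDISC_CB_PRIV_LEN];	/* 20 */
};

/* 28 bytes for the qdisc layer + 20-byte IPoIB hardware address = 48,
 * exactly the size of skb->cb. */
static_assert(sizeof(struct qdisc_skb_cb_mirror) == 28, "qdisc area grew");
static_assert(sizeof(struct qdisc_skb_cb_mirror) + 20 == 48, "won't fit skb->cb");

int main(void) { return 0; }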


@@ -23,7 +23,6 @@
#include <linux/hash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
-#include <linux/log2.h>
#define HASH_DEFAULT_SIZE 64UL
#define HASH_MIN_SIZE 4UL


@@ -309,6 +309,9 @@ struct br_input_skb_cb {
int igmp;
int mrouters_only;
#endif
#ifdef CONFIG_BRIDGE_VLAN_FILTERING
bool vlan_filtered;
#endif
};
#define BR_INPUT_SKB_CB(__skb) ((struct br_input_skb_cb *)(__skb)->cb)


@@ -27,9 +27,13 @@ static void __vlan_add_flags(struct net_port_vlans *v, u16 vid, u16 flags)
{
if (flags & BRIDGE_VLAN_INFO_PVID)
__vlan_add_pvid(v, vid);
else
__vlan_delete_pvid(v, vid);
if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
set_bit(vid, v->untagged_bitmap);
else
clear_bit(vid, v->untagged_bitmap);
}
static int __vlan_add(struct net_port_vlans *v, u16 vid, u16 flags)
@@ -125,7 +129,8 @@ struct sk_buff *br_handle_vlan(struct net_bridge *br,
{
	u16 vid;
-	if (!br->vlan_enabled)
+	/* If this packet was not filtered at input, let it pass */
+	if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
goto out;
/* Vlan filter table must be configured at this point. The
@@ -164,8 +169,10 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
	/* If VLAN filtering is disabled on the bridge, all packets are
	 * permitted.
	 */
-	if (!br->vlan_enabled)
+	if (!br->vlan_enabled) {
+		BR_INPUT_SKB_CB(skb)->vlan_filtered = false;
		return true;
+	}
/* If there are no vlan in the permitted list, all packets are
* rejected.
@@ -173,6 +180,7 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
if (!v)
goto drop;
BR_INPUT_SKB_CB(skb)->vlan_filtered = true;
proto = br->vlan_proto;
/* If vlan tx offload is disabled on bridge device and frame was
@@ -251,7 +259,8 @@ bool br_allowed_egress(struct net_bridge *br,
{
	u16 vid;
-	if (!br->vlan_enabled)
+	/* If this packet was not filtered at input, let it pass */
+	if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
return true;
if (!v)
@@ -270,6 +279,7 @@ bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
struct net_bridge *br = p->br;
struct net_port_vlans *v;
/* If filtering was disabled at input, let it pass. */
if (!br->vlan_enabled)
return true;


@@ -4809,9 +4809,14 @@ static void netdev_adjacent_sysfs_del(struct net_device *dev,
	sysfs_remove_link(&(dev->dev.kobj), linkname);
}
-#define netdev_adjacent_is_neigh_list(dev, dev_list) \
-	(dev_list == &dev->adj_list.upper || \
-	 dev_list == &dev->adj_list.lower)
+static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
+						 struct net_device *adj_dev,
+						 struct list_head *dev_list)
+{
+	return (dev_list == &dev->adj_list.upper ||
+		dev_list == &dev->adj_list.lower) &&
+	       net_eq(dev_net(dev), dev_net(adj_dev));
+}
static int __netdev_adjacent_dev_insert(struct net_device *dev,
struct net_device *adj_dev,
@@ -4841,7 +4846,7 @@ static int __netdev_adjacent_dev_insert(struct net_device *dev,
	pr_debug("dev_hold for %s, because of link added from %s to %s\n",
		 adj_dev->name, dev->name, adj_dev->name);
-	if (netdev_adjacent_is_neigh_list(dev, dev_list)) {
+	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
if (ret)
goto free_adj;
@@ -4862,7 +4867,7 @@ static int __netdev_adjacent_dev_insert(struct net_device *dev,
	return 0;
remove_symlinks:
-	if (netdev_adjacent_is_neigh_list(dev, dev_list))
+	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
free_adj:
kfree(adj);
@@ -4895,8 +4900,7 @@ static void __netdev_adjacent_dev_remove(struct net_device *dev,
	if (adj->master)
		sysfs_remove_link(&(dev->dev.kobj), "master");
-	if (netdev_adjacent_is_neigh_list(dev, dev_list) &&
-	    net_eq(dev_net(dev),dev_net(adj_dev)))
+	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
list_del_rcu(&adj->list);


@@ -1866,7 +1866,7 @@ EXPORT_SYMBOL(sock_alloc_send_skb);
 * skb_page_frag_refill - check that a page_frag contains enough room
 * @sz: minimum size of the fragment we want to get
 * @pfrag: pointer to page_frag
- * @prio: priority for memory allocation
+ * @gfp: priority for memory allocation
*
* Note: While this allocator tries to use high order pages, there is
* no guarantee that allocations succeed. Therefore, @sz MUST be


@@ -79,10 +79,10 @@ static void __tunnel_dst_set(struct ip_tunnel_dst *idst,
	idst->saddr = saddr;
}
-static void tunnel_dst_set(struct ip_tunnel *t,
+static noinline void tunnel_dst_set(struct ip_tunnel *t,
			   struct dst_entry *dst, __be32 saddr)
{
-	__tunnel_dst_set(this_cpu_ptr(t->dst_cache), dst, saddr);
+	__tunnel_dst_set(raw_cpu_ptr(t->dst_cache), dst, saddr);
}
static void tunnel_dst_reset(struct ip_tunnel *t)
@@ -106,7 +106,7 @@ static struct rtable *tunnel_rtable_get(struct ip_tunnel *t,
	struct dst_entry *dst;
	rcu_read_lock();
-	idst = this_cpu_ptr(t->dst_cache);
+	idst = raw_cpu_ptr(t->dst_cache);
dst = rcu_dereference(idst->dst);
if (dst && !atomic_inc_not_zero(&dst->__refcnt))
dst = NULL;


@@ -2265,9 +2265,9 @@ struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
		return rt;
	if (flp4->flowi4_proto)
-		rt = (struct rtable *) xfrm_lookup(net, &rt->dst,
-						   flowi4_to_flowi(flp4),
-						   sk, 0);
+		rt = (struct rtable *)xfrm_lookup_route(net, &rt->dst,
+							flowi4_to_flowi(flp4),
+							sk, 0);
return rt;
}


@@ -3094,11 +3094,13 @@ static int addrconf_ifdown(struct net_device *dev, int how)
	write_unlock_bh(&idev->lock);
-	/* Step 5: Discard multicast list */
-	if (how)
+	/* Step 5: Discard anycast and multicast list */
+	if (how) {
+		ipv6_ac_destroy_dev(idev);
		ipv6_mc_destroy_dev(idev);
-	else
+	} else {
		ipv6_mc_down(idev);
+	}
idev->tstamp = jiffies;


@@ -351,6 +351,27 @@ static int ipv6_dev_ac_dec(struct net_device *dev, const struct in6_addr *addr)
return __ipv6_dev_ac_dec(idev, addr);
}
void ipv6_ac_destroy_dev(struct inet6_dev *idev)
{
struct ifacaddr6 *aca;
write_lock_bh(&idev->lock);
while ((aca = idev->ac_list) != NULL) {
idev->ac_list = aca->aca_next;
write_unlock_bh(&idev->lock);
addrconf_leave_solict(idev, &aca->aca_addr);
dst_hold(&aca->aca_rt->dst);
ip6_del_rt(aca->aca_rt);
aca_put(aca);
write_lock_bh(&idev->lock);
}
write_unlock_bh(&idev->lock);
}
/*
* check if the interface has this anycast address
* called with rcu_read_lock()
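
The new function walks a list it must modify under idev->lock while calling helpers that may block or take other locks, so it unlinks one entry, drops the lock for the teardown, and re-takes it before reading the list head again. A runnable userspace sketch of that pattern (a pthread mutex stands in for the rwlock):

#include <pthread.h>
#include <stdlib.h>

struct node { struct node *next; };

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *head;

/* stands in for addrconf_leave_solict()/ip6_del_rt(): may block */
static void teardown(struct node *n) { free(n); }

static void destroy_all(void)
{
	struct node *n;

	pthread_mutex_lock(&list_lock);
	while ((n = head) != NULL) {
		head = n->next;
		pthread_mutex_unlock(&list_lock); /* drop: teardown may block */
		teardown(n);
		pthread_mutex_lock(&list_lock);   /* re-take before re-reading head */
	}
	pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));
		n->next = head;
		head = n;
	}
	destroy_all();
	return 0;
}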


@@ -1009,7 +1009,7 @@ struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
	if (final_dst)
		fl6->daddr = *final_dst;
-	return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
+	return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
}
EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow);
@@ -1041,7 +1041,7 @@ struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
	if (final_dst)
		fl6->daddr = *final_dst;
-	return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
+	return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
}
EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow);


@@ -1822,7 +1822,7 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
		sinfo->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_PREAMBLE;
	if (sdata->vif.bss_conf.use_short_slot)
		sinfo->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_SLOT_TIME;
-	sinfo->bss_param.dtim_period = sdata->local->hw.conf.ps_dtim_period;
+	sinfo->bss_param.dtim_period = sdata->vif.bss_conf.dtim_period;
sinfo->bss_param.beacon_interval = sdata->vif.bss_conf.beacon_int;
sinfo->sta_flags.set = 0;


@@ -78,11 +78,12 @@ static const struct genl_multicast_group ovs_dp_vport_multicast_group = {
/* Check if need to build a reply message.
 * OVS userspace sets the NLM_F_ECHO flag if it needs the reply. */
-static bool ovs_must_notify(struct genl_info *info,
-			    const struct genl_multicast_group *grp)
+static bool ovs_must_notify(struct genl_family *family, struct genl_info *info,
+			    unsigned int group)
{
	return info->nlhdr->nlmsg_flags & NLM_F_ECHO ||
-	       netlink_has_listeners(genl_info_net(info)->genl_sock, 0);
+	       genl_has_listeners(family, genl_info_net(info)->genl_sock,
+				  group);
}
static void ovs_notify(struct genl_family *family,
@@ -763,7 +764,7 @@ static struct sk_buff *ovs_flow_cmd_alloc_info(const struct sw_flow_actions *act
{
	struct sk_buff *skb;
-	if (!always && !ovs_must_notify(info, &ovs_dp_flow_multicast_group))
+	if (!always && !ovs_must_notify(&dp_flow_genl_family, info, 0))
return NULL;
skb = genlmsg_new_unicast(ovs_flow_cmd_msg_size(acts), info, GFP_KERNEL);


@@ -163,6 +163,7 @@ static const struct acpi_device_id rfkill_acpi_match[] = {
{ "LNV4752", RFKILL_TYPE_GPS },
{ },
};
MODULE_DEVICE_TABLE(acpi, rfkill_acpi_match);
#endif
static struct platform_driver rfkill_gpio_driver = {


@@ -1143,7 +1143,7 @@ static long rxrpc_read(const struct key *key,
		if (copy_to_user(xdr, (s), _l) != 0)			\
			goto fault;					\
		if (_l & 3 &&						\
-		    copy_to_user((u8 *)xdr + _l, &zero, 4 - (_l & 3)) != 0) \
+		    copy_to_user((u8 __user *)xdr + _l, &zero, 4 - (_l & 3)) != 0) \
goto fault; \
xdr += (_l + 3) >> 2; \
} while(0)


@@ -133,10 +133,16 @@ static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx)
	--sch->q.qlen;
}
+/* private part of skb->cb[] that a qdisc is allowed to use
+ * is limited to QDISC_CB_PRIV_LEN bytes.
+ * As a flow key might be too large, we store a part of it only.
+ */
+#define CHOKE_K_LEN min_t(u32, sizeof(struct flow_keys), QDISC_CB_PRIV_LEN - 3)
+
struct choke_skb_cb {
	u16			classid;
	u8			keys_valid;
-	struct flow_keys	keys;
+	u8			keys[QDISC_CB_PRIV_LEN - 3];
};
static inline struct choke_skb_cb *choke_skb_cb(const struct sk_buff *skb)
@@ -163,22 +169,26 @@ static u16 choke_get_classid(const struct sk_buff *skb)
static bool choke_match_flow(struct sk_buff *skb1,
			     struct sk_buff *skb2)
{
+	struct flow_keys temp;
+
	if (skb1->protocol != skb2->protocol)
		return false;
	if (!choke_skb_cb(skb1)->keys_valid) {
		choke_skb_cb(skb1)->keys_valid = 1;
-		skb_flow_dissect(skb1, &choke_skb_cb(skb1)->keys);
+		skb_flow_dissect(skb1, &temp);
+		memcpy(&choke_skb_cb(skb1)->keys, &temp, CHOKE_K_LEN);
	}
	if (!choke_skb_cb(skb2)->keys_valid) {
		choke_skb_cb(skb2)->keys_valid = 1;
-		skb_flow_dissect(skb2, &choke_skb_cb(skb2)->keys);
+		skb_flow_dissect(skb2, &temp);
+		memcpy(&choke_skb_cb(skb2)->keys, &temp, CHOKE_K_LEN);
	}
	return !memcmp(&choke_skb_cb(skb1)->keys,
		       &choke_skb_cb(skb2)->keys,
-		       sizeof(struct flow_keys));
+		       CHOKE_K_LEN);
}
/*

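With only QDISC_CB_PRIV_LEN - 3 bytes left after classid and keys_valid, choke now compares a truncated prefix of the dissected flow key rather than the whole struct. A standalone model of that prefix comparison (the struct layout here is simplified, not the kernel's flow_keys):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define QDISC_CB_PRIV_LEN 20
#define CHOKE_K_LEN	  (QDISC_CB_PRIV_LEN - 3)  /* after classid + keys_valid */

/* simplified stand-in for the kernel's struct flow_keys */
struct flow_keys {
	uint32_t src;
	uint32_t dst;
	uint32_t ports;
	uint8_t  ip_proto;
};

static size_t key_len(void)
{
	/* mirrors min_t(u32, sizeof(struct flow_keys), QDISC_CB_PRIV_LEN - 3) */
	return sizeof(struct flow_keys) < CHOKE_K_LEN ?
	       sizeof(struct flow_keys) : CHOKE_K_LEN;
}

int main(void)
{
	struct flow_keys a = { .src = 1, .dst = 2, .ports = 80, .ip_proto = 6 };
	struct flow_keys b = a;

	printf("%s\n", memcmp(&a, &b, key_len()) == 0 ? "same flow" : "different");
	return 0;
}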

@@ -1996,6 +1996,9 @@ static int copy_msghdr_from_user(struct msghdr *kmsg,
if (copy_from_user(kmsg, umsg, sizeof(struct msghdr)))
return -EFAULT;
if (kmsg->msg_name == NULL)
kmsg->msg_namelen = 0;
if (kmsg->msg_namelen < 0)
return -EINVAL;
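
The added check covers a caller that passes msg_name == NULL together with a stale, nonzero msg_namelen; the kernel now normalizes the length to 0 instead of trusting it. A minimal userspace program exercising that case:

#include <sys/socket.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	int sv[2];
	char buf[] = "ping";
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	struct msghdr msg = {
		.msg_name    = NULL,	/* no destination address... */
		.msg_namelen = 32,	/* ...but a stale length: now forced to 0 */
		.msg_iov     = &iov,
		.msg_iovlen  = 1,
	};

	if (socketpair(AF_UNIX, SOCK_DGRAM, 0, sv) == 0) {
		sendmsg(sv[0], &msg, 0);
		close(sv[0]);
		close(sv[1]);
	}
	return 0;
}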


@@ -6969,6 +6969,9 @@ void __cfg80211_send_event_skb(struct sk_buff *skb, gfp_t gfp)
struct nlattr *data = ((void **)skb->cb)[2];
enum nl80211_multicast_groups mcgrp = NL80211_MCGRP_TESTMODE;
/* clear CB data for netlink core to own from now on */
memset(skb->cb, 0, sizeof(skb->cb));
nla_nest_end(skb, data);
genlmsg_end(skb, hdr);
@@ -9294,6 +9297,9 @@ int cfg80211_vendor_cmd_reply(struct sk_buff *skb)
void *hdr = ((void **)skb->cb)[1];
struct nlattr *data = ((void **)skb->cb)[2];
/* clear CB data for netlink core to own from now on */
memset(skb->cb, 0, sizeof(skb->cb));
if (WARN_ON(!rdev->cur_cmd_info)) {
kfree_skb(skb);
return -EINVAL;


@@ -39,6 +39,11 @@
#define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ))
#define XFRM_MAX_QUEUE_LEN 100
struct xfrm_flo {
struct dst_entry *dst_orig;
u8 flags;
};
static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
static struct xfrm_policy_afinfo __rcu *xfrm_policy_afinfo[NPROTO]
__read_mostly;
@@ -1877,13 +1882,14 @@ static int xdst_queue_output(struct sock *sk, struct sk_buff *skb)
}
static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
-						 struct dst_entry *dst,
+						 struct xfrm_flo *xflo,
const struct flowi *fl,
int num_xfrms,
u16 family)
{
int err;
struct net_device *dev;
struct dst_entry *dst;
struct dst_entry *dst1;
struct xfrm_dst *xdst;
@@ -1891,9 +1897,12 @@ static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
	if (IS_ERR(xdst))
		return xdst;
-	if (net->xfrm.sysctl_larval_drop || num_xfrms <= 0)
+	if (!(xflo->flags & XFRM_LOOKUP_QUEUE) ||
+	    net->xfrm.sysctl_larval_drop ||
+	    num_xfrms <= 0)
		return xdst;
+	dst = xflo->dst_orig;
	dst1 = &xdst->u.dst;
dst_hold(dst);
xdst->route = dst;
@@ -1935,7 +1944,7 @@ static struct flow_cache_object *
xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir,
		   struct flow_cache_object *oldflo, void *ctx)
{
-	struct dst_entry *dst_orig = (struct dst_entry *)ctx;
+	struct xfrm_flo *xflo = (struct xfrm_flo *)ctx;
struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
struct xfrm_dst *xdst, *new_xdst;
int num_pols = 0, num_xfrms = 0, i, err, pol_dead;
@@ -1976,7 +1985,8 @@ xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir,
		goto make_dummy_bundle;
	}
-	new_xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family, dst_orig);
+	new_xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family,
+						  xflo->dst_orig);
if (IS_ERR(new_xdst)) {
err = PTR_ERR(new_xdst);
if (err != -EAGAIN)
@@ -2010,7 +2020,7 @@ xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir,
	/* We found policies, but there's no bundles to instantiate:
	 * either because the policy blocks, has no transformations or
	 * we could not build template (no xfrm_states).*/
-	xdst = xfrm_create_dummy_bundle(net, dst_orig, fl, num_xfrms, family);
+	xdst = xfrm_create_dummy_bundle(net, xflo, fl, num_xfrms, family);
if (IS_ERR(xdst)) {
xfrm_pols_put(pols, num_pols);
return ERR_CAST(xdst);
@@ -2104,13 +2114,18 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
	}
	if (xdst == NULL) {
+		struct xfrm_flo xflo;
+
+		xflo.dst_orig = dst_orig;
+		xflo.flags = flags;
+
		/* To accelerate a bit... */
		if ((dst_orig->flags & DST_NOXFRM) ||
		    !net->xfrm.policy_count[XFRM_POLICY_OUT])
			goto nopol;
		flo = flow_cache_lookup(net, fl, family, dir,
-					xfrm_bundle_lookup, dst_orig);
+					xfrm_bundle_lookup, &xflo);
if (flo == NULL)
goto nopol;
if (IS_ERR(flo)) {
@@ -2138,7 +2153,7 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
		xfrm_pols_put(pols, drop_pols);
		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
-		return make_blackhole(net, family, dst_orig);
+		return ERR_PTR(-EREMOTE);
}
err = -EAGAIN;
@@ -2195,6 +2210,23 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
}
EXPORT_SYMBOL(xfrm_lookup);
/* Callers of xfrm_lookup_route() must ensure a call to dst_output().
* Otherwise we may send out blackholed packets.
*/
struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
const struct flowi *fl,
struct sock *sk, int flags)
{
struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk,
flags | XFRM_LOOKUP_QUEUE);
if (IS_ERR(dst) && PTR_ERR(dst) == -EREMOTE)
return make_blackhole(net, dst_orig->ops->family, dst_orig);
return dst;
}
EXPORT_SYMBOL(xfrm_lookup_route);
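
A hedged sketch of the intended calling convention (the function below is illustrative, not from the tree): callers that will hand the skb to dst_output() themselves may use xfrm_lookup_route() and safely receive a blackhole route, while everyone else keeps xfrm_lookup(), which now reports the would-be blackhole as -EREMOTE.

/* hypothetical caller: transmits the skb itself, so blackhole routes are OK */
static int example_output_path(struct net *net, struct sk_buff *skb,
			       struct flowi *fl, struct sock *sk)
{
	struct dst_entry *dst = xfrm_lookup_route(net, skb_dst(skb), fl, sk, 0);

	if (IS_ERR(dst))
		return PTR_ERR(dst);
	skb_dst_set(skb, dst);
	return dst_output(skb);	/* blackholed skbs are dropped here */
}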
static inline int
xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
{
@@ -2460,7 +2492,7 @@ int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
	skb_dst_force(skb);
-	dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, 0);
+	dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, XFRM_LOOKUP_QUEUE);
if (IS_ERR(dst)) {
res = 0;
dst = NULL;