Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) The kvaser CAN driver has fixed limits on some of its tables;
    validate that we won't exceed those limits at probe time. Fix from
    Olivier Sobrie.

 2) Fix rtl8192ce disabling interrupts for too long, from Olivier
    Langlois.

 3) Fix botched shift in ath5k driver, from Dan Carpenter.

 4) Fix corruption of deferred packets in TIPC, from Erik Hugne.

 5) Fix newlink error path in macvlan driver, from Cong Wang.

 6) Fix netpoll deadlock in bonding, from Ding Tianhong.

 7) Handle GSO packets properly in the forwarding path when
    fragmentation is necessary on egress, from Florian Westphal.

 8) Fix axienet build errors, from Michal Simek.

 9) Fix refcounting of ubufs on tx in vhost net driver, from
    Michael S. Tsirkin.

10) Carrier status isn't set properly in hyperv driver, from Haiyang
    Zhang.

11) Missing pci_disable_device() in tulip_remove_one(), from Ingo
    Molnar.

12) AF_PACKET's qdisc bypass mode doesn't adhere to the driver-provided
    TX queue selection method. Add a fallback-method mechanism to fix
    this bug, from Daniel Borkmann.

13) Fix regression in link-local route handling on GRE tunnels, from
    Nicolas Dichtel.

14) Bonding can assign duplicate aggregator IDs in some sequences of
    configuration; fix by making the allocation counter per-bond
    instead of global. From Jiri Bohac.

15) sctp_connectx() needs compat translations, from Daniel Borkmann.

16) Fix of_mdio PHY interrupt parsing, from Ben Dooks.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (62 commits)
  MAINTAINERS: add entry for the PHY library
  of_mdio: fix phy interrupt passing
  net: ethernet: update dependency and help text of mvneta
  NET: fec: only enable napi if we are successful
  af_packet: remove a stray tab in packet_set_ring()
  net: sctp: fix sctp_connectx abi for ia32 emulation/compat mode
  ipv4: fix counter in_slow_tot
  irtty-sir.c: Do not set_termios() on irtty_close()
  bonding: 802.3ad: make aggregator_identifier bond-private
  usbnet: remove generic hard_header_len check
  gre: add link local route when local addr is any
  batman-adv: fix potential kernel paging error for unicast transmissions
  batman-adv: avoid double free when orig_node initialization fails
  batman-adv: free skb on TVLV parsing success
  batman-adv: fix TT CRC computation by ensuring byte order
  batman-adv: fix potential orig_node reference leak
  batman-adv: avoid potential race condition when adding a new neighbour
  batman-adv: properly check pskb_may_pull return value
  batman-adv: release vlan object after checking the CRC
  batman-adv: fix TT-TVLV parsing on OGM reception
  ...
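For item 12, the mechanism is visible in the driver hunks below (bnx2x, ixgbe, mlx4, bonding, team, tun, and others): the core now hands a default queue picker to each driver's select-queue hook as a "fallback" argument, and drivers call that instead of __netdev_pick_tx() directly, so AF_PACKET's qdisc bypass can substitute its own picker while still honoring driver-specific mappings. A minimal userspace sketch of that shape — core_pick_tx, my_select_queue, and NUM_QUEUES are illustrative stand-ins, not kernel API:

#include <stdint.h>
#include <stdio.h>

#define NUM_QUEUES 4

struct device;					/* opaque stand-in */
struct packet { uint32_t hash; int is_special; };

/* the core passes its default queue picker down to the driver hook */
typedef uint16_t (*pick_tx_fallback_t)(struct device *dev, struct packet *p);

static uint16_t core_pick_tx(struct device *dev, struct packet *p)
{
	return p->hash % NUM_QUEUES;		/* models __netdev_pick_tx() */
}

/* driver hook: honor its own mapping for special traffic, otherwise
 * defer to the supplied fallback rather than a hard-coded core helper */
static uint16_t my_select_queue(struct device *dev, struct packet *p,
				void *accel_priv, pick_tx_fallback_t fallback)
{
	if (p->is_special)
		return NUM_QUEUES - 1;		/* e.g. a dedicated FCoE queue */
	return fallback(dev, p) % NUM_QUEUES;
}

int main(void)
{
	struct packet p = { .hash = 42, .is_special = 0 };

	printf("queue %u\n", my_select_queue(NULL, &p, NULL, core_pick_tx));
	return 0;
}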
This commit is contained in: commit b0d3f6d47e

89 changed files with 1081 additions and 361 deletions

Documentation/devicetree/bindings/net/sti-dwmac.txt (new file, 58 lines)

@@ -0,0 +1,58 @@
STMicroelectronics SoC DWMAC glue layer controller

The device node has the following properties.

Required properties:
 - compatible	: Can be "st,stih415-dwmac", "st,stih416-dwmac" or
   "st,stid127-dwmac".
 - reg		: Offset of the glue configuration register map in the system
   configuration regmap pointed to by the st,syscon property, and its size.

 - reg-names	: Should be "sti-ethconf".

 - st,syscon	: Should be a phandle to the system configuration node which
   encompasses these glue registers.

 - st,tx-retime-src: On STi parts, for gigabit speeds the 125MHz clock can be
   wired in from different sources: one via the TXCLK pin and the other via
   the CLK_125 pin. This wiring is entirely board-dependent, but the retiming
   glue logic must be configured accordingly. Possible values for this
   property:

	"txclk"   - if the 125MHz clock is wired up via the txclk line.
	"clk_125" - if the 125MHz clock is wired up via the clk_125 line.

   This property is only valid for gigabit setups (GMII, RGMII) and is unused
   for non-gigabit (MII and RMII) setups. Also note that the internal clockgen
   cannot generate a stable 125MHz clock.

 - st,ext-phyclk: This boolean property indicates who generates the clock for
   tx and rx. It is only valid for the RMII case, where the clock can be
   generated by either the MAC or the PHY.

 - clock-names	: Should be "sti-ethclk".
 - clocks	: Should point to the ethernet clockgen which can generate phyclk.


Example:

ethernet0: dwmac@fe810000 {
	device_type	= "network";
	compatible	= "st,stih416-dwmac", "snps,dwmac", "snps,dwmac-3.710";
	reg		= <0xfe810000 0x8000>, <0x8bc 0x4>;
	reg-names	= "stmmaceth", "sti-ethconf";
	interrupts	= <0 133 0>, <0 134 0>, <0 135 0>;
	interrupt-names	= "macirq", "eth_wake_irq", "eth_lpi";
	phy-mode	= "mii";

	st,syscon	= <&syscfg_rear>;

	snps,pbl	= <32>;
	snps,mixed-burst;

	resets		= <&softreset STIH416_ETH0_SOFTRESET>;
	reset-names	= "stmmaceth";
	pinctrl-0	= <&pinctrl_mii0>;
	pinctrl-names	= "default";
	clocks		= <&CLK_S_GMAC0_PHY>;
	clock-names	= "stmmaceth";
};
Deleted file (45 lines): the 3c505 driver README

@@ -1,45 +0,0 @@
The 3Com Etherlink Plus (3c505) driver.

This driver now uses DMA. There is currently no support for PIO operation.
The default DMA channel is 6; this is _not_ autoprobed, so you must
make sure you configure it correctly. If loading the driver as a
module, you can do this with "modprobe 3c505 dma=n". If the driver is
linked statically into the kernel, you must either use an "ether="
statement on the command line, or change the definition of ELP_DMA in 3c505.h.

The driver will warn you if it has to fall back on the compiled in
default DMA channel.

If no base address is given at boot time, the driver will autoprobe
ports 0x300, 0x280 and 0x310 (in that order). If no IRQ is given, the driver
will try to probe for it.

The driver can be used as a loadable module.

Theoretically, one instance of the driver can now run multiple cards,
in the standard way (when loading a module, say "modprobe 3c505
io=0x300,0x340 irq=10,11 dma=6,7" or whatever). I have not tested
this, though.

The driver may now support revision 2 hardware; the dependency on
being able to read the host control register has been removed. This
is also untested, since I don't have a suitable card.

Known problems:
 I still see "DMA upload timed out" messages from time to time. These
seem to be fairly non-fatal though.
 The card is old and slow.

To do:
 Improve probe/setup code
 Test multicast and promiscuous operation

Authors:
 The driver is mainly written by Craig Southeren, email
 <craigs@ineluki.apana.org.au>.
 Parts of the driver (adapting the driver to 1.1.4+ kernels,
 IRQ/address detection, some changes) and this README by
 Juha Laiho <jlaiho@ichaos.nullnet.fi>.
 DMA mode, more fixes, etc, by Philip Blundell <pjb27@cam.ac.uk>
 Multicard support, Software configurable DMA, etc., by
 Christopher Collins <ccollins@pcug.org.au>
MAINTAINERS (+11 lines)

@@ -3324,6 +3324,17 @@ S:	Maintained
 F:	include/linux/netfilter_bridge/
 F:	net/bridge/
 
+ETHERNET PHY LIBRARY
+M:	Florian Fainelli <f.fainelli@gmail.com>
+L:	netdev@vger.kernel.org
+S:	Maintained
+F:	include/linux/phy.h
+F:	include/linux/phy_fixed.h
+F:	drivers/net/phy/
+F:	Documentation/networking/phy.txt
+F:	drivers/of/of_mdio.c
+F:	drivers/of/of_net.c
+
 EXT2 FILE SYSTEM
 M:	Jan Kara <jack@suse.cz>
 L:	linux-ext4@vger.kernel.org
@@ -139,7 +139,7 @@ config MACVTAP
 	  This adds a specialized tap character device driver that is based
 	  on the MAC-VLAN network interface, called macvtap. A macvtap device
 	  can be added in the same way as a macvlan device, using 'type
-	  macvlan', and then be accessed through the tap user space interface.
+	  macvtap', and then be accessed through the tap user space interface.
 
 	  To compile this driver as a module, choose M here: the module
 	  will be called macvtap.
@@ -1796,8 +1796,6 @@ void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout)
 	BOND_AD_INFO(bond).agg_select_timer = timeout;
 }
 
-static u16 aggregator_identifier;
-
 /**
  * bond_3ad_initialize - initialize a bond's 802.3ad parameters and structures
  * @bond: bonding struct to work on

@@ -1811,7 +1809,7 @@ void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution)
 	if (!MAC_ADDRESS_EQUAL(&(BOND_AD_INFO(bond).system.sys_mac_addr),
 				bond->dev->dev_addr)) {
 
-		aggregator_identifier = 0;
+		BOND_AD_INFO(bond).aggregator_identifier = 0;
 
 		BOND_AD_INFO(bond).system.sys_priority = 0xFFFF;
 		BOND_AD_INFO(bond).system.sys_mac_addr = *((struct mac_addr *)bond->dev->dev_addr);

@@ -1880,7 +1878,7 @@ void bond_3ad_bind_slave(struct slave *slave)
 		ad_initialize_agg(aggregator);
 
 		aggregator->aggregator_mac_address = *((struct mac_addr *)bond->dev->dev_addr);
-		aggregator->aggregator_identifier = (++aggregator_identifier);
+		aggregator->aggregator_identifier = ++BOND_AD_INFO(bond).aggregator_identifier;
 		aggregator->slave = slave;
 		aggregator->is_active = 0;
 		aggregator->num_of_ports = 0;

@@ -253,6 +253,7 @@ struct ad_system {
 struct ad_bond_info {
 	struct ad_system system;	/* 802.3ad system structure */
 	u32 agg_select_timer;		// Timer to select aggregator after all adapter's hand shakes
+	u16 aggregator_identifier;
 };
 
 struct ad_slave_info {
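A toy model of why item 14's fix works: with one counter per bond (rather than a single global one), each bond numbers its own aggregators from 1, and re-initialization resets only that bond's counter, so stale global state can no longer hand out colliding IDs within a bond. The struct bond/bind_slave names below are illustrative only, not the kernel's:

#include <stdint.h>
#include <stdio.h>

struct bond {
	/* per-bond, reset when the bond (re)initializes --
	 * mirrors the new BOND_AD_INFO(bond).aggregator_identifier */
	uint16_t aggregator_identifier;
};

static uint16_t bind_slave(struct bond *b)
{
	/* IDs only need to be unique within one bond */
	return ++b->aggregator_identifier;
}

int main(void)
{
	struct bond a = { 0 }, b = { 0 };
	uint16_t a1 = bind_slave(&a);
	uint16_t a2 = bind_slave(&a);

	printf("bond a: %u %u\n", a1, a2);	/* 1 2 */
	printf("bond b: %u\n", bind_slave(&b));	/* 1, independent of bond a */
	return 0;
}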
@@ -1543,9 +1543,11 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 	bond_set_carrier(bond);
 
 	if (USES_PRIMARY(bond->params.mode)) {
+		block_netpoll_tx();
 		write_lock_bh(&bond->curr_slave_lock);
 		bond_select_active_slave(bond);
 		write_unlock_bh(&bond->curr_slave_lock);
+		unblock_netpoll_tx();
 	}
 
 	pr_info("%s: enslaving %s as a%s interface with a%s link.\n",

@@ -1571,10 +1573,12 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 	if (bond->primary_slave == new_slave)
 		bond->primary_slave = NULL;
 	if (bond->curr_active_slave == new_slave) {
+		block_netpoll_tx();
 		write_lock_bh(&bond->curr_slave_lock);
 		bond_change_active_slave(bond, NULL);
 		bond_select_active_slave(bond);
 		write_unlock_bh(&bond->curr_slave_lock);
+		unblock_netpoll_tx();
 	}
 	slave_disable_netpoll(new_slave);

@@ -2864,9 +2868,12 @@ static int bond_slave_netdev_event(unsigned long event,
 		pr_info("%s: Primary slave changed to %s, reselecting active slave.\n",
 			bond->dev->name, bond->primary_slave ? slave_dev->name :
 			"none");
+
+		block_netpoll_tx();
 		write_lock_bh(&bond->curr_slave_lock);
 		bond_select_active_slave(bond);
 		write_unlock_bh(&bond->curr_slave_lock);
+		unblock_netpoll_tx();
 		break;
 	case NETDEV_FEAT_CHANGE:
 		bond_compute_features(bond);

@@ -3700,7 +3707,7 @@ static inline int bond_slave_override(struct bonding *bond,
 
 static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb,
-			     void *accel_priv)
+			     void *accel_priv, select_queue_fallback_t fallback)
 {
 	/*
 	 * This helper function exists to help dev_pick_tx get the correct
@@ -14,7 +14,7 @@
 #include <linux/errno.h>
 #include <linux/if.h>
 #include <linux/netdevice.h>
-#include <linux/rwlock.h>
+#include <linux/spinlock.h>
 #include <linux/rcupdate.h>
 #include <linux/ctype.h>
 #include <linux/inet.h>
@@ -473,6 +473,8 @@ static int kvaser_usb_get_card_info(struct kvaser_usb *dev)
 		return err;
 
 	dev->nchannels = msg.u.cardinfo.nchannels;
+	if (dev->nchannels > MAX_NET_DEVICES)
+		return -EINVAL;
 
 	return 0;
 }
@@ -1873,7 +1873,7 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
 }
 
 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
-		       void *accel_priv)
+		       void *accel_priv, select_queue_fallback_t fallback)
 {
 	struct bnx2x *bp = netdev_priv(dev);
 

@@ -1895,7 +1895,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
 	}
 
 	/* select a non-FCoE queue */
-	return __netdev_pick_tx(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
+	return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
 }
 
 void bnx2x_set_num_queues(struct bnx2x *bp)
@@ -496,7 +496,7 @@ int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos);
 
 /* select_queue callback */
 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
-		       void *accel_priv);
+		       void *accel_priv, select_queue_fallback_t fallback);
 
 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
 					struct bnx2x_fastpath *fp,
@@ -1939,6 +1939,7 @@ static void tulip_remove_one(struct pci_dev *pdev)
 	pci_iounmap(pdev, tp->base_addr);
 	free_netdev (dev);
 	pci_release_regions (pdev);
+	pci_disable_device(pdev);
 
 	/* pci_power_off (pdev, -1); */
 }
@@ -1778,8 +1778,6 @@ fec_enet_open(struct net_device *ndev)
 	struct fec_enet_private *fep = netdev_priv(ndev);
 	int ret;
 
-	napi_enable(&fep->napi);
-
 	/* I should reset the ring buffers here, but I don't yet know
 	 * a simple way to do that.
 	 */

@@ -1794,6 +1792,8 @@ fec_enet_open(struct net_device *ndev)
 		fec_enet_free_buffers(ndev);
 		return ret;
 	}
+
+	napi_enable(&fep->napi);
 	phy_start(fep->phy_dev);
 	netif_start_queue(ndev);
 	fep->opened = 1;
@@ -6881,7 +6881,7 @@ static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
 }
 
 static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
-			      void *accel_priv)
+			      void *accel_priv, select_queue_fallback_t fallback)
 {
 	struct ixgbe_fwd_adapter *fwd_adapter = accel_priv;
 #ifdef IXGBE_FCOE

@@ -6907,7 +6907,7 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
 		if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
 			break;
 	default:
-		return __netdev_pick_tx(dev, skb);
+		return fallback(dev, skb);
 	}
 
 	f = &adapter->ring_feature[RING_F_FCOE];

@@ -6920,7 +6920,7 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
 
 	return txq + f->offset;
 #else
-	return __netdev_pick_tx(dev, skb);
+	return fallback(dev, skb);
 #endif
 }
@@ -619,7 +619,7 @@ ltq_etop_set_multicast_list(struct net_device *dev)
 
 static u16
 ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb,
-		      void *accel_priv)
+		      void *accel_priv, select_queue_fallback_t fallback)
 {
 	/* we are currently only using the first queue */
 	return 0;
@@ -43,12 +43,12 @@ config MVMDIO
 	  This driver is used by the MV643XX_ETH and MVNETA drivers.
 
 config MVNETA
-	tristate "Marvell Armada 370/XP network interface support"
-	depends on MACH_ARMADA_370_XP
+	tristate "Marvell Armada 370/38x/XP network interface support"
+	depends on PLAT_ORION
 	select MVMDIO
 	---help---
 	  This driver supports the network interface units in the
-	  Marvell ARMADA XP and ARMADA 370 SoC family.
+	  Marvell ARMADA XP, ARMADA 370 and ARMADA 38x SoC family.
 
 	  Note that this driver is distinct from the mv643xx_eth
 	  driver, which should be used for the older Marvell SoCs
@@ -629,7 +629,7 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
 }
 
 u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
-			 void *accel_priv)
+			 void *accel_priv, select_queue_fallback_t fallback)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	u16 rings_p_up = priv->num_tx_rings_p_up;

@@ -641,7 +641,7 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
 	if (vlan_tx_tag_present(skb))
 		up = vlan_tx_tag_get(skb) >> VLAN_PRIO_SHIFT;
 
-	return __netdev_pick_tx(dev, skb) % rings_p_up + up * rings_p_up;
+	return fallback(dev, skb) % rings_p_up + up * rings_p_up;
 }
 
 static void mlx4_bf_copy(void __iomem *dst, unsigned long *src, unsigned bytecnt)
@@ -723,7 +723,7 @@ int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
 
 void mlx4_en_tx_irq(struct mlx4_cq *mcq);
 u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
-			 void *accel_priv);
+			 void *accel_priv, select_queue_fallback_t fallback);
 netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
 
 int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
@@ -37,6 +37,17 @@ config DWMAC_SUNXI
 	  stmmac device driver. This driver is used for A20/A31
 	  GMAC ethernet controller.
 
+config DWMAC_STI
+	bool "STi GMAC support"
+	depends on STMMAC_PLATFORM && ARCH_STI
+	default y
+	---help---
+	  Support for ethernet controller on STi SOCs.
+
+	  This selects STi SoC glue layer support for the stmmac
+	  device driver. This driver is used on for the STi series
+	  SOCs GMAC ethernet controller.
+
 config STMMAC_PCI
 	bool "STMMAC PCI bus support"
 	depends on STMMAC_ETH && PCI
@@ -2,6 +2,7 @@ obj-$(CONFIG_STMMAC_ETH) += stmmac.o
 stmmac-$(CONFIG_STMMAC_PLATFORM) += stmmac_platform.o
 stmmac-$(CONFIG_STMMAC_PCI) += stmmac_pci.o
 stmmac-$(CONFIG_DWMAC_SUNXI) += dwmac-sunxi.o
+stmmac-$(CONFIG_DWMAC_STI) += dwmac-sti.o
 stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \
 	      chain_mode.o dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \
 	      dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \
drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c (new file, 330 lines)

@@ -0,0 +1,330 @@
/**
 * dwmac-sti.c - STMicroelectronics DWMAC Specific Glue layer
 *
 * Copyright (C) 2003-2014 STMicroelectronics (R&D) Limited
 * Author: Srinivas Kandagatla <srinivas.kandagatla@st.com>
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/stmmac.h>
#include <linux/phy.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_net.h>

/**
 *			STi GMAC glue logic.
 *			--------------------
 *
 *		 _
 *		| \
 *	--------|0 \ ETH_SEL_INTERNAL_NOTEXT_PHYCLK
 * phyclk	|  |___________________________________________
 *		|  |	|			(phyclk-in)
 *	--------|1 /	|
 * int-clk	|_ /	|
 *			|	 _
 *			|	| \
 *			|_______|1 \ ETH_SEL_TX_RETIME_CLK
 *				|  |___________________________
 *				|  |		(tx-retime-clk)
 *			 _______|0 /
 *			|	|_ /
 *		 _	|
 *		| \	|
 *	--------|0 \	|
 * clk_125	|  |__|
 *		|  |	ETH_SEL_TXCLK_NOT_CLK125
 *	--------|1 /
 * txclk	|_ /
 *
 *
 * ETH_SEL_INTERNAL_NOTEXT_PHYCLK is valid only for RMII where PHY can
 * generate 50MHz clock or MAC can generate it.
 * This bit is configured by "st,ext-phyclk" property.
 *
 * ETH_SEL_TXCLK_NOT_CLK125 is only valid for gigabit modes, where the 125Mhz
 * clock either comes from clk-125 pin or txclk pin. This configuration is
 * totally driven by the board wiring. This bit is configured by
 * "st,tx-retime-src" property.
 *
 * TXCLK configuration is different for different phy interface modes
 * and changes according to link speed in modes like RGMII.
 *
 * Below table summarizes the clock requirement and clock sources for
 * supported phy interface modes with link speeds.
 * ________________________________________________
 *| PHY_MODE | 1000 Mbit Link  | 100 Mbit Link     |
 * ------------------------------------------------
 *| MII      | n/a             | 25Mhz             |
 *|          |                 | txclk             |
 * ------------------------------------------------
 *| GMII     | 125Mhz          | 25Mhz             |
 *|          | clk-125/txclk   | txclk             |
 * ------------------------------------------------
 *| RGMII    | 125Mhz          | 25Mhz             |
 *|          | clk-125/txclk   | clkgen            |
 * ------------------------------------------------
 *| RMII     | n/a             | 25Mhz             |
 *|          |                 | clkgen/phyclk-in  |
 * ------------------------------------------------
 *
 * TX lines are always retimed with a clk, which can vary depending
 * on the board configuration. Below is the table of these bits
 * in eth configuration register depending on source of retime clk.
 *
 *---------------------------------------------------------------
 * src	 | tx_rt_clk	| int_not_ext_phyclk	| txclk_n_clk125|
 *---------------------------------------------------------------
 * txclk |	0	|	n/a		|	1	|
 *---------------------------------------------------------------
 * ck_125|	0	|	n/a		|	0	|
 *---------------------------------------------------------------
 * phyclk|	1	|	0		|	n/a	|
 *---------------------------------------------------------------
 * clkgen|	1	|	1		|	n/a	|
 *---------------------------------------------------------------
 */

/* Register definition */

/* 3 bits [8:6]
 *	[6:6] ETH_SEL_TXCLK_NOT_CLK125
 *	[7:7] ETH_SEL_INTERNAL_NOTEXT_PHYCLK
 *	[8:8] ETH_SEL_TX_RETIME_CLK
 *
 */

#define TX_RETIME_SRC_MASK		GENMASK(8, 6)
#define ETH_SEL_TX_RETIME_CLK		BIT(8)
#define ETH_SEL_INTERNAL_NOTEXT_PHYCLK	BIT(7)
#define ETH_SEL_TXCLK_NOT_CLK125	BIT(6)

#define ENMII_MASK			GENMASK(5, 5)
#define ENMII				BIT(5)

/**
 * 3 bits [4:2]
 *	000-GMII/MII
 *	001-RGMII
 *	010-SGMII
 *	100-RMII
 */
#define MII_PHY_SEL_MASK		GENMASK(4, 2)
#define ETH_PHY_SEL_RMII		BIT(4)
#define ETH_PHY_SEL_SGMII		BIT(3)
#define ETH_PHY_SEL_RGMII		BIT(2)
#define ETH_PHY_SEL_GMII		0x0
#define ETH_PHY_SEL_MII			0x0

#define IS_PHY_IF_MODE_RGMII(iface)	(iface == PHY_INTERFACE_MODE_RGMII || \
			iface == PHY_INTERFACE_MODE_RGMII_ID || \
			iface == PHY_INTERFACE_MODE_RGMII_RXID || \
			iface == PHY_INTERFACE_MODE_RGMII_TXID)

#define IS_PHY_IF_MODE_GBIT(iface)	(IS_PHY_IF_MODE_RGMII(iface) || \
			iface == PHY_INTERFACE_MODE_GMII)

struct sti_dwmac {
	int interface;
	bool ext_phyclk;
	bool is_tx_retime_src_clk_125;
	struct clk *clk;
	int reg;
	struct device *dev;
	struct regmap *regmap;
};

static u32 phy_intf_sels[] = {
	[PHY_INTERFACE_MODE_MII] = ETH_PHY_SEL_MII,
	[PHY_INTERFACE_MODE_GMII] = ETH_PHY_SEL_GMII,
	[PHY_INTERFACE_MODE_RGMII] = ETH_PHY_SEL_RGMII,
	[PHY_INTERFACE_MODE_RGMII_ID] = ETH_PHY_SEL_RGMII,
	[PHY_INTERFACE_MODE_SGMII] = ETH_PHY_SEL_SGMII,
	[PHY_INTERFACE_MODE_RMII] = ETH_PHY_SEL_RMII,
};

enum {
	TX_RETIME_SRC_NA = 0,
	TX_RETIME_SRC_TXCLK = 1,
	TX_RETIME_SRC_CLK_125,
	TX_RETIME_SRC_PHYCLK,
	TX_RETIME_SRC_CLKGEN,
};

static const char *const tx_retime_srcs[] = {
	[TX_RETIME_SRC_NA] = "",
	[TX_RETIME_SRC_TXCLK] = "txclk",
	[TX_RETIME_SRC_CLK_125] = "clk_125",
	[TX_RETIME_SRC_PHYCLK] = "phyclk",
	[TX_RETIME_SRC_CLKGEN] = "clkgen",
};

static u32 tx_retime_val[] = {
	[TX_RETIME_SRC_TXCLK] = ETH_SEL_TXCLK_NOT_CLK125,
	[TX_RETIME_SRC_CLK_125] = 0x0,
	[TX_RETIME_SRC_PHYCLK] = ETH_SEL_TX_RETIME_CLK,
	[TX_RETIME_SRC_CLKGEN] = ETH_SEL_TX_RETIME_CLK |
				 ETH_SEL_INTERNAL_NOTEXT_PHYCLK,
};

static void setup_retime_src(struct sti_dwmac *dwmac, u32 spd)
{
	u32 src = 0, freq = 0;

	if (spd == SPEED_100) {
		if (dwmac->interface == PHY_INTERFACE_MODE_MII ||
		    dwmac->interface == PHY_INTERFACE_MODE_GMII) {
			src = TX_RETIME_SRC_TXCLK;
		} else if (dwmac->interface == PHY_INTERFACE_MODE_RMII) {
			if (dwmac->ext_phyclk) {
				src = TX_RETIME_SRC_PHYCLK;
			} else {
				src = TX_RETIME_SRC_CLKGEN;
				freq = 50000000;
			}

		} else if (IS_PHY_IF_MODE_RGMII(dwmac->interface)) {
			src = TX_RETIME_SRC_CLKGEN;
			freq = 25000000;
		}

		if (src == TX_RETIME_SRC_CLKGEN && dwmac->clk)
			clk_set_rate(dwmac->clk, freq);

	} else if (spd == SPEED_1000) {
		if (dwmac->is_tx_retime_src_clk_125)
			src = TX_RETIME_SRC_CLK_125;
		else
			src = TX_RETIME_SRC_TXCLK;
	}

	regmap_update_bits(dwmac->regmap, dwmac->reg,
			   TX_RETIME_SRC_MASK, tx_retime_val[src]);
}

static void sti_dwmac_exit(struct platform_device *pdev, void *priv)
{
	struct sti_dwmac *dwmac = priv;

	if (dwmac->clk)
		clk_disable_unprepare(dwmac->clk);
}

static void sti_fix_mac_speed(void *priv, unsigned int spd)
{
	struct sti_dwmac *dwmac = priv;

	setup_retime_src(dwmac, spd);

	return;
}

static int sti_dwmac_parse_data(struct sti_dwmac *dwmac,
				struct platform_device *pdev)
{
	struct resource *res;
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct regmap *regmap;
	int err;

	if (!np)
		return -EINVAL;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sti-ethconf");
	if (!res)
		return -ENODATA;

	regmap = syscon_regmap_lookup_by_phandle(np, "st,syscon");
	if (IS_ERR(regmap))
		return PTR_ERR(regmap);

	dwmac->dev = dev;
	dwmac->interface = of_get_phy_mode(np);
	dwmac->regmap = regmap;
	dwmac->reg = res->start;
	dwmac->ext_phyclk = of_property_read_bool(np, "st,ext-phyclk");
	dwmac->is_tx_retime_src_clk_125 = false;

	if (IS_PHY_IF_MODE_GBIT(dwmac->interface)) {
		const char *rs;

		err = of_property_read_string(np, "st,tx-retime-src", &rs);
		if (err < 0) {
			dev_err(dev, "st,tx-retime-src not specified\n");
			return err;
		}

		if (!strcasecmp(rs, "clk_125"))
			dwmac->is_tx_retime_src_clk_125 = true;
	}

	dwmac->clk = devm_clk_get(dev, "sti-ethclk");

	if (IS_ERR(dwmac->clk))
		dwmac->clk = NULL;

	return 0;
}

static int sti_dwmac_init(struct platform_device *pdev, void *priv)
{
	struct sti_dwmac *dwmac = priv;
	struct regmap *regmap = dwmac->regmap;
	int iface = dwmac->interface;
	u32 reg = dwmac->reg;
	u32 val, spd;

	if (dwmac->clk)
		clk_prepare_enable(dwmac->clk);

	regmap_update_bits(regmap, reg, MII_PHY_SEL_MASK, phy_intf_sels[iface]);

	val = (iface == PHY_INTERFACE_MODE_REVMII) ? 0 : ENMII;
	regmap_update_bits(regmap, reg, ENMII_MASK, val);

	if (IS_PHY_IF_MODE_GBIT(iface))
		spd = SPEED_1000;
	else
		spd = SPEED_100;

	setup_retime_src(dwmac, spd);

	return 0;
}

static void *sti_dwmac_setup(struct platform_device *pdev)
{
	struct sti_dwmac *dwmac;
	int ret;

	dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
	if (!dwmac)
		return ERR_PTR(-ENOMEM);

	ret = sti_dwmac_parse_data(dwmac, pdev);
	if (ret) {
		dev_err(&pdev->dev, "Unable to parse OF data\n");
		return ERR_PTR(ret);
	}

	return dwmac;
}

const struct stmmac_of_data sti_gmac_data = {
	.fix_mac_speed = sti_fix_mac_speed,
	.setup = sti_dwmac_setup,
	.init = sti_dwmac_init,
	.exit = sti_dwmac_exit,
};
@@ -133,6 +133,9 @@ bool stmmac_eee_init(struct stmmac_priv *priv);
 #ifdef CONFIG_DWMAC_SUNXI
 extern const struct stmmac_of_data sun7i_gmac_data;
 #endif
+#ifdef CONFIG_DWMAC_STI
+extern const struct stmmac_of_data sti_gmac_data;
+#endif
 extern struct platform_driver stmmac_pltfr_driver;
 static inline int stmmac_register_platform(void)
 {
@@ -32,6 +32,11 @@
 static const struct of_device_id stmmac_dt_ids[] = {
 #ifdef CONFIG_DWMAC_SUNXI
 	{ .compatible = "allwinner,sun7i-a20-gmac", .data = &sun7i_gmac_data},
 #endif
+#ifdef CONFIG_DWMAC_STI
+	{ .compatible = "st,stih415-dwmac", .data = &sti_gmac_data},
+	{ .compatible = "st,stih416-dwmac", .data = &sti_gmac_data},
+	{ .compatible = "st,stih127-dwmac", .data = &sti_gmac_data},
+#endif
 	/* SoC specific glue layers should come before generic bindings */
 	{ .compatible = "st,spear600-gmac"},
@@ -554,7 +554,7 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
 		 * common for both the interface as the interface shares
 		 * the same hardware resource.
 		 */
-		for (i = 0; i <= priv->data.slaves; i++)
+		for (i = 0; i < priv->data.slaves; i++)
 			if (priv->slaves[i].ndev->flags & IFF_PROMISC)
 				flag = true;
 

@@ -578,7 +578,7 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
 			unsigned long timeout = jiffies + HZ;
 
 			/* Disable Learn for all ports */
-			for (i = 0; i <= priv->data.slaves; i++) {
+			for (i = 0; i < priv->data.slaves; i++) {
 				cpsw_ale_control_set(ale, i,
 						     ALE_PORT_NOLEARN, 1);
 				cpsw_ale_control_set(ale, i,

@@ -606,7 +606,7 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
 		cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0);
 
 		/* Enable Learn for all ports */
-		for (i = 0; i <= priv->data.slaves; i++) {
+		for (i = 0; i < priv->data.slaves; i++) {
 			cpsw_ale_control_set(ale, i,
 					     ALE_PORT_NOLEARN, 0);
 			cpsw_ale_control_set(ale, i,

@@ -1896,6 +1896,11 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
 		memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN);
 
 		slave_data->phy_if = of_get_phy_mode(slave_node);
+		if (slave_data->phy_if < 0) {
+			pr_err("Missing or malformed slave[%d] phy-mode property\n",
+			       i);
+			return slave_data->phy_if;
+		}
 
 		if (data->dual_emac) {
 			if (of_property_read_u32(slave_node, "dual_emac_res_vlan",
@@ -2071,7 +2071,7 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
 
 /* Return subqueue id on this core (one per core). */
 static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb,
-				 void *accel_priv)
+				 void *accel_priv, select_queue_fallback_t fallback)
 {
 	return smp_processor_id();
 }
@@ -26,6 +26,7 @@
 #include <linux/netdevice.h>
 #include <linux/of_mdio.h>
 #include <linux/of_platform.h>
+#include <linux/of_irq.h>
 #include <linux/of_address.h>
 #include <linux/skbuff.h>
 #include <linux/spinlock.h>

@@ -600,7 +601,8 @@ static void axienet_start_xmit_done(struct net_device *ndev)
 		size += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
 		packets++;
 
-		lp->tx_bd_ci = ++lp->tx_bd_ci % TX_BD_NUM;
+		++lp->tx_bd_ci;
+		lp->tx_bd_ci %= TX_BD_NUM;
 		cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
 		status = cur_p->status;
 	}

@@ -686,7 +688,8 @@ static int axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 			     skb_headlen(skb), DMA_TO_DEVICE);
 
 	for (ii = 0; ii < num_frag; ii++) {
-		lp->tx_bd_tail = ++lp->tx_bd_tail % TX_BD_NUM;
+		++lp->tx_bd_tail;
+		lp->tx_bd_tail %= TX_BD_NUM;
 		cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
 		frag = &skb_shinfo(skb)->frags[ii];
 		cur_p->phys = dma_map_single(ndev->dev.parent,

@@ -702,7 +705,8 @@ static int axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
 	/* Start the transfer */
 	axienet_dma_out32(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
-	lp->tx_bd_tail = ++lp->tx_bd_tail % TX_BD_NUM;
+	++lp->tx_bd_tail;
+	lp->tx_bd_tail %= TX_BD_NUM;
 
 	return NETDEV_TX_OK;
 }

@@ -774,7 +778,8 @@ static void axienet_recv(struct net_device *ndev)
 		cur_p->status = 0;
 		cur_p->sw_id_offset = (u32) new_skb;
 
-		lp->rx_bd_ci = ++lp->rx_bd_ci % RX_BD_NUM;
+		++lp->rx_bd_ci;
+		lp->rx_bd_ci %= RX_BD_NUM;
 		cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
 	}
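Aside on the axienet ring-index fix above: the old form `lp->tx_bd_ci = ++lp->tx_bd_ci % TX_BD_NUM;` writes the same variable twice with no intervening sequence point, which is undefined behavior in C and is what modern compilers flag (and, with -Werror, turn into the build errors mentioned in item 8). A standalone illustration of the well-defined two-statement replacement used in the hunks above:

#include <stdio.h>

#define RING_SIZE 8

int main(void)
{
	unsigned int idx = 7;

	/* Old form (undefined behavior, do not use):
	 *     idx = ++idx % RING_SIZE;
	 * 'idx' is modified both by '++' and by the assignment, with no
	 * sequence point between the two writes.
	 */

	/* Well-defined form, matching the fix: */
	++idx;
	idx %= RING_SIZE;

	printf("next ring index: %u\n", idx);	/* prints 0 */
	return 0;
}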
@@ -88,8 +88,12 @@ static int netvsc_open(struct net_device *net)
 {
 	struct net_device_context *net_device_ctx = netdev_priv(net);
 	struct hv_device *device_obj = net_device_ctx->device_ctx;
+	struct netvsc_device *nvdev;
+	struct rndis_device *rdev;
 	int ret = 0;
 
+	netif_carrier_off(net);
+
 	/* Open up the device */
 	ret = rndis_filter_open(device_obj);
 	if (ret != 0) {

@@ -99,6 +103,11 @@ static int netvsc_open(struct net_device *net)
 
 	netif_start_queue(net);
 
+	nvdev = hv_get_drvdata(device_obj);
+	rdev = nvdev->extension;
+	if (!rdev->link_state)
+		netif_carrier_on(net);
+
 	return ret;
 }

@@ -229,23 +238,24 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj,
 	struct net_device *net;
 	struct net_device_context *ndev_ctx;
 	struct netvsc_device *net_device;
+	struct rndis_device *rdev;
 
 	net_device = hv_get_drvdata(device_obj);
+	rdev = net_device->extension;
+
+	rdev->link_state = status != 1;
+
 	net = net_device->ndev;
 
-	if (!net) {
-		netdev_err(net, "got link status but net device "
-				"not initialized yet\n");
+	if (!net || net->reg_state != NETREG_REGISTERED)
 		return;
-	}
 
+	ndev_ctx = netdev_priv(net);
 	if (status == 1) {
-		netif_carrier_on(net);
-		ndev_ctx = netdev_priv(net);
 		schedule_delayed_work(&ndev_ctx->dwork, 0);
+		schedule_delayed_work(&ndev_ctx->dwork, msecs_to_jiffies(20));
 	} else {
-		netif_carrier_off(net);
 		schedule_delayed_work(&ndev_ctx->dwork, 0);
 	}
 }

@@ -388,16 +398,34 @@ static const struct net_device_ops device_ops = {
  * current context when receiving RNDIS_STATUS_MEDIA_CONNECT event. So, add
  * another netif_notify_peers() into a delayed work, otherwise GARP packet
  * will not be sent after quick migration, and cause network disconnection.
+ * Also, we update the carrier status here.
  */
-static void netvsc_send_garp(struct work_struct *w)
+static void netvsc_link_change(struct work_struct *w)
 {
 	struct net_device_context *ndev_ctx;
 	struct net_device *net;
 	struct netvsc_device *net_device;
+	struct rndis_device *rdev;
+	bool notify;
+
+	rtnl_lock();
 
 	ndev_ctx = container_of(w, struct net_device_context, dwork.work);
 	net_device = hv_get_drvdata(ndev_ctx->device_ctx);
+	rdev = net_device->extension;
 	net = net_device->ndev;
-	netdev_notify_peers(net);
+
+	if (rdev->link_state) {
+		netif_carrier_off(net);
+		notify = false;
+	} else {
+		netif_carrier_on(net);
+		notify = true;
+	}
+
+	rtnl_unlock();
+
+	if (notify)
+		netdev_notify_peers(net);
 }

@@ -414,13 +442,10 @@ static int netvsc_probe(struct hv_device *dev,
 	if (!net)
 		return -ENOMEM;
 
-	/* Set initial state */
-	netif_carrier_off(net);
-
 	net_device_ctx = netdev_priv(net);
 	net_device_ctx->device_ctx = dev;
 	hv_set_drvdata(dev, net);
-	INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_send_garp);
+	INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
 	INIT_WORK(&net_device_ctx->work, do_set_multicast);
 
 	net->netdev_ops = &device_ops;

@@ -443,8 +468,6 @@ static int netvsc_probe(struct hv_device *dev,
 	}
 	memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
 
-	netif_carrier_on(net);
-
 	ret = register_netdev(net);
 	if (ret != 0) {
 		pr_err("Unable to register netdev.\n");
@@ -522,7 +522,6 @@ static void irtty_close(struct tty_struct *tty)
 	sirdev_put_instance(priv->dev);
 
 	/* Stop tty */
-	irtty_stop_receiver(tty, TRUE);
 	clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
 	if (tty->ops->stop)
 		tty->ops->stop(tty);
@@ -879,14 +879,15 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
 	dev->priv_flags |= IFF_MACVLAN;
 	err = netdev_upper_dev_link(lowerdev, dev);
 	if (err)
-		goto destroy_port;
+		goto unregister_netdev;
 
 	list_add_tail_rcu(&vlan->list, &port->vlans);
 	netif_stacked_transfer_operstate(lowerdev, dev);
 
 	return 0;
 
+unregister_netdev:
+	unregister_netdevice(dev);
 destroy_port:
 	port->count -= 1;
 	if (!port->count)
@@ -1006,11 +1006,6 @@ static int dp83640_probe(struct phy_device *phydev)
 	} else
 		list_add_tail(&dp83640->list, &clock->phylist);
 
-	if (clock->chosen && !list_empty(&clock->phylist))
-		recalibrate(clock);
-	else
-		enable_broadcast(dp83640->phydev, clock->page, 1);
-
 	dp83640_clock_put(clock);
 	return 0;
 

@@ -1063,6 +1058,14 @@ static void dp83640_remove(struct phy_device *phydev)
 
 static int dp83640_config_init(struct phy_device *phydev)
 {
+	struct dp83640_private *dp83640 = phydev->priv;
+	struct dp83640_clock *clock = dp83640->clock;
+
+	if (clock->chosen && !list_empty(&clock->phylist))
+		recalibrate(clock);
+	else
+		enable_broadcast(phydev, clock->page, 1);
+
 	enable_status_frames(phydev, true);
 	ext_write(0, phydev, PAGE4, PTP_CTL, PTP_ENABLE);
 	return 0;
@@ -1648,7 +1648,7 @@ static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
 }
 
 static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb,
-			     void *accel_priv)
+			     void *accel_priv, select_queue_fallback_t fallback)
 {
 	/*
 	 * This helper function exists to help dev_pick_tx get the correct
@@ -366,7 +366,7 @@ static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
  * hope the rxq no. may help here.
  */
 static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
-			    void *accel_priv)
+			    void *accel_priv, select_queue_fallback_t fallback)
 {
 	struct tun_struct *tun = netdev_priv(dev);
 	struct tun_flow_entry *e;
@@ -296,7 +296,6 @@ config USB_NET_SR9800
 	tristate "CoreChip-sz SR9800 based USB 2.0 10/100 ethernet devices"
 	depends on USB_USBNET
 	select CRC32
-	default y
 	---help---
 	  Say Y if you want to use one of the following 100Mbps USB Ethernet
 	  device based on the CoreChip-sz SR9800 chip.
@@ -917,7 +917,8 @@ static const struct driver_info ax88178_info = {
 	.status = asix_status,
 	.link_reset = ax88178_link_reset,
 	.reset = ax88178_reset,
-	.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR,
+	.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR |
+		 FLAG_MULTI_PACKET,
 	.rx_fixup = asix_rx_fixup_common,
 	.tx_fixup = asix_tx_fixup,
 };
@@ -1118,6 +1118,10 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
 	u16 hdr_off;
 	u32 *pkt_hdr;
 
+	/* This check is no longer done by usbnet */
+	if (skb->len < dev->net->hard_header_len)
+		return 0;
+
 	skb_trim(skb, skb->len - 4);
 	memcpy(&rx_hdr, skb_tail_pointer(skb), 4);
 	le32_to_cpus(&rx_hdr);
@@ -84,6 +84,10 @@ static int genelink_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
 	u32 size;
 	u32 count;
 
+	/* This check is no longer done by usbnet */
+	if (skb->len < dev->net->hard_header_len)
+		return 0;
+
 	header = (struct gl_header *) skb->data;
 
 	// get the packet count of the received skb
@@ -526,8 +526,9 @@ static int mcs7830_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
 {
 	u8 status;
 
-	if (skb->len == 0) {
-		dev_err(&dev->udev->dev, "unexpected empty rx frame\n");
+	/* This check is no longer done by usbnet */
+	if (skb->len < dev->net->hard_header_len) {
+		dev_err(&dev->udev->dev, "unexpected tiny rx frame\n");
 		return 0;
 	}
@@ -364,6 +364,10 @@ static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
 	struct nc_trailer *trailer;
 	u16 hdr_len, packet_len;
 
+	/* This check is no longer done by usbnet */
+	if (skb->len < dev->net->hard_header_len)
+		return 0;
+
 	if (!(skb->len & 0x01)) {
 		netdev_dbg(dev->net, "rx framesize %d range %d..%d mtu %d\n",
 			   skb->len, dev->net->hard_header_len, dev->hard_mtu,
@@ -80,10 +80,10 @@ static int qmi_wwan_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
 {
 	__be16 proto;
 
-	/* usbnet rx_complete guarantees that skb->len is at least
-	 * hard_header_len, so we can inspect the dest address without
-	 * checking skb->len
-	 */
+	/* This check is no longer done by usbnet */
+	if (skb->len < dev->net->hard_header_len)
+		return 0;
+
 	switch (skb->data[0] & 0xf0) {
 	case 0x40:
 		proto = htons(ETH_P_IP);

@@ -732,6 +732,7 @@ static const struct usb_device_id products[] = {
 	{QMI_FIXED_INTF(0x1bc7, 0x1201, 2)},	/* Telit LE920 */
 	{QMI_FIXED_INTF(0x0b3c, 0xc005, 6)},	/* Olivetti Olicard 200 */
 	{QMI_FIXED_INTF(0x1e2d, 0x0060, 4)},	/* Cinterion PLxx */
+	{QMI_FIXED_INTF(0x1e2d, 0x0053, 4)},	/* Cinterion PHxx,PXxx */
 
 	/* 4. Gobi 1000 devices */
 	{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)},	/* Acer Gobi Modem Device */
@@ -492,6 +492,10 @@ EXPORT_SYMBOL_GPL(rndis_unbind);
  */
 int rndis_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
 {
+	/* This check is no longer done by usbnet */
+	if (skb->len < dev->net->hard_header_len)
+		return 0;
+
 	/* peripheral may have batched packets to us... */
 	while (likely(skb->len)) {
 		struct rndis_data_hdr *hdr = (void *)skb->data;
@@ -2106,6 +2106,10 @@ static void smsc75xx_rx_csum_offload(struct usbnet *dev, struct sk_buff *skb,
 
 static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
 {
+	/* This check is no longer done by usbnet */
+	if (skb->len < dev->net->hard_header_len)
+		return 0;
+
 	while (skb->len > 0) {
 		u32 rx_cmd_a, rx_cmd_b, align_count, size;
 		struct sk_buff *ax_skb;
@@ -1723,6 +1723,10 @@ static void smsc95xx_rx_csum_offload(struct sk_buff *skb)
 
 static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
 {
+	/* This check is no longer done by usbnet */
+	if (skb->len < dev->net->hard_header_len)
+		return 0;
+
 	while (skb->len > 0) {
 		u32 header, align_count;
 		struct sk_buff *ax_skb;
@@ -63,6 +63,10 @@ static int sr_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
 {
 	int offset = 0;
 
+	/* This check is no longer done by usbnet */
+	if (skb->len < dev->net->hard_header_len)
+		return 0;
+
 	while (offset + sizeof(u32) < skb->len) {
 		struct sk_buff *sr_skb;
 		u16 size;

@@ -823,7 +827,7 @@ static int sr9800_bind(struct usbnet *dev, struct usb_interface *intf)
 		dev->rx_urb_size =
 			SR9800_BULKIN_SIZE[SR9800_MAX_BULKIN_2K].size;
 	}
-	netdev_dbg(dev->net, "%s : setting rx_urb_size with : %ld\n", __func__,
+	netdev_dbg(dev->net, "%s : setting rx_urb_size with : %zu\n", __func__,
 		   dev->rx_urb_size);
 	return 0;
@@ -542,17 +542,19 @@ static inline void rx_process (struct usbnet *dev, struct sk_buff *skb)
 	}
 	// else network stack removes extra byte if we forced a short packet
 
-	if (skb->len) {
-		/* all data was already cloned from skb inside the driver */
-		if (dev->driver_info->flags & FLAG_MULTI_PACKET)
-			dev_kfree_skb_any(skb);
-		else
-			usbnet_skb_return(dev, skb);
-		return;
+	/* all data was already cloned from skb inside the driver */
+	if (dev->driver_info->flags & FLAG_MULTI_PACKET)
+		goto done;
+
+	if (skb->len < ETH_HLEN) {
+		dev->net->stats.rx_errors++;
+		dev->net->stats.rx_length_errors++;
+		netif_dbg(dev, rx_err, dev->net, "rx length %d\n", skb->len);
+	} else {
+		usbnet_skb_return(dev, skb);
+		return;
 	}
 
-	netif_dbg(dev, rx_err, dev->net, "drop\n");
-	dev->net->stats.rx_errors++;
 done:
 	skb_queue_tail(&dev->done, skb);
 }

@@ -574,13 +576,6 @@ static void rx_complete (struct urb *urb)
 	switch (urb_status) {
 	/* success */
 	case 0:
-		if (skb->len < dev->net->hard_header_len) {
-			state = rx_cleanup;
-			dev->net->stats.rx_errors++;
-			dev->net->stats.rx_length_errors++;
-			netif_dbg(dev, rx_err, dev->net,
-				  "rx length %d\n", skb->len);
-		}
 		break;
 
 	/* stalls need manual reset. this is rare ... except that
@@ -110,7 +110,7 @@ ath5k_hw_radio_revision(struct ath5k_hw *ah, enum ieee80211_band band)
 	ath5k_hw_reg_write(ah, 0x00010000, AR5K_PHY(0x20));
 
 	if (ah->ah_version == AR5K_AR5210) {
-		srev = ath5k_hw_reg_read(ah, AR5K_PHY(256) >> 28) & 0xf;
+		srev = (ath5k_hw_reg_read(ah, AR5K_PHY(256)) >> 28) & 0xf;
 		ret = (u16)ath5k_hw_bitswap(srev, 4) + 1;
 	} else {
 		srev = (ath5k_hw_reg_read(ah, AR5K_PHY(0x100)) >> 24) & 0xff;
@@ -496,7 +496,7 @@ void hostap_init_proc(local_info_t *local)
 
 void hostap_remove_proc(local_info_t *local)
 {
-	remove_proc_subtree(local->ddev->name, hostap_proc);
+	proc_remove(local->proc);
 }
@@ -696,6 +696,24 @@ static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
 	return ret;
 }
 
+static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg)
+{
+	if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
+		return false;
+	return true;
+}
+
+static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg)
+{
+	if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
+		return false;
+	if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG)
+		return true;
+
+	/* disabled by default */
+	return false;
+}
+
 static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
 				   struct ieee80211_vif *vif,
 				   enum ieee80211_ampdu_mlme_action action,

@@ -717,7 +735,7 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
 
 	switch (action) {
 	case IEEE80211_AMPDU_RX_START:
-		if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
+		if (!iwl_enable_rx_ampdu(priv->cfg))
 			break;
 		IWL_DEBUG_HT(priv, "start Rx\n");
 		ret = iwl_sta_rx_agg_start(priv, sta, tid, *ssn);

@@ -729,7 +747,7 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
 	case IEEE80211_AMPDU_TX_START:
 		if (!priv->trans->ops->txq_enable)
 			break;
-		if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
+		if (!iwl_enable_tx_ampdu(priv->cfg))
 			break;
 		IWL_DEBUG_HT(priv, "start Tx\n");
 		ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn);
@@ -1286,7 +1286,7 @@ module_param_named(swcrypto, iwlwifi_mod_params.sw_crypto, int, S_IRUGO);
 MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
 module_param_named(11n_disable, iwlwifi_mod_params.disable_11n, uint, S_IRUGO);
 MODULE_PARM_DESC(11n_disable,
-	"disable 11n functionality, bitmap: 1: full, 2: agg TX, 4: agg RX");
+	"disable 11n functionality, bitmap: 1: full, 2: disable agg TX, 4: disable agg RX, 8 enable agg TX");
 module_param_named(amsdu_size_8K, iwlwifi_mod_params.amsdu_size_8K,
 		   int, S_IRUGO);
 MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size (default 0)");
@@ -79,9 +79,12 @@ enum iwl_power_level {
 	IWL_POWER_NUM
 };
 
-#define IWL_DISABLE_HT_ALL	BIT(0)
-#define IWL_DISABLE_HT_TXAGG	BIT(1)
-#define IWL_DISABLE_HT_RXAGG	BIT(2)
+enum iwl_disable_11n {
+	IWL_DISABLE_HT_ALL	 = BIT(0),
+	IWL_DISABLE_HT_TXAGG	 = BIT(1),
+	IWL_DISABLE_HT_RXAGG	 = BIT(2),
+	IWL_ENABLE_HT_TXAGG	 = BIT(3),
+};
 
 /**
  * struct iwl_mod_params

@@ -90,7 +93,7 @@ enum iwl_power_level {
  *
  * @sw_crypto: using hardware encryption, default = 0
  * @disable_11n: disable 11n capabilities, default = 0,
- *	use IWL_DISABLE_HT_* constants
+ *	use IWL_[DIS,EN]ABLE_HT_* constants
  * @amsdu_size_8K: enable 8K amsdu size, default = 0
 * @restart_fw: restart firmware, default = 1
 * @wd_disable: enable stuck queue check, default = 0
@@ -328,6 +328,24 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
 	ieee80211_free_txskb(hw, skb);
 }
 
+static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg)
+{
+	if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
+		return false;
+	return true;
+}
+
+static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg)
+{
+	if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
+		return false;
+	if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG)
+		return true;
+
+	/* enabled by default */
+	return true;
+}
+
 static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
 				    struct ieee80211_vif *vif,
 				    enum ieee80211_ampdu_mlme_action action,

@@ -347,7 +365,7 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
 
 	switch (action) {
 	case IEEE80211_AMPDU_RX_START:
-		if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG) {
+		if (!iwl_enable_rx_ampdu(mvm->cfg)) {
 			ret = -EINVAL;
 			break;
 		}

@@ -357,7 +375,7 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
 		ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false);
 		break;
 	case IEEE80211_AMPDU_TX_START:
-		if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG) {
+		if (!iwl_enable_tx_ampdu(mvm->cfg)) {
 			ret = -EINVAL;
 			break;
 		}
@@ -748,7 +748,7 @@ static struct net_device_stats *mwifiex_get_stats(struct net_device *dev)
 
 static u16
 mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb,
-				void *accel_priv)
+				void *accel_priv, select_queue_fallback_t fallback)
 {
 	skb->priority = cfg80211_classify8021d(skb, NULL);
 	return mwifiex_1d_to_wmm_queue[skb->priority];
@@ -15,6 +15,8 @@
 #ifndef RTL8187_H
 #define RTL8187_H
 
+#include <linux/cache.h>
+
 #include "rtl818x.h"
 #include "leds.h"
 

@@ -139,7 +141,10 @@ struct rtl8187_priv {
 	u8 aifsn[4];
 	u8 rfkill_mask;
 	struct {
-		__le64 buf;
+		union {
+			__le64 buf;
+			u8 dummy1[L1_CACHE_BYTES];
+		} ____cacheline_aligned;
 		struct sk_buff_head queue;
 	} b_tx_status; /* This queue is used by both -b and non-b devices */
 	struct mutex io_mutex;

@@ -147,7 +152,8 @@ struct rtl8187_priv {
 		u8 bits8;
 		__le16 bits16;
 		__le32 bits32;
-	} *io_dmabuf;
+		u8 dummy2[L1_CACHE_BYTES];
+	} *io_dmabuf ____cacheline_aligned;
 	bool rfkill_off;
 	u16 seqno;
 };
@@ -48,7 +48,7 @@ bool rtl_ps_enable_nic(struct ieee80211_hw *hw)
 
 	/*<2> Enable Adapter */
 	if (rtlpriv->cfg->ops->hw_init(hw))
-		return 1;
+		return false;
 	RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
 
 	/*<3> Enable Interrupt */
@@ -937,14 +937,26 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw)
 	bool is92c;
 	int err;
 	u8 tmp_u1b;
+	unsigned long flags;
 
 	rtlpci->being_init_adapter = true;
+
+	/* Since this function can take a very long time (up to 350 ms)
+	 * and can be called with irqs disabled, reenable the irqs
+	 * to let the other devices continue being serviced.
+	 *
+	 * It is safe doing so since our own interrupts will only be enabled
+	 * in a subsequent step.
+	 */
+	local_save_flags(flags);
+	local_irq_enable();
+
 	rtlpriv->intf_ops->disable_aspm(hw);
 	rtstatus = _rtl92ce_init_mac(hw);
 	if (!rtstatus) {
 		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n");
 		err = 1;
-		return err;
+		goto exit;
 	}
 
 	err = rtl92c_download_fw(hw);

@@ -952,7 +964,7 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw)
 		RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
 			 "Failed to download FW. Init HW without FW now..\n");
 		err = 1;
-		return err;
+		goto exit;
 	}
 
 	rtlhal->last_hmeboxnum = 0;

@@ -1032,6 +1044,8 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw)
 		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "under 1.5V\n");
 	}
 	rtl92c_dm_init(hw);
+exit:
+	local_irq_restore(flags);
 	rtlpci->being_init_adapter = false;
 	return err;
 }
@@ -24,7 +24,11 @@ MODULE_LICENSE("GPL");
 
 static void of_set_phy_supported(struct phy_device *phydev, u32 max_speed)
 {
-    phydev->supported |= PHY_DEFAULT_FEATURES;
+    /* The default values for phydev->supported are provided by the PHY
+     * driver "features" member, we want to reset to sane defaults first
+     * before supporting higher speeds.
+     */
+    phydev->supported &= PHY_DEFAULT_FEATURES;
 
     switch (max_speed) {
     default:
@@ -44,7 +48,7 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio, struct device_node *child,
 {
     struct phy_device *phy;
     bool is_c45;
-    int rc, prev_irq;
+    int rc;
     u32 max_speed = 0;
 
     is_c45 = of_device_is_compatible(child,
@@ -54,12 +58,14 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio, struct device_node *child,
     if (!phy || IS_ERR(phy))
         return 1;
 
-    if (mdio->irq) {
-        prev_irq = mdio->irq[addr];
-        mdio->irq[addr] =
-            irq_of_parse_and_map(child, 0);
-        if (!mdio->irq[addr])
-            mdio->irq[addr] = prev_irq;
+    rc = irq_of_parse_and_map(child, 0);
+    if (rc > 0) {
+        phy->irq = rc;
+        if (mdio->irq)
+            mdio->irq[addr] = rc;
+    } else {
+        if (mdio->irq)
+            phy->irq = mdio->irq[addr];
     }
 
     /* Associate the OF node with the device structure so it
 
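The rewritten block prefers a freshly mapped interrupt (rc > 0), assigning it to both the PHY and the bus slot, and otherwise inherits whatever default the bus already carries for that address. A minimal userspace model of that decision, with a stubbed mapping function (all names here are illustrative, not the kernel API):

#include <assert.h>

#define NO_IRQ 0

/* Stand-in for irq_of_parse_and_map(): returns a mapped IRQ number or 0. */
static int parse_and_map_irq(int devicetree_irq)
{
    return devicetree_irq; /* pretend the DT property maps 1:1 */
}

/* Mirrors the fixed of_mdiobus_register_phy() logic: prefer the freshly
 * mapped IRQ, fall back to the bus-wide default for this address.
 */
static void assign_phy_irq(int dt_irq, int *phy_irq, int *bus_irq_slot)
{
    int rc = parse_and_map_irq(dt_irq);

    if (rc > 0) {
        *phy_irq = rc;
        if (bus_irq_slot)
            *bus_irq_slot = rc;
    } else if (bus_irq_slot) {
        *phy_irq = *bus_irq_slot;
    }
}

int main(void)
{
    int phy_irq = NO_IRQ, bus_slot = 42;

    assign_phy_irq(17, &phy_irq, &bus_slot); /* DT provides IRQ 17 */
    assert(phy_irq == 17 && bus_slot == 17);

    assign_phy_irq(0, &phy_irq, &bus_slot);  /* no DT IRQ: inherit 17 */
    assert(phy_irq == 17);
    return 0;
}
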
@@ -40,7 +40,7 @@ static INT bcm_close(struct net_device *dev)
 }
 
 static u16 bcm_select_queue(struct net_device *dev, struct sk_buff *skb,
-                void *accel_priv)
+                void *accel_priv, select_queue_fallback_t fallback)
 {
     return ClassifyPacket(netdev_priv(dev), skb);
 }
 
@@ -307,7 +307,7 @@ static netdev_tx_t xlr_net_start_xmit(struct sk_buff *skb,
 }
 
 static u16 xlr_net_select_queue(struct net_device *ndev, struct sk_buff *skb,
-                void *accel_priv)
+                void *accel_priv, select_queue_fallback_t fallback)
 {
     return (u16)smp_processor_id();
 }
 
@@ -653,7 +653,7 @@ static unsigned int rtw_classify8021d(struct sk_buff *skb)
 }
 
 static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb,
-                void *accel_priv)
+                void *accel_priv, select_queue_fallback_t fallback)
 {
     struct adapter *padapter = rtw_netdev_priv(dev);
     struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
 
@@ -70,7 +70,12 @@ enum {
 };
 
 struct vhost_net_ubuf_ref {
-    struct kref kref;
+    /* refcount follows semantics similar to kref:
+     *  0: object is released
+     *  1: no outstanding ubufs
+     * >1: outstanding ubufs
+     */
+    atomic_t refcount;
     wait_queue_head_t wait;
     struct vhost_virtqueue *vq;
 };
@@ -116,14 +121,6 @@ static void vhost_net_enable_zcopy(int vq)
     vhost_net_zcopy_mask |= 0x1 << vq;
 }
 
-static void vhost_net_zerocopy_done_signal(struct kref *kref)
-{
-    struct vhost_net_ubuf_ref *ubufs;
-
-    ubufs = container_of(kref, struct vhost_net_ubuf_ref, kref);
-    wake_up(&ubufs->wait);
-}
-
 static struct vhost_net_ubuf_ref *
 vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy)
 {
@@ -134,21 +131,24 @@ vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy)
     ubufs = kmalloc(sizeof(*ubufs), GFP_KERNEL);
     if (!ubufs)
         return ERR_PTR(-ENOMEM);
-    kref_init(&ubufs->kref);
+    atomic_set(&ubufs->refcount, 1);
     init_waitqueue_head(&ubufs->wait);
     ubufs->vq = vq;
     return ubufs;
 }
 
-static void vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs)
+static int vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs)
 {
-    kref_put(&ubufs->kref, vhost_net_zerocopy_done_signal);
+    int r = atomic_sub_return(1, &ubufs->refcount);
+    if (unlikely(!r))
+        wake_up(&ubufs->wait);
+    return r;
 }
 
 static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs)
 {
-    kref_put(&ubufs->kref, vhost_net_zerocopy_done_signal);
-    wait_event(ubufs->wait, !atomic_read(&ubufs->kref.refcount));
+    vhost_net_ubuf_put(ubufs);
+    wait_event(ubufs->wait, !atomic_read(&ubufs->refcount));
 }
 
 static void vhost_net_ubuf_put_wait_and_free(struct vhost_net_ubuf_ref *ubufs)
@@ -306,23 +306,26 @@ static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
 {
     struct vhost_net_ubuf_ref *ubufs = ubuf->ctx;
     struct vhost_virtqueue *vq = ubufs->vq;
-    int cnt = atomic_read(&ubufs->kref.refcount);
+    int cnt;
+
+    rcu_read_lock_bh();
 
     /* set len to mark this desc buffers done DMA */
     vq->heads[ubuf->desc].len = success ?
         VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN;
-    vhost_net_ubuf_put(ubufs);
+    cnt = vhost_net_ubuf_put(ubufs);
 
     /*
      * Trigger polling thread if guest stopped submitting new buffers:
-     * in this case, the refcount after decrement will eventually reach 1
-     * so here it is 2.
+     * in this case, the refcount after decrement will eventually reach 1.
      * We also trigger polling periodically after each 16 packets
     * (the value 16 here is more or less arbitrary, it's tuned to trigger
      * less than 10% of times).
      */
-    if (cnt <= 2 || !(cnt % 16))
+    if (cnt <= 1 || !(cnt % 16))
         vhost_poll_queue(&vq->poll);
+
+    rcu_read_unlock_bh();
 }
 
 /* Expects to be always run from workqueue - which acts as
@@ -420,7 +423,7 @@ static void handle_tx(struct vhost_net *net)
             msg.msg_control = ubuf;
             msg.msg_controllen = sizeof(ubuf);
             ubufs = nvq->ubufs;
-            kref_get(&ubufs->kref);
+            atomic_inc(&ubufs->refcount);
             nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV;
         } else {
             msg.msg_control = NULL;
@@ -780,7 +783,7 @@ static void vhost_net_flush(struct vhost_net *n)
         vhost_net_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].ubufs);
         mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
         n->tx_flush = false;
-        kref_init(&n->vqs[VHOST_NET_VQ_TX].ubufs->kref);
+        atomic_set(&n->vqs[VHOST_NET_VQ_TX].ubufs->refcount, 1);
         mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
     }
 }
@@ -800,6 +803,8 @@ static int vhost_net_release(struct inode *inode, struct file *f)
         fput(tx_sock->file);
     if (rx_sock)
         fput(rx_sock->file);
+    /* Make sure no callbacks are outstanding */
+    synchronize_rcu_bh();
     /* We do an extra flush before freeing memory,
      * since jobs can re-queue themselves. */
     vhost_net_flush(n);
 
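The kref-to-atomic conversion keeps the same lifecycle documented in the new comment: the counter starts at 1 for the virtqueue itself, each in-flight zero-copy buffer takes one reference, and the waiter is only woken when the count reaches zero. A minimal userspace sketch of those semantics (the names model, but are not, the kernel types):

#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>

/* Stand-in for struct vhost_net_ubuf_ref: only the counter matters here. */
struct ubuf_ref {
    atomic_int refcount; /* 0: released, 1: no outstanding ubufs, >1: outstanding */
};

static void ubuf_ref_init(struct ubuf_ref *u)
{
    atomic_store(&u->refcount, 1); /* like atomic_set(&ubufs->refcount, 1) */
}

static int ubuf_ref_put(struct ubuf_ref *u)
{
    int r = atomic_fetch_sub(&u->refcount, 1) - 1; /* new value */

    if (r == 0)
        printf("wake up waiter\n"); /* kernel: wake_up(&ubufs->wait) */
    return r; /* returning the new count lets the caller test cnt <= 1 */
}

int main(void)
{
    struct ubuf_ref u;

    ubuf_ref_init(&u);
    atomic_fetch_add(&u.refcount, 1); /* one zero-copy tx in flight */
    assert(ubuf_ref_put(&u) == 1);    /* completion: back to baseline */
    assert(ubuf_ref_put(&u) == 0);    /* final put: waiter runs */
    return 0;
}
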
@@ -752,6 +752,9 @@ struct netdev_phys_port_id {
     unsigned char id_len;
 };
 
+typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
+                       struct sk_buff *skb);
+
 /*
  * This structure defines the management hooks for network devices.
  * The following hooks can be defined; unless noted otherwise, they are
@@ -783,7 +786,7 @@ struct netdev_phys_port_id {
  *    Required can not be NULL.
  *
  * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
- *                         void *accel_priv);
+ *                         void *accel_priv, select_queue_fallback_t fallback);
  *    Called to decide which queue to use when device supports multiple
  *    transmit queues.
  *
@@ -1005,7 +1008,8 @@ struct net_device_ops {
                            struct net_device *dev);
     u16         (*ndo_select_queue)(struct net_device *dev,
                         struct sk_buff *skb,
-                        void *accel_priv);
+                        void *accel_priv,
+                        select_queue_fallback_t fallback);
     void        (*ndo_change_rx_flags)(struct net_device *dev,
                            int flags);
     void        (*ndo_set_rx_mode)(struct net_device *dev);
@@ -1551,7 +1555,6 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev,
 struct netdev_queue *netdev_pick_tx(struct net_device *dev,
                     struct sk_buff *skb,
                     void *accel_priv);
-u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb);
 
 /*
  * Net namespace inlines
@@ -2275,6 +2278,26 @@ static inline void netdev_reset_queue(struct net_device *dev_queue)
     netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0));
 }
 
+/**
+ * netdev_cap_txqueue - check if selected tx queue exceeds device queues
+ * @dev: network device
+ * @queue_index: given tx queue index
+ *
+ * Returns 0 if given tx queue index >= number of device tx queues,
+ * otherwise returns the originally passed tx queue index.
+ */
+static inline u16 netdev_cap_txqueue(struct net_device *dev, u16 queue_index)
+{
+    if (unlikely(queue_index >= dev->real_num_tx_queues)) {
+        net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
+                     dev->name, queue_index,
+                     dev->real_num_tx_queues);
+        return 0;
+    }
+
+    return queue_index;
+}
+
 /**
  * netif_running - test if up
  * @dev: network device
@@ -3068,7 +3091,12 @@ void netdev_change_features(struct net_device *dev);
 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
                     struct net_device *dev);
 
-netdev_features_t netif_skb_features(struct sk_buff *skb);
+netdev_features_t netif_skb_dev_features(struct sk_buff *skb,
+                     const struct net_device *dev);
+static inline netdev_features_t netif_skb_features(struct sk_buff *skb)
+{
+    return netif_skb_dev_features(skb, skb->dev);
+}
 
 static inline bool net_gso_ok(netdev_features_t features, int gso_type)
 {
 
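netdev_cap_txqueue is a pure bounds check: any queue index at or past real_num_tx_queues falls back to queue 0. The same clamp, restated as a standalone userspace sketch (hypothetical names, and minus the ratelimiting):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Same clamp as netdev_cap_txqueue, with a plain warning. */
static uint16_t cap_txqueue(uint16_t real_num_tx_queues, uint16_t queue_index)
{
    if (queue_index >= real_num_tx_queues) {
        fprintf(stderr, "queue %u out of range (%u queues), using 0\n",
            queue_index, real_num_tx_queues);
        return 0;
    }
    return queue_index;
}

int main(void)
{
    assert(cap_txqueue(4, 3) == 3); /* in range: passed through */
    assert(cap_txqueue(4, 7) == 0); /* out of range: forced to queue 0 */
    return 0;
}
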
@@ -2916,5 +2916,22 @@ static inline bool skb_head_is_locked(const struct sk_buff *skb)
 {
     return !skb->head_frag || skb_cloned(skb);
 }
+
+/**
+ * skb_gso_network_seglen - Return length of individual segments of a gso packet
+ *
+ * @skb: GSO skb
+ *
+ * skb_gso_network_seglen is used to determine the real size of the
+ * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP).
+ *
+ * The MAC/L2 header is not accounted for.
+ */
+static inline unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
+{
+    unsigned int hdr_len = skb_transport_header(skb) -
+                   skb_network_header(skb);
+    return hdr_len + skb_gso_transport_seglen(skb);
+}
 #endif /* __KERNEL__ */
 #endif /* _LINUX_SKBUFF_H */
 
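The helper's arithmetic is just "network-header bytes plus the per-segment transport length". Worked through with illustrative numbers (not taken from the patch), a 20-byte IPv4 header plus a 20-byte TCP header and a 1448-byte gso_size yields 1488 bytes per segment, which fits a 1500-byte MTU:

#include <assert.h>
#include <stdio.h>

/* Model of skb_gso_network_seglen: per-segment size as seen at layer 3.
 * network_hdr_len stands for skb_transport_header() - skb_network_header();
 * transport_seglen stands for skb_gso_transport_seglen().
 */
static unsigned int gso_network_seglen(unsigned int network_hdr_len,
                       unsigned int transport_seglen)
{
    return network_hdr_len + transport_seglen;
}

int main(void)
{
    /* 20-byte IPv4 header; 20-byte TCP header + 1448 bytes of payload */
    unsigned int seglen = gso_network_seglen(20, 20 + 1448);

    assert(seglen == 1488);
    printf("per-segment L3 size: %u (fits a 1500-byte MTU)\n", seglen);
    return 0;
}
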
@@ -1653,17 +1653,6 @@ struct sctp_association {
     /* This is the last advertised value of rwnd over a SACK chunk. */
     __u32 a_rwnd;
 
-    /* Number of bytes by which the rwnd has slopped.  The rwnd is allowed
-     * to slop over a maximum of the association's frag_point.
-     */
-    __u32 rwnd_over;
-
-    /* Keeps treack of rwnd pressure.  This happens when we have
-     * a window, but not recevie buffer (i.e small packets).  This one
-     * is releases slowly (1 PMTU at a time ).
-     */
-    __u32 rwnd_press;
-
     /* This is the sndbuf size in use for the association.
      * This corresponds to the sndbuf size for the association,
      * as specified in the sk->sndbuf.
@@ -1892,8 +1881,7 @@ void sctp_assoc_update(struct sctp_association *old,
 __u32 sctp_association_get_next_tsn(struct sctp_association *);
 
 void sctp_assoc_sync_pmtu(struct sock *, struct sctp_association *);
-void sctp_assoc_rwnd_increase(struct sctp_association *, unsigned int);
-void sctp_assoc_rwnd_decrease(struct sctp_association *, unsigned int);
+void sctp_assoc_rwnd_update(struct sctp_association *, bool);
 void sctp_assoc_set_primary(struct sctp_association *,
                 struct sctp_transport *);
 void sctp_assoc_del_nonprimary_peers(struct sctp_association *,
 
@@ -241,19 +241,19 @@ batadv_iv_ogm_orig_get(struct batadv_priv *bat_priv, const uint8_t *addr)
     size = bat_priv->num_ifaces * sizeof(uint8_t);
     orig_node->bat_iv.bcast_own_sum = kzalloc(size, GFP_ATOMIC);
     if (!orig_node->bat_iv.bcast_own_sum)
-        goto free_bcast_own;
+        goto free_orig_node;
 
     hash_added = batadv_hash_add(bat_priv->orig_hash, batadv_compare_orig,
                      batadv_choose_orig, orig_node,
                      &orig_node->hash_entry);
     if (hash_added != 0)
-        goto free_bcast_own;
+        goto free_orig_node;
 
     return orig_node;
 
-free_bcast_own:
-    kfree(orig_node->bat_iv.bcast_own);
 free_orig_node:
+    /* free twice, as batadv_orig_node_new sets refcount to 2 */
+    batadv_orig_node_free_ref(orig_node);
     batadv_orig_node_free_ref(orig_node);
 
     return NULL;
@@ -266,7 +266,7 @@ batadv_iv_ogm_neigh_new(struct batadv_hard_iface *hard_iface,
             struct batadv_orig_node *orig_neigh)
 {
     struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
-    struct batadv_neigh_node *neigh_node;
+    struct batadv_neigh_node *neigh_node, *tmp_neigh_node;
 
     neigh_node = batadv_neigh_node_new(hard_iface, neigh_addr, orig_node);
     if (!neigh_node)
@@ -281,13 +281,23 @@ batadv_iv_ogm_neigh_new(struct batadv_hard_iface *hard_iface,
     neigh_node->orig_node = orig_neigh;
     neigh_node->if_incoming = hard_iface;
 
-    batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
-           "Creating new neighbor %pM for orig_node %pM on interface %s\n",
-           neigh_addr, orig_node->orig, hard_iface->net_dev->name);
+    spin_lock_bh(&orig_node->neigh_list_lock);
+    tmp_neigh_node = batadv_neigh_node_get(orig_node, hard_iface,
+                           neigh_addr);
+    if (!tmp_neigh_node) {
+        hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
+    } else {
+        kfree(neigh_node);
+        batadv_hardif_free_ref(hard_iface);
+        neigh_node = tmp_neigh_node;
+    }
+    spin_unlock_bh(&orig_node->neigh_list_lock);
 
-    spin_lock_bh(&orig_node->neigh_list_lock);
-    hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
-    spin_unlock_bh(&orig_node->neigh_list_lock);
+    if (!tmp_neigh_node)
+        batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+               "Creating new neighbor %pM for orig_node %pM on interface %s\n",
+               neigh_addr, orig_node->orig,
+               hard_iface->net_dev->name);
 
 out:
     return neigh_node;
 
@@ -241,7 +241,7 @@ int batadv_hardif_min_mtu(struct net_device *soft_iface)
 {
     struct batadv_priv *bat_priv = netdev_priv(soft_iface);
     const struct batadv_hard_iface *hard_iface;
-    int min_mtu = ETH_DATA_LEN;
+    int min_mtu = INT_MAX;
 
     rcu_read_lock();
     list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
@@ -256,8 +256,6 @@ int batadv_hardif_min_mtu(struct net_device *soft_iface)
     }
     rcu_read_unlock();
 
-    atomic_set(&bat_priv->packet_size_max, min_mtu);
-
     if (atomic_read(&bat_priv->fragmentation) == 0)
         goto out;
 
@@ -268,13 +266,21 @@ int batadv_hardif_min_mtu(struct net_device *soft_iface)
     min_mtu = min_t(int, min_mtu, BATADV_FRAG_MAX_FRAG_SIZE);
     min_mtu -= sizeof(struct batadv_frag_packet);
     min_mtu *= BATADV_FRAG_MAX_FRAGMENTS;
-    atomic_set(&bat_priv->packet_size_max, min_mtu);
-
-    /* with fragmentation enabled we can fragment external packets easily */
-    min_mtu = min_t(int, min_mtu, ETH_DATA_LEN);
 
 out:
-    return min_mtu - batadv_max_header_len();
+    /* report to the other components the maximum amount of bytes that
+     * batman-adv can send over the wire (without considering the payload
+     * overhead). For example, this value is used by TT to compute the
+     * maximum local table table size
+     */
+    atomic_set(&bat_priv->packet_size_max, min_mtu);
+
+    /* the real soft-interface MTU is computed by removing the payload
+     * overhead from the maximum amount of bytes that was just computed.
+     *
+     * However batman-adv does not support MTUs bigger than ETH_DATA_LEN
+     */
+    return min_t(int, min_mtu - batadv_max_header_len(), ETH_DATA_LEN);
 }
 
 /* adjusts the MTU if a new interface with a smaller MTU appeared. */
 
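With fragmentation enabled, the usable payload is "smallest fragment size minus the fragment header, times the maximum fragment count"; the reported soft-interface MTU then subtracts batman-adv's own header and is capped at ETH_DATA_LEN. A worked example with made-up stand-ins for the constants (the real values live in the batman-adv headers):

#include <stdio.h>

int main(void)
{
    /* Illustrative stand-ins, not the real batman-adv constants */
    int frag_max_frag_size = 1400;   /* BATADV_FRAG_MAX_FRAG_SIZE */
    int frag_hdr = 20;               /* sizeof(struct batadv_frag_packet) */
    int max_fragments = 16;          /* BATADV_FRAG_MAX_FRAGMENTS */
    int max_header_len = 32;         /* batadv_max_header_len() */
    int eth_data_len = 1500;         /* ETH_DATA_LEN */

    int min_mtu = 1400;              /* smallest hard-interface MTU seen */

    if (min_mtu > frag_max_frag_size)
        min_mtu = frag_max_frag_size;
    min_mtu -= frag_hdr;             /* usable bytes per fragment */
    min_mtu *= max_fragments;        /* bytes across the whole fragment burst */

    min_mtu -= max_header_len;       /* strip batman-adv's own overhead */
    if (min_mtu > eth_data_len)      /* never exceed a normal Ethernet MTU */
        min_mtu = eth_data_len;

    printf("soft-interface MTU: %d\n", min_mtu); /* 1500 in this example */
    return 0;
}
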
@@ -457,6 +457,42 @@ batadv_neigh_node_new(struct batadv_hard_iface *hard_iface,
     return neigh_node;
 }
 
+/**
+ * batadv_neigh_node_get - retrieve a neighbour from the list
+ * @orig_node: originator which the neighbour belongs to
+ * @hard_iface: the interface where this neighbour is connected to
+ * @addr: the address of the neighbour
+ *
+ * Looks for and possibly returns a neighbour belonging to this originator list
+ * which is connected through the provided hard interface.
+ * Returns NULL if the neighbour is not found.
+ */
+struct batadv_neigh_node *
+batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
+              const struct batadv_hard_iface *hard_iface,
+              const uint8_t *addr)
+{
+    struct batadv_neigh_node *tmp_neigh_node, *res = NULL;
+
+    rcu_read_lock();
+    hlist_for_each_entry_rcu(tmp_neigh_node, &orig_node->neigh_list, list) {
+        if (!batadv_compare_eth(tmp_neigh_node->addr, addr))
+            continue;
+
+        if (tmp_neigh_node->if_incoming != hard_iface)
+            continue;
+
+        if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
+            continue;
+
+        res = tmp_neigh_node;
+        break;
+    }
+    rcu_read_unlock();
+
+    return res;
+}
+
 /**
  * batadv_orig_ifinfo_free_rcu - free the orig_ifinfo object
  * @rcu: rcu pointer of the orig_ifinfo object
 
@@ -29,6 +29,10 @@ void batadv_orig_node_free_ref_now(struct batadv_orig_node *orig_node);
 struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
                           const uint8_t *addr);
+struct batadv_neigh_node *
+batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
+              const struct batadv_hard_iface *hard_iface,
+              const uint8_t *addr);
 struct batadv_neigh_node *
 batadv_neigh_node_new(struct batadv_hard_iface *hard_iface,
               const uint8_t *neigh_addr,
               struct batadv_orig_node *orig_node);
 
@@ -688,7 +688,7 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
     int is_old_ttvn;
 
     /* check if there is enough data before accessing it */
-    if (pskb_may_pull(skb, hdr_len + ETH_HLEN) < 0)
+    if (!pskb_may_pull(skb, hdr_len + ETH_HLEN))
         return 0;
 
     /* create a copy of the skb (in case of for re-routing) to modify it. */
@@ -918,6 +918,8 @@ int batadv_recv_unicast_tvlv(struct sk_buff *skb,
 
     if (ret != NET_RX_SUCCESS)
         ret = batadv_route_unicast_packet(skb, recv_if);
+    else
+        consume_skb(skb);
 
     return ret;
 }
 
@@ -254,9 +254,9 @@ static int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
                    struct batadv_orig_node *orig_node,
                    unsigned short vid)
 {
-    struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
+    struct ethhdr *ethhdr;
     struct batadv_unicast_packet *unicast_packet;
-    int ret = NET_XMIT_DROP;
+    int ret = NET_XMIT_DROP, hdr_size;
 
     if (!orig_node)
         goto out;
@@ -265,12 +265,16 @@ static int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
     case BATADV_UNICAST:
         if (!batadv_send_skb_prepare_unicast(skb, orig_node))
             goto out;
+
+        hdr_size = sizeof(*unicast_packet);
         break;
     case BATADV_UNICAST_4ADDR:
         if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, skb,
                                orig_node,
                                packet_subtype))
             goto out;
+
+        hdr_size = sizeof(struct batadv_unicast_4addr_packet);
         break;
     default:
         /* this function supports UNICAST and UNICAST_4ADDR only. It
@@ -279,6 +283,7 @@ static int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
         goto out;
     }
 
+    ethhdr = (struct ethhdr *)(skb->data + hdr_size);
     unicast_packet = (struct batadv_unicast_packet *)skb->data;
 
     /* inform the destination node that we are still missing a correct route
 
@@ -1975,6 +1975,7 @@ static uint32_t batadv_tt_global_crc(struct batadv_priv *bat_priv,
     struct hlist_head *head;
     uint32_t i, crc_tmp, crc = 0;
     uint8_t flags;
+    __be16 tmp_vid;
 
     for (i = 0; i < hash->size; i++) {
         head = &hash->table[i];
@@ -2011,8 +2012,11 @@ static uint32_t batadv_tt_global_crc(struct batadv_priv *bat_priv,
                          orig_node))
                 continue;
 
-            crc_tmp = crc32c(0, &tt_common->vid,
-                     sizeof(tt_common->vid));
+            /* use network order to read the VID: this ensures that
+             * every node reads the bytes in the same order.
+             */
+            tmp_vid = htons(tt_common->vid);
+            crc_tmp = crc32c(0, &tmp_vid, sizeof(tmp_vid));
 
             /* compute the CRC on flags that have to be kept in sync
              * among nodes
@@ -2046,6 +2050,7 @@ static uint32_t batadv_tt_local_crc(struct batadv_priv *bat_priv,
     struct hlist_head *head;
     uint32_t i, crc_tmp, crc = 0;
     uint8_t flags;
+    __be16 tmp_vid;
 
     for (i = 0; i < hash->size; i++) {
         head = &hash->table[i];
@@ -2064,8 +2069,11 @@ static uint32_t batadv_tt_local_crc(struct batadv_priv *bat_priv,
             if (tt_common->flags & BATADV_TT_CLIENT_NEW)
                 continue;
 
-            crc_tmp = crc32c(0, &tt_common->vid,
-                     sizeof(tt_common->vid));
+            /* use network order to read the VID: this ensures that
+             * every node reads the bytes in the same order.
+             */
+            tmp_vid = htons(tt_common->vid);
+            crc_tmp = crc32c(0, &tmp_vid, sizeof(tmp_vid));
 
             /* compute the CRC on flags that have to be kept in sync
              * among nodes
@@ -2262,6 +2270,7 @@ static bool batadv_tt_global_check_crc(struct batadv_orig_node *orig_node,
 {
     struct batadv_tvlv_tt_vlan_data *tt_vlan_tmp;
     struct batadv_orig_node_vlan *vlan;
+    uint32_t crc;
     int i;
 
     /* check if each received CRC matches the locally stored one */
@@ -2281,7 +2290,10 @@ static bool batadv_tt_global_check_crc(struct batadv_orig_node *orig_node,
         if (!vlan)
             return false;
 
-        if (vlan->tt.crc != ntohl(tt_vlan_tmp->crc))
+        crc = vlan->tt.crc;
+        batadv_orig_node_vlan_free_ref(vlan);
+
+        if (crc != ntohl(tt_vlan_tmp->crc))
             return false;
     }
 
@@ -3218,7 +3230,6 @@ static void batadv_tt_update_orig(struct batadv_priv *bat_priv,
 
     spin_lock_bh(&orig_node->tt_lock);
 
-    tt_change = (struct batadv_tvlv_tt_change *)tt_buff;
     batadv_tt_update_changes(bat_priv, orig_node, tt_num_changes,
                  ttvn, tt_change);
 
@@ -2420,7 +2420,7 @@ EXPORT_SYMBOL(netdev_rx_csum_fault)
  * 2. No high memory really exists on this machine.
  */
 
-static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
+static int illegal_highdma(const struct net_device *dev, struct sk_buff *skb)
 {
 #ifdef CONFIG_HIGHMEM
     int i;
@@ -2495,34 +2495,36 @@ static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
 }
 
 static netdev_features_t harmonize_features(struct sk_buff *skb,
+                        const struct net_device *dev,
                         netdev_features_t features)
 {
     if (skb->ip_summed != CHECKSUM_NONE &&
         !can_checksum_protocol(features, skb_network_protocol(skb))) {
         features &= ~NETIF_F_ALL_CSUM;
-    } else if (illegal_highdma(skb->dev, skb)) {
+    } else if (illegal_highdma(dev, skb)) {
         features &= ~NETIF_F_SG;
     }
 
     return features;
 }
 
-netdev_features_t netif_skb_features(struct sk_buff *skb)
+netdev_features_t netif_skb_dev_features(struct sk_buff *skb,
+                     const struct net_device *dev)
 {
     __be16 protocol = skb->protocol;
-    netdev_features_t features = skb->dev->features;
+    netdev_features_t features = dev->features;
 
-    if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
+    if (skb_shinfo(skb)->gso_segs > dev->gso_max_segs)
         features &= ~NETIF_F_GSO_MASK;
 
     if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) {
         struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
         protocol = veh->h_vlan_encapsulated_proto;
     } else if (!vlan_tx_tag_present(skb)) {
-        return harmonize_features(skb, features);
+        return harmonize_features(skb, dev, features);
     }
 
-    features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
+    features &= (dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
              NETIF_F_HW_VLAN_STAG_TX);
 
     if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD))
@@ -2530,9 +2532,9 @@ netdev_features_t netif_skb_features(struct sk_buff *skb)
             NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
             NETIF_F_HW_VLAN_STAG_TX;
 
-    return harmonize_features(skb, features);
+    return harmonize_features(skb, dev, features);
 }
-EXPORT_SYMBOL(netif_skb_features);
+EXPORT_SYMBOL(netif_skb_dev_features);
 
 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
             struct netdev_queue *txq)
 
@@ -323,17 +323,6 @@ u32 __skb_get_poff(const struct sk_buff *skb)
     return poff;
 }
 
-static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
-{
-    if (unlikely(queue_index >= dev->real_num_tx_queues)) {
-        net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
-                     dev->name, queue_index,
-                     dev->real_num_tx_queues);
-        return 0;
-    }
-    return queue_index;
-}
-
 static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
 {
 #ifdef CONFIG_XPS
@@ -372,7 +361,7 @@ static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
 #endif
 }
 
-u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
+static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
 {
     struct sock *sk = skb->sk;
     int queue_index = sk_tx_queue_get(sk);
@@ -392,7 +381,6 @@ u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
 
     return queue_index;
 }
-EXPORT_SYMBOL(__netdev_pick_tx);
 
 struct netdev_queue *netdev_pick_tx(struct net_device *dev,
                     struct sk_buff *skb,
@@ -403,13 +391,13 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev,
     if (dev->real_num_tx_queues != 1) {
         const struct net_device_ops *ops = dev->netdev_ops;
         if (ops->ndo_select_queue)
-            queue_index = ops->ndo_select_queue(dev, skb,
-                                accel_priv);
+            queue_index = ops->ndo_select_queue(dev, skb, accel_priv,
+                                __netdev_pick_tx);
         else
             queue_index = __netdev_pick_tx(dev, skb);
 
         if (!accel_priv)
-            queue_index = dev_cap_txqueue(dev, queue_index);
+            queue_index = netdev_cap_txqueue(dev, queue_index);
     }
 
     skb_set_queue_mapping(skb, queue_index);
 
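The point of select_queue_fallback_t is that a driver's ndo_select_queue no longer has to duplicate (or silently skip) the core's default policy: it receives __netdev_pick_tx as a callback and can delegate to it. A small function-pointer sketch of the pattern, using hypothetical stand-in types rather than the kernel's:

#include <assert.h>

struct device { unsigned short real_num_tx_queues; };
struct packet { int flags; };

typedef unsigned short (*select_queue_fallback_t)(struct device *dev,
                          struct packet *pkt);

/* Core default policy, analogous to __netdev_pick_tx() */
static unsigned short default_pick_tx(struct device *dev, struct packet *pkt)
{
    (void)dev;
    (void)pkt;
    return 0; /* keep it trivial: always queue 0 */
}

/* A driver that special-cases some traffic and delegates the rest */
static unsigned short driver_select_queue(struct device *dev,
                      struct packet *pkt,
                      select_queue_fallback_t fallback)
{
    if (pkt->flags)                  /* driver-specific traffic */
        return dev->real_num_tx_queues - 1;
    return fallback(dev, pkt);       /* everything else: core policy */
}

int main(void)
{
    struct device dev = { .real_num_tx_queues = 4 };
    struct packet special = { .flags = 1 }, normal = { .flags = 0 };

    assert(driver_select_queue(&dev, &special, default_pick_tx) == 3);
    assert(driver_select_queue(&dev, &normal, default_pick_tx) == 0);
    return 0;
}
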
@@ -1963,16 +1963,21 @@ static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh)
 
     dev->ifindex = ifm->ifi_index;
 
-    if (ops->newlink)
+    if (ops->newlink) {
         err = ops->newlink(net, dev, tb, data);
-    else
+        /* Drivers should call free_netdev() in ->destructor
+         * and unregister it on failure so that device could be
+         * finally freed in rtnl_unlock.
+         */
+        if (err < 0)
+            goto out;
+    } else {
         err = register_netdevice(dev);
-
-    if (err < 0) {
-        free_netdev(dev);
-        goto out;
+        if (err < 0) {
+            free_netdev(dev);
+            goto out;
+        }
     }
-
     err = rtnl_configure_link(dev, ifm);
     if (err < 0)
         unregister_netdevice(dev);
 
@@ -8,7 +8,7 @@
 #include "tfrc.h"
 
 #ifdef CONFIG_IP_DCCP_TFRC_DEBUG
-static bool tfrc_debug;
+bool tfrc_debug;
 module_param(tfrc_debug, bool, 0644);
 MODULE_PARM_DESC(tfrc_debug, "Enable TFRC debug messages");
 #endif
 
@@ -21,6 +21,7 @@
 #include "packet_history.h"
 
 #ifdef CONFIG_IP_DCCP_TFRC_DEBUG
+extern bool tfrc_debug;
 #define tfrc_pr_debug(format, a...)    DCCP_PR_DEBUG(tfrc_debug, format, ##a)
 #else
 #define tfrc_pr_debug(format, a...)
 
@@ -39,6 +39,71 @@
 #include <net/route.h>
 #include <net/xfrm.h>
 
+static bool ip_may_fragment(const struct sk_buff *skb)
+{
+    return unlikely((ip_hdr(skb)->frag_off & htons(IP_DF)) == 0) ||
+        skb->local_df;
+}
+
+static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
+{
+    if (skb->len <= mtu || skb->local_df)
+        return false;
+
+    if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
+        return false;
+
+    return true;
+}
+
+static bool ip_gso_exceeds_dst_mtu(const struct sk_buff *skb)
+{
+    unsigned int mtu;
+
+    if (skb->local_df || !skb_is_gso(skb))
+        return false;
+
+    mtu = ip_dst_mtu_maybe_forward(skb_dst(skb), true);
+
+    /* if seglen > mtu, do software segmentation for IP fragmentation on
+     * output.  DF bit cannot be set since ip_forward would have sent
+     * icmp error.
+     */
+    return skb_gso_network_seglen(skb) > mtu;
+}
+
+/* called if GSO skb needs to be fragmented on forward */
+static int ip_forward_finish_gso(struct sk_buff *skb)
+{
+    struct dst_entry *dst = skb_dst(skb);
+    netdev_features_t features;
+    struct sk_buff *segs;
+    int ret = 0;
+
+    features = netif_skb_dev_features(skb, dst->dev);
+    segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
+    if (IS_ERR(segs)) {
+        kfree_skb(skb);
+        return -ENOMEM;
+    }
+
+    consume_skb(skb);
+
+    do {
+        struct sk_buff *nskb = segs->next;
+        int err;
+
+        segs->next = NULL;
+        err = dst_output(segs);
+
+        if (err && ret == 0)
+            ret = err;
+        segs = nskb;
+    } while (segs);
+
+    return ret;
+}
+
 static int ip_forward_finish(struct sk_buff *skb)
 {
     struct ip_options *opt = &(IPCB(skb)->opt);
@@ -49,6 +114,9 @@ static int ip_forward_finish(struct sk_buff *skb)
     if (unlikely(opt->optlen))
         ip_forward_options(skb);
 
+    if (ip_gso_exceeds_dst_mtu(skb))
+        return ip_forward_finish_gso(skb);
+
     return dst_output(skb);
 }
 
@@ -91,8 +159,7 @@ int ip_forward(struct sk_buff *skb)
 
     IPCB(skb)->flags |= IPSKB_FORWARDED;
     mtu = ip_dst_mtu_maybe_forward(&rt->dst, true);
-    if (unlikely(skb->len > mtu && !skb_is_gso(skb) &&
-             (ip_hdr(skb)->frag_off & htons(IP_DF))) && !skb->local_df) {
+    if (!ip_may_fragment(skb) && ip_exceeds_mtu(skb, mtu)) {
         IP_INC_STATS(dev_net(rt->dst.dev), IPSTATS_MIB_FRAGFAILS);
         icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
               htonl(mtu));
 
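Note that ip_exceeds_mtu judges a GSO skb by its per-segment size rather than its aggregate length, so a 64 KB GSO super-packet still "fits" a 1500-byte route as long as each resulting segment does. A simplified sketch of that decision, with a hypothetical flattened skb type in place of the kernel's:

#include <assert.h>
#include <stdbool.h>

struct fake_skb {
    unsigned int len;        /* aggregate length */
    bool local_df;           /* local fragmentation allowed */
    bool gso;                /* is this a GSO super-packet? */
    unsigned int gso_seglen; /* skb_gso_network_seglen() result */
};

/* Same shape as the new ip_exceeds_mtu() */
static bool exceeds_mtu(const struct fake_skb *skb, unsigned int mtu)
{
    if (skb->len <= mtu || skb->local_df)
        return false;
    if (skb->gso && skb->gso_seglen <= mtu)
        return false; /* will be segmented into fitting frames */
    return true;
}

int main(void)
{
    struct fake_skb gso = { .len = 65000, .gso = true, .gso_seglen = 1488 };
    struct fake_skb big = { .len = 2000 };

    assert(!exceeds_mtu(&gso, 1500)); /* GSO: judged per segment */
    assert(exceeds_mtu(&big, 1500));  /* plain oversized packet */
    return 0;
}
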
@@ -273,7 +273,7 @@ static int __init ic_open_devs(void)
 
         msleep(1);
 
-        if time_before(jiffies, next_msg)
+        if (time_before(jiffies, next_msg))
             continue;
 
         elapsed = jiffies_to_msecs(jiffies - start);
 
@@ -1597,6 +1597,7 @@ static int __mkroute_input(struct sk_buff *skb,
     rth->rt_gateway = 0;
     rth->rt_uses_gateway = 0;
     INIT_LIST_HEAD(&rth->rt_uncached);
+    RT_CACHE_STAT_INC(in_slow_tot);
 
     rth->dst.input = ip_forward;
     rth->dst.output = ip_output;
@@ -1695,10 +1696,11 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
     fl4.daddr = daddr;
     fl4.saddr = saddr;
     err = fib_lookup(net, &fl4, &res);
-    if (err != 0)
+    if (err != 0) {
+        if (!IN_DEV_FORWARD(in_dev))
+            err = -EHOSTUNREACH;
         goto no_route;
-
-    RT_CACHE_STAT_INC(in_slow_tot);
+    }
 
     if (res.type == RTN_BROADCAST)
         goto brd_input;
@@ -1712,8 +1714,10 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
         goto local_input;
     }
 
-    if (!IN_DEV_FORWARD(in_dev))
+    if (!IN_DEV_FORWARD(in_dev)) {
+        err = -EHOSTUNREACH;
         goto no_route;
+    }
     if (res.type != RTN_UNICAST)
         goto martian_destination;
 
@@ -1768,6 +1772,7 @@ out:    return err;
     rth->rt_gateway = 0;
     rth->rt_uses_gateway = 0;
     INIT_LIST_HEAD(&rth->rt_uncached);
+    RT_CACHE_STAT_INC(in_slow_tot);
     if (res.type == RTN_UNREACHABLE) {
         rth->dst.input= ip_error;
         rth->dst.error= -err;
 
@@ -2783,6 +2783,8 @@ static void addrconf_gre_config(struct net_device *dev)
     ipv6_addr_set(&addr, htonl(0xFE800000), 0, 0, 0);
     if (!ipv6_generate_eui64(addr.s6_addr + 8, dev))
         addrconf_add_linklocal(idev, &addr);
+    else
+        addrconf_prefix_route(&addr, 64, dev, 0, 0);
 }
 #endif
 
@@ -342,6 +342,20 @@ static unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
     return mtu;
 }
 
+static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
+{
+    if (skb->len <= mtu || skb->local_df)
+        return false;
+
+    if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)
+        return true;
+
+    if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
+        return false;
+
+    return true;
+}
+
 int ip6_forward(struct sk_buff *skb)
 {
     struct dst_entry *dst = skb_dst(skb);
@@ -466,8 +480,7 @@ int ip6_forward(struct sk_buff *skb)
     if (mtu < IPV6_MIN_MTU)
         mtu = IPV6_MIN_MTU;
 
-    if ((!skb->local_df && skb->len > mtu && !skb_is_gso(skb)) ||
-        (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)) {
+    if (ip6_pkt_too_big(skb, mtu)) {
         /* Again, force OUTPUT device used as source address */
         skb->dev = dst->dev;
         icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
 
@@ -1057,7 +1057,8 @@ static void ieee80211_uninit(struct net_device *dev)
 
 static u16 ieee80211_netdev_select_queue(struct net_device *dev,
                      struct sk_buff *skb,
-                     void *accel_priv)
+                     void *accel_priv,
+                     select_queue_fallback_t fallback)
 {
     return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb);
 }
@@ -1075,7 +1076,8 @@ static const struct net_device_ops ieee80211_dataif_ops = {
 
 static u16 ieee80211_monitor_select_queue(struct net_device *dev,
                       struct sk_buff *skb,
-                      void *accel_priv)
+                      void *accel_priv,
+                      select_queue_fallback_t fallback)
 {
     struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
     struct ieee80211_local *local = sdata->local;
 
@@ -308,11 +308,27 @@ static bool packet_use_direct_xmit(const struct packet_sock *po)
     return po->xmit == packet_direct_xmit;
 }
 
-static u16 packet_pick_tx_queue(struct net_device *dev)
+static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
 {
     return (u16) raw_smp_processor_id() % dev->real_num_tx_queues;
 }
 
+static void packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
+{
+    const struct net_device_ops *ops = dev->netdev_ops;
+    u16 queue_index;
+
+    if (ops->ndo_select_queue) {
+        queue_index = ops->ndo_select_queue(dev, skb, NULL,
+                            __packet_pick_tx_queue);
+        queue_index = netdev_cap_txqueue(dev, queue_index);
+    } else {
+        queue_index = __packet_pick_tx_queue(dev, skb);
+    }
+
+    skb_set_queue_mapping(skb, queue_index);
+}
+
 /* register_prot_hook must be invoked with the po->bind_lock held,
  * or from a context in which asynchronous accesses to the packet
  * socket is not possible (packet_create()).
@@ -2285,7 +2301,8 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
         }
     }
 
-    skb_set_queue_mapping(skb, packet_pick_tx_queue(dev));
+    packet_pick_tx_queue(dev, skb);
+
     skb->destructor = tpacket_destruct_skb;
     __packet_set_status(po, ph, TP_STATUS_SENDING);
     packet_inc_pending(&po->tx_ring);
@@ -2499,7 +2516,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
     skb->dev = dev;
     skb->priority = sk->sk_priority;
     skb->mark = sk->sk_mark;
-    skb_set_queue_mapping(skb, packet_pick_tx_queue(dev));
+
+    packet_pick_tx_queue(dev, skb);
 
     if (po->has_vnet_hdr) {
         if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
 
@@ -15,6 +15,11 @@
  *
  * ECN support is added by Naeem Khademi <naeemk@ifi.uio.no>
  * University of Oslo, Norway.
+ *
+ * References:
+ * IETF draft submission: http://tools.ietf.org/html/draft-pan-aqm-pie-00
+ * IEEE  Conference on High Performance Switching and Routing 2013 :
+ * "PIE: A * Lightweight Control Scheme to Address the Bufferbloat Problem"
  */
 
 #include <linux/module.h>
@@ -36,7 +41,7 @@ struct pie_params {
     psched_time_t target;    /* user specified target delay in pschedtime */
     u32 tupdate;        /* timer frequency (in jiffies) */
     u32 limit;        /* number of packets that can be enqueued */
-    u32 alpha;        /* alpha and beta are between -4 and 4 */
+    u32 alpha;        /* alpha and beta are between 0 and 32 */
     u32 beta;        /* and are used for shift relative to 1 */
     bool ecn;        /* true if ecn is enabled */
     bool bytemode;        /* to scale drop early prob based on pkt size */
@@ -326,10 +331,16 @@ static void calculate_probability(struct Qdisc *sch)
     if (qdelay == 0 && qlen != 0)
         update_prob = false;
 
-    /* Add ranges for alpha and beta, more aggressive for high dropping
-     * mode and gentle steps for light dropping mode
-     * In light dropping mode, take gentle steps; in medium dropping mode,
-     * take medium steps; in high dropping mode, take big steps.
+    /* In the algorithm, alpha and beta are between 0 and 2 with typical
+     * value for alpha as 0.125. In this implementation, we use values 0-32
+     * passed from user space to represent this. Also, alpha and beta have
+     * unit of HZ and need to be scaled before they can used to update
+     * probability. alpha/beta are updated locally below by 1) scaling them
+     * appropriately 2) scaling down by 16 to come to 0-2 range.
+     * Please see paper for details.
+     *
+     * We scale alpha and beta differently depending on whether we are in
+     * light, medium or high dropping mode.
      */
     if (q->vars.prob < MAX_PROB / 100) {
         alpha =
 
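The corrected comment pins down the units: userspace passes alpha and beta as integers in 0-32, and the qdisc scales them down by 16 to reach the draft's 0-2 range, so the paper's typical alpha of 0.125 corresponds to a configured value of 2. The mapping in plain arithmetic:

#include <stdio.h>

int main(void)
{
    /* tc passes alpha/beta in [0, 32]; the algorithm wants [0, 2]. */
    for (unsigned int user = 0; user <= 32; user += 8) {
        double effective = user / 16.0; /* scale down by 16 */
        printf("configured %2u -> effective %.3f\n", user, effective);
    }
    /* The draft's typical alpha = 0.125 is therefore user value 2. */
    return 0;
}
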
@@ -1367,44 +1367,35 @@ static inline bool sctp_peer_needs_update(struct sctp_association *asoc)
     return false;
 }
 
-/* Increase asoc's rwnd by len and send any window update SACK if needed. */
-void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len)
+/* Update asoc's rwnd for the approximated state in the buffer,
+ * and check whether SACK needs to be sent.
+ */
+void sctp_assoc_rwnd_update(struct sctp_association *asoc, bool update_peer)
 {
+    int rx_count;
     struct sctp_chunk *sack;
     struct timer_list *timer;
 
-    if (asoc->rwnd_over) {
-        if (asoc->rwnd_over >= len) {
-            asoc->rwnd_over -= len;
-        } else {
-            asoc->rwnd += (len - asoc->rwnd_over);
-            asoc->rwnd_over = 0;
-        }
-    } else {
-        asoc->rwnd += len;
-    }
+    if (asoc->ep->rcvbuf_policy)
+        rx_count = atomic_read(&asoc->rmem_alloc);
+    else
+        rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc);
 
-    /* If we had window pressure, start recovering it
-     * once our rwnd had reached the accumulated pressure
-     * threshold.  The idea is to recover slowly, but up
-     * to the initial advertised window.
-     */
-    if (asoc->rwnd_press && asoc->rwnd >= asoc->rwnd_press) {
-        int change = min(asoc->pathmtu, asoc->rwnd_press);
-        asoc->rwnd += change;
-        asoc->rwnd_press -= change;
-    }
+    if ((asoc->base.sk->sk_rcvbuf - rx_count) > 0)
+        asoc->rwnd = (asoc->base.sk->sk_rcvbuf - rx_count) >> 1;
+    else
+        asoc->rwnd = 0;
 
-    pr_debug("%s: asoc:%p rwnd increased by %d to (%u, %u) - %u\n",
-         __func__, asoc, len, asoc->rwnd, asoc->rwnd_over,
-         asoc->a_rwnd);
+    pr_debug("%s: asoc:%p rwnd=%u, rx_count=%d, sk_rcvbuf=%d\n",
+         __func__, asoc, asoc->rwnd, rx_count,
+         asoc->base.sk->sk_rcvbuf);
 
     /* Send a window update SACK if the rwnd has increased by at least the
      * minimum of the association's PMTU and half of the receive buffer.
      * The algorithm used is similar to the one described in
     * Section 4.2.3.3 of RFC 1122.
      */
-    if (sctp_peer_needs_update(asoc)) {
+    if (update_peer && sctp_peer_needs_update(asoc)) {
         asoc->a_rwnd = asoc->rwnd;
 
         pr_debug("%s: sending window update SACK- asoc:%p rwnd:%u "
@@ -1426,45 +1417,6 @@ void sctp_assoc_rwnd_update(struct sctp_association *asoc, bool update_peer)
     }
 }
 
-/* Decrease asoc's rwnd by len. */
-void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len)
-{
-    int rx_count;
-    int over = 0;
-
-    if (unlikely(!asoc->rwnd || asoc->rwnd_over))
-        pr_debug("%s: association:%p has asoc->rwnd:%u, "
-             "asoc->rwnd_over:%u!\n", __func__, asoc,
-             asoc->rwnd, asoc->rwnd_over);
-
-    if (asoc->ep->rcvbuf_policy)
-        rx_count = atomic_read(&asoc->rmem_alloc);
-    else
-        rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc);
-
-    /* If we've reached or overflowed our receive buffer, announce
-     * a 0 rwnd if rwnd would still be positive.  Store the
-     * the potential pressure overflow so that the window can be restored
-     * back to original value.
-     */
-    if (rx_count >= asoc->base.sk->sk_rcvbuf)
-        over = 1;
-
-    if (asoc->rwnd >= len) {
-        asoc->rwnd -= len;
-        if (over) {
-            asoc->rwnd_press += asoc->rwnd;
-            asoc->rwnd = 0;
-        }
-    } else {
-        asoc->rwnd_over = len - asoc->rwnd;
-        asoc->rwnd = 0;
-    }
-
-    pr_debug("%s: asoc:%p rwnd decreased by %d to (%u, %u, %u)\n",
-         __func__, asoc, len, asoc->rwnd, asoc->rwnd_over,
-         asoc->rwnd_press);
-}
-
 /* Build the bind address list for the association based on info from the
  * local endpoint and the remote peer.
 
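The rewritten sctp_assoc_rwnd_update derives rwnd directly from buffer state instead of incrementally tracking rwnd_over and rwnd_press: the advertised window is simply half of whatever receive-buffer space is currently free. Worked numerically with illustrative buffer sizes:

#include <assert.h>
#include <stdio.h>

/* Mirrors the new computation:
 * rwnd = (sk_rcvbuf - rx_count) / 2, floored at 0.
 */
static unsigned int rwnd_update(int sk_rcvbuf, int rx_count)
{
    if (sk_rcvbuf - rx_count > 0)
        return (unsigned int)(sk_rcvbuf - rx_count) >> 1;
    return 0;
}

int main(void)
{
    assert(rwnd_update(212992, 0) == 106496);    /* empty buffer */
    assert(rwnd_update(212992, 200000) == 6496); /* nearly full */
    assert(rwnd_update(212992, 212992) == 0);    /* full: window closed */
    printf("ok\n");
    return 0;
}
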
@@ -6176,7 +6176,7 @@ static int sctp_eat_data(const struct sctp_association *asoc,
      * PMTU.  In cases, such as loopback, this might be a rather
      * large spill over.
      */
-    if ((!chunk->data_accepted) && (!asoc->rwnd || asoc->rwnd_over ||
+    if ((!chunk->data_accepted) && (!asoc->rwnd ||
         (datalen > asoc->rwnd + asoc->frag_point))) {
 
         /* If this is the next TSN, consider reneging to make
 
@@ -64,6 +64,7 @@
 #include <linux/crypto.h>
 #include <linux/slab.h>
 #include <linux/file.h>
+#include <linux/compat.h>
 
 #include <net/ip.h>
 #include <net/icmp.h>
@@ -1368,11 +1369,19 @@ static int sctp_setsockopt_connectx(struct sock *sk,
 /*
  * New (hopefully final) interface for the API.
  * We use the sctp_getaddrs_old structure so that use-space library
- * can avoid any unnecessary allocations. The only defferent part
+ * can avoid any unnecessary allocations. The only different part
  * is that we store the actual length of the address buffer into the
  * addrs_num structure member. That way we can re-use the existing
  * code.
  */
+#ifdef CONFIG_COMPAT
+struct compat_sctp_getaddrs_old {
+    sctp_assoc_t    assoc_id;
+    s32        addr_num;
+    compat_uptr_t    addrs;        /* struct sockaddr * */
+};
+#endif
+
 static int sctp_getsockopt_connectx3(struct sock *sk, int len,
                      char __user *optval,
                      int __user *optlen)
@@ -1381,16 +1390,30 @@ static int sctp_getsockopt_connectx3(struct sock *sk, int len,
     sctp_assoc_t assoc_id = 0;
     int err = 0;
 
-    if (len < sizeof(param))
-        return -EINVAL;
-
-    if (copy_from_user(&param, optval, sizeof(param)))
-        return -EFAULT;
-
-    err = __sctp_setsockopt_connectx(sk,
-            (struct sockaddr __user *)param.addrs,
-            param.addr_num, &assoc_id);
+#ifdef CONFIG_COMPAT
+    if (is_compat_task()) {
+        struct compat_sctp_getaddrs_old param32;
+
+        if (len < sizeof(param32))
+            return -EINVAL;
+        if (copy_from_user(&param32, optval, sizeof(param32)))
+            return -EFAULT;
+
+        param.assoc_id = param32.assoc_id;
+        param.addr_num = param32.addr_num;
+        param.addrs = compat_ptr(param32.addrs);
+    } else
+#endif
+    {
+        if (len < sizeof(param))
+            return -EINVAL;
+        if (copy_from_user(&param, optval, sizeof(param)))
+            return -EFAULT;
+    }
 
+    err = __sctp_setsockopt_connectx(sk, (struct sockaddr __user *)
+                     param.addrs, param.addr_num,
+                     &assoc_id);
     if (err == 0 || err == -EINPROGRESS) {
         if (copy_to_user(optval, &assoc_id, sizeof(assoc_id)))
             return -EFAULT;
@@ -2092,12 +2115,6 @@ static int sctp_recvmsg(struct kiocb *iocb, struct sock *sk,
         sctp_skb_pull(skb, copied);
         skb_queue_head(&sk->sk_receive_queue, skb);
 
-        /* When only partial message is copied to the user, increase
-         * rwnd by that amount. If all the data in the skb is read,
-         * rwnd is updated when the event is freed.
-         */
-        if (!sctp_ulpevent_is_notification(event))
-            sctp_assoc_rwnd_increase(event->asoc, copied);
         goto out;
     } else if ((event->msg_flags & MSG_NOTIFICATION) ||
            (event->msg_flags & MSG_EOR))
 
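The ia32-compat path exists because sctp_getaddrs_old embeds a user pointer: on a 64-bit kernel the native struct carries an 8-byte addrs field, while a 32-bit caller hands in a layout with a 4-byte pointer, so copying the native layout from a compat task reads the wrong number of bytes. The size mismatch in miniature, as a userspace-only illustration (these are stand-in structs, not the uapi definitions):

#include <stdint.h>
#include <stdio.h>

typedef int32_t sctp_assoc_t;

/* Native 64-bit layout: pointer is 8 bytes. */
struct getaddrs_old_64 {
    sctp_assoc_t assoc_id;
    int32_t addr_num;
    void *addrs;
};

/* What a 32-bit task actually passes: a 4-byte pointer value. */
struct getaddrs_old_32 {
    sctp_assoc_t assoc_id;
    int32_t addr_num;
    uint32_t addrs;     /* compat_uptr_t */
};

int main(void)
{
    printf("native: %zu bytes, compat: %zu bytes\n",
           sizeof(struct getaddrs_old_64),
           sizeof(struct getaddrs_old_32));
    /* On x86-64 this prints 16 vs 12: copy_from_user() of the native
     * struct would over-read a compat caller's buffer, hence the
     * explicit compat_sctp_getaddrs_old translation above.
     */
    return 0;
}
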
@@ -151,6 +151,7 @@ static struct ctl_table sctp_net_table[] = {
     },
     {
         .procname    = "cookie_hmac_alg",
+        .data        = &init_net.sctp.sctp_hmac_alg,
         .maxlen        = 8,
         .mode        = 0644,
         .proc_handler    = proc_sctp_do_hmac_alg,
@@ -401,7 +402,9 @@ static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write,
 
 int sctp_sysctl_net_register(struct net *net)
 {
-    struct ctl_table *table;
+    struct ctl_table *table = sctp_net_table;
+
+    if (!net_eq(net, &init_net)) {
         int i;
 
         table = kmemdup(sctp_net_table, sizeof(sctp_net_table), GFP_KERNEL);
@@ -410,6 +413,7 @@ int sctp_sysctl_net_register(struct net *net)
 
         for (i = 0; table[i].data; i++)
             table[i].data += (char *)(&net->sctp) - (char *)&init_net.sctp;
+    }
 
     net->sctp.sysctl_header = register_net_sysctl(net, "net/sctp", table);
     return 0;
 
@@ -989,7 +989,7 @@ static void sctp_ulpevent_receive_data(struct sctp_ulpevent *event,
     skb = sctp_event2skb(event);
     /* Set the owner and charge rwnd for bytes received.  */
     sctp_ulpevent_set_owner(event, asoc);
-    sctp_assoc_rwnd_decrease(asoc, skb_headlen(skb));
+    sctp_assoc_rwnd_update(asoc, false);
 
     if (!skb->data_len)
         return;
@@ -1011,6 +1011,7 @@ static void sctp_ulpevent_release_data(struct sctp_ulpevent *event)
 {
     struct sk_buff *skb, *frag;
     unsigned int    len;
+    struct sctp_association *asoc;
 
     /* Current stack structures assume that the rcv buffer is
      * per socket.   For UDP style sockets this is not true as
@@ -1035,8 +1036,11 @@ static void sctp_ulpevent_release_data(struct sctp_ulpevent *event)
     }
 
 done:
-    sctp_assoc_rwnd_increase(event->asoc, len);
+    asoc = event->asoc;
+    sctp_association_hold(asoc);
     sctp_ulpevent_release_owner(event);
+    sctp_assoc_rwnd_update(asoc, true);
+    sctp_association_put(asoc);
 }
 
 static void sctp_ulpevent_release_frag_data(struct sctp_ulpevent *event)
 
@@ -192,6 +192,7 @@ static inline void k_term_timer(struct timer_list *timer)
 
 struct tipc_skb_cb {
     void *handle;
+    bool deferred;
 };
 
 #define TIPC_SKB_CB(__skb) ((struct tipc_skb_cb *)&((__skb)->cb[0]))
 
@@ -1391,6 +1391,12 @@ static int link_recv_buf_validate(struct sk_buff *buf)
     u32 hdr_size;
     u32 min_hdr_size;
 
+    /* If this packet comes from the defer queue, the skb has already
+     * been validated
+     */
+    if (unlikely(TIPC_SKB_CB(buf)->deferred))
+        return 1;
+
     if (unlikely(buf->len < MIN_H_SIZE))
         return 0;
 
@@ -1703,6 +1709,7 @@ static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
                     &l_ptr->newest_deferred_in, buf)) {
         l_ptr->deferred_inqueue_sz++;
         l_ptr->stats.deferred_recv++;
+        TIPC_SKB_CB(buf)->deferred = true;
         if ((l_ptr->deferred_inqueue_sz % 16) == 1)
             tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
     } else