Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
Conflicts: drivers/net/wireless/orinoco/main.c
commit da8120355e
40 changed files with 192 additions and 97 deletions
@@ -83,11 +83,12 @@ not detect it missed following items in original chain.
 obj = kmem_cache_alloc(...);
 lock_chain(); // typically a spin_lock()
 obj->key = key;
-atomic_inc(&obj->refcnt);
 /*
  * we need to make sure obj->key is updated before obj->next
+ * or obj->refcnt
  */
 smp_wmb();
+atomic_set(&obj->refcnt, 1);
 hlist_add_head_rcu(&obj->obj_node, list);
 unlock_chain(); // typically a spin_unlock()
@@ -159,6 +160,10 @@ out:
 obj = kmem_cache_alloc(cachep);
 lock_chain(); // typically a spin_lock()
 obj->key = key;
+/*
+ * changes to obj->key must be visible before refcnt one
+ */
+smp_wmb();
 atomic_set(&obj->refcnt, 1);
 /*
  * insert obj in RCU way (readers might be traversing chain)
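For context, the lookup side these barriers pair with looks roughly like the sketch below (paraphrased from the same rculist_nulls document, not part of this diff; obj_node, refcnt and put_ref() are illustrative names). Because an object from a SLAB_DESTROY_BY_RCU cache can be freed and reused while a reader still holds a pointer to it, the reader takes a reference only via atomic_inc_not_zero() and then re-checks the key:

rcu_read_lock();
hlist_for_each_entry_rcu(obj, node, head, obj_node) {
	if (obj->key == key) {
		if (!atomic_inc_not_zero(&obj->refcnt))
			break;		/* object is about to be freed */
		if (obj->key != key) {	/* slot was reused for another key */
			put_ref(obj);
			break;
		}
		/* obj is valid and referenced here */
		break;
	}
}
rcu_read_unlock();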
@@ -4089,6 +4089,7 @@ L: netfilter@vger.kernel.org
 L: coreteam@netfilter.org
 W: http://www.netfilter.org/
 W: http://www.iptables.org/
+T: git://git.kernel.org/pub/scm/linux/kernel/git/kaber/nf-2.6.git
 S: Supported
 F: include/linux/netfilter*
 F: include/linux/netfilter/
@@ -908,6 +908,7 @@ static const struct net_device_ops rtl8139_netdev_ops = {
.ndo_open = rtl8139_open,
.ndo_stop = rtl8139_close,
.ndo_get_stats = rtl8139_get_stats,
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = rtl8139_set_mac_address,
.ndo_start_xmit = rtl8139_start_xmit,
@@ -1142,7 +1142,9 @@ static const struct net_device_ops ixp4xx_netdev_ops = {
.ndo_start_xmit = eth_xmit,
.ndo_set_multicast_list = eth_set_mcast_list,
.ndo_do_ioctl = eth_ioctl,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};

static int __devinit eth_init_one(struct platform_device *pdev)
@@ -188,14 +188,14 @@ struct atl1c_tpd_ext_desc {
 #define RRS_HDS_TYPE_DATA 2
 
 #define RRS_IS_NO_HDS_TYPE(flag) \
-(((flag) >> (RRS_HDS_TYPE_SHIFT)) & RRS_HDS_TYPE_MASK == 0)
+((((flag) >> (RRS_HDS_TYPE_SHIFT)) & RRS_HDS_TYPE_MASK) == 0)
 
 #define RRS_IS_HDS_HEAD(flag) \
-(((flag) >> (RRS_HDS_TYPE_SHIFT)) & RRS_HDS_TYPE_MASK == \
+((((flag) >> (RRS_HDS_TYPE_SHIFT)) & RRS_HDS_TYPE_MASK) == \
 RRS_HDS_TYPE_HEAD)
 
 #define RRS_IS_HDS_DATA(flag) \
-(((flag) >> (RRS_HDS_TYPE_SHIFT)) & RRS_HDS_TYPE_MASK == \
+((((flag) >> (RRS_HDS_TYPE_SHIFT)) & RRS_HDS_TYPE_MASK) == \
 RRS_HDS_TYPE_DATA)
 
 /* rrs word 3 bit 0:31 */
@@ -245,7 +245,7 @@ struct atl1c_tpd_ext_desc {
 #define RRS_PACKET_TYPE_802_3 1
 #define RRS_PACKET_TYPE_ETH 0
 #define RRS_PACKET_IS_ETH(word) \
-(((word) >> RRS_PACKET_TYPE_SHIFT) & RRS_PACKET_TYPE_MASK == \
+((((word) >> RRS_PACKET_TYPE_SHIFT) & RRS_PACKET_TYPE_MASK) == \
 RRS_PACKET_TYPE_ETH)
 #define RRS_RXD_IS_VALID(word) \
 ((((word) >> RRS_RXD_UPDATED_SHIFT) & RRS_RXD_UPDATED_MASK) == 1)
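These macro fixes are about C operator precedence: == binds tighter than &, so without the extra parentheses the mask is compared first and the result is then ANDed with the shifted flag. A minimal stand-alone illustration (generic names and values, not the driver's actual register layout):

#include <stdio.h>

#define MASK 0x3

int main(void)
{
	unsigned int flag = 0x4;

	/* Without parentheses: parsed as flag & (MASK == 0), i.e. flag & 0 */
	int buggy = (flag & MASK == 0);
	/* With parentheses: the intended test of the masked value */
	int fixed = ((flag & MASK) == 0);

	printf("buggy=%d fixed=%d\n", buggy, fixed);	/* prints buggy=0 fixed=1 */
	return 0;
}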
@@ -1689,7 +1689,7 @@ static void atl1c_clean_rx_irq(struct atl1c_adapter *adapter, u8 que,
 if (likely(RRS_RXD_IS_VALID(rrs->word3))) {
 rfd_num = (rrs->word0 >> RRS_RX_RFD_CNT_SHIFT) &
 RRS_RX_RFD_CNT_MASK;
-if (unlikely(rfd_num) != 1)
+if (unlikely(rfd_num != 1))
 /* TODO support mul rfd*/
 if (netif_msg_rx_err(adapter))
 dev_warn(&pdev->dev,
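The unlikely() change matters because the kernel macro normalizes its argument to 0/1 for the branch-prediction hint, so "unlikely(rfd_num) != 1" only tests whether rfd_num is zero rather than whether it differs from 1. A small sketch of the distinction (using the well-known kernel definition of the macros; the surrounding function is illustrative):

/* Branch-prediction hints, as defined in the kernel's compiler headers */
#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

static int handle_rx(unsigned int rfd_num)
{
	/*
	 * Buggy form: unlikely(rfd_num) evaluates to !!(rfd_num), i.e. 0 or 1,
	 * so this condition is true only when rfd_num == 0.
	 */
	if (unlikely(rfd_num) != 1)
		return -1;

	/* Fixed form: the whole condition goes inside the hint. */
	if (unlikely(rfd_num != 1))
		return -1;

	return 0;
}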
@@ -2071,7 +2071,7 @@ static int atl2_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE))
 return -EOPNOTSUPP;
 
-if (wol->wolopts & (WAKE_MCAST|WAKE_BCAST|WAKE_MCAST))
+if (wol->wolopts & (WAKE_UCAST | WAKE_BCAST | WAKE_MCAST))
 return -EOPNOTSUPP;
 
 /* these settings will always override what we currently have */
@@ -1459,8 +1459,16 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 * ether type (eg ARPHRD_ETHER and ARPHRD_INFINIBAND) share the same bond
 */
 if (bond->slave_cnt == 0) {
-if (slave_dev->type != ARPHRD_ETHER)
-bond_setup_by_slave(bond_dev, slave_dev);
+if (bond_dev->type != slave_dev->type) {
+dev_close(bond_dev);
+pr_debug("%s: change device type from %d to %d\n",
+bond_dev->name, bond_dev->type, slave_dev->type);
+if (slave_dev->type != ARPHRD_ETHER)
+bond_setup_by_slave(bond_dev, slave_dev);
+else
+ether_setup(bond_dev);
+dev_open(bond_dev);
+}
 } else if (bond_dev->type != slave_dev->type) {
 pr_err(DRV_NAME ": %s ether type (%d) is different "
 "from other slaves (%d), can not enslave it.\n",
@@ -1524,6 +1524,7 @@ static void net_timeout(struct net_device *dev)
 static int net_send_packet(struct sk_buff *skb, struct net_device *dev)
 {
 struct net_local *lp = netdev_priv(dev);
+unsigned long flags;
 
 if (net_debug > 3) {
 printk("%s: sent %d byte packet of type %x\n",
@@ -1535,7 +1536,7 @@ static int net_send_packet(struct sk_buff *skb, struct net_device *dev)
 ask the chip to start transmitting before the
 whole packet has been completely uploaded. */
 
-spin_lock_irq(&lp->lock);
+spin_lock_irqsave(&lp->lock, flags);
 netif_stop_queue(dev);
 
 /* initiate a transmit sequence */
@@ -1549,13 +1550,13 @@ static int net_send_packet(struct sk_buff *skb, struct net_device *dev)
 * we're waiting for TxOk, so return 1 and requeue this packet.
 */
 
-spin_unlock_irq(&lp->lock);
+spin_unlock_irqrestore(&lp->lock, flags);
 if (net_debug) printk("cs89x0: Tx buffer not free!\n");
 return NETDEV_TX_BUSY;
 }
 /* Write the contents of the packet */
 writewords(dev->base_addr, TX_FRAME_PORT,skb->data,(skb->len+1) >>1);
-spin_unlock_irq(&lp->lock);
+spin_unlock_irqrestore(&lp->lock, flags);
 lp->stats.tx_bytes += skb->len;
 dev->trans_start = jiffies;
 dev_kfree_skb (skb);
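The cs89x0 hunks above (and the second net_send_packet() change further down) switch from spin_lock_irq()/spin_unlock_irq() to the irqsave variants. The reasoning: a transmit path may be entered with interrupts already disabled (for example via netpoll/netconsole), and spin_unlock_irq() would unconditionally re-enable them; the save/restore pair preserves whatever IRQ state the caller had. A minimal sketch of the pattern, not tied to either driver (demo_priv and demo_start_xmit are hypothetical names):

#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>

/* Hypothetical per-device state, just to show the locking pattern. */
struct demo_priv {
	spinlock_t lock;
};

static int demo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct demo_priv *priv = netdev_priv(dev);
	unsigned long flags;

	/* Save the current IRQ state instead of assuming IRQs are enabled. */
	spin_lock_irqsave(&priv->lock, flags);
	/* ... hand the packet to the hardware queue here ... */
	spin_unlock_irqrestore(&priv->lock, flags);

	return NETDEV_TX_OK;
}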
@@ -1897,6 +1897,9 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx,
 
 if (ioread8(&nic->csr->scb.status) & rus_no_res)
 nic->ru_running = RU_SUSPENDED;
+pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
+sizeof(struct rfd),
+PCI_DMA_BIDIRECTIONAL);
 return -ENODATA;
 }
 
@@ -3080,7 +3080,9 @@ static const struct net_device_ops ehea_netdev_ops = {
.ndo_poll_controller = ehea_netpoll,
#endif
.ndo_get_stats = ehea_get_stats,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = ehea_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_multicast_list = ehea_set_multicast_list,
.ndo_change_mtu = ehea_change_mtu,
.ndo_vlan_rx_register = ehea_vlan_rx_register,
@@ -1642,6 +1642,7 @@ static const struct net_device_ops fec_netdev_ops = {
.ndo_stop = fec_enet_close,
.ndo_start_xmit = fec_enet_start_xmit,
.ndo_set_multicast_list = set_multicast_list,
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_tx_timeout = fec_timeout,
.ndo_set_mac_address = fec_set_mac_address,
@@ -156,6 +156,8 @@ static const struct net_device_ops gfar_netdev_ops = {
.ndo_tx_timeout = gfar_timeout,
.ndo_do_ioctl = gfar_ioctl,
.ndo_vlan_rx_register = gfar_vlan_rx_register,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = gfar_netpoll,
#endif
@@ -127,14 +127,48 @@ static void igb_restore_vlan(struct igb_adapter *);
static void igb_ping_all_vfs(struct igb_adapter *);
static void igb_msg_task(struct igb_adapter *);
static int igb_rcv_msg_from_vf(struct igb_adapter *, u32);
static inline void igb_set_rah_pool(struct e1000_hw *, int , int);
static void igb_set_mc_list_pools(struct igb_adapter *, int, u16);
static void igb_vmm_control(struct igb_adapter *);
static inline void igb_set_vmolr(struct e1000_hw *, int);
static inline int igb_set_vf_rlpml(struct igb_adapter *, int, int);
static int igb_set_vf_mac(struct igb_adapter *adapter, int, unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);

static inline void igb_set_vmolr(struct e1000_hw *hw, int vfn)
{
u32 reg_data;

reg_data = rd32(E1000_VMOLR(vfn));
reg_data |= E1000_VMOLR_BAM | /* Accept broadcast */
E1000_VMOLR_ROPE | /* Accept packets matched in UTA */
E1000_VMOLR_ROMPE | /* Accept packets matched in MTA */
E1000_VMOLR_AUPE | /* Accept untagged packets */
E1000_VMOLR_STRVLAN; /* Strip vlan tags */
wr32(E1000_VMOLR(vfn), reg_data);
}

static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
int vfn)
{
struct e1000_hw *hw = &adapter->hw;
u32 vmolr;

vmolr = rd32(E1000_VMOLR(vfn));
vmolr &= ~E1000_VMOLR_RLPML_MASK;
vmolr |= size | E1000_VMOLR_LPE;
wr32(E1000_VMOLR(vfn), vmolr);

return 0;
}

static inline void igb_set_rah_pool(struct e1000_hw *hw, int pool, int entry)
{
u32 reg_data;

reg_data = rd32(E1000_RAH(entry));
reg_data &= ~E1000_RAH_POOL_MASK;
reg_data |= E1000_RAH_POOL_1 << pool;;
wr32(E1000_RAH(entry), reg_data);
}

#ifdef CONFIG_PM
static int igb_suspend(struct pci_dev *, pm_message_t);
static int igb_resume(struct pci_dev *);
@@ -5418,43 +5452,6 @@ static void igb_io_resume(struct pci_dev *pdev)
 igb_get_hw_control(adapter);
 }
 
-static inline void igb_set_vmolr(struct e1000_hw *hw, int vfn)
-{
-u32 reg_data;
-
-reg_data = rd32(E1000_VMOLR(vfn));
-reg_data |= E1000_VMOLR_BAM | /* Accept broadcast */
-E1000_VMOLR_ROPE | /* Accept packets matched in UTA */
-E1000_VMOLR_ROMPE | /* Accept packets matched in MTA */
-E1000_VMOLR_AUPE | /* Accept untagged packets */
-E1000_VMOLR_STRVLAN; /* Strip vlan tags */
-wr32(E1000_VMOLR(vfn), reg_data);
-}
-
-static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
-int vfn)
-{
-struct e1000_hw *hw = &adapter->hw;
-u32 vmolr;
-
-vmolr = rd32(E1000_VMOLR(vfn));
-vmolr &= ~E1000_VMOLR_RLPML_MASK;
-vmolr |= size | E1000_VMOLR_LPE;
-wr32(E1000_VMOLR(vfn), vmolr);
-
-return 0;
-}
-
-static inline void igb_set_rah_pool(struct e1000_hw *hw, int pool, int entry)
-{
-u32 reg_data;
-
-reg_data = rd32(E1000_RAH(entry));
-reg_data &= ~E1000_RAH_POOL_MASK;
-reg_data |= E1000_RAH_POOL_1 << pool;;
-wr32(E1000_RAH(entry), reg_data);
-}
-
 static void igb_set_mc_list_pools(struct igb_adapter *adapter,
 int entry_count, u16 total_rar_filters)
 {
@@ -430,7 +430,8 @@ static int net_send_packet(struct sk_buff *skb, struct net_device *dev)
 * hardware interrupt handler. Queue flow control is
 * thus managed under this lock as well.
 */
-spin_lock_irq(&np->lock);
+unsigned long flags;
+spin_lock_irqsave(&np->lock, flags);
 
 add_to_tx_ring(np, skb, length);
 dev->trans_start = jiffies;
@@ -446,7 +447,7 @@ static int net_send_packet(struct sk_buff *skb, struct net_device *dev)
 * is when the transmit statistics are updated.
 */
 
-spin_unlock_irq(&np->lock);
+spin_unlock_irqrestore(&np->lock, flags);
 #else
 /* This is the case for older hardware which takes
 * a single transmit buffer at a time, and it is
@@ -106,8 +106,6 @@ static u8 ixgbe_dcbnl_get_state(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);

DPRINTK(DRV, INFO, "Get DCB Admin Mode.\n");

return !!(adapter->flags & IXGBE_FLAG_DCB_ENABLED);
}
@@ -116,8 +114,6 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
u8 err = 0;
struct ixgbe_adapter *adapter = netdev_priv(netdev);

DPRINTK(DRV, INFO, "Set DCB Admin Mode.\n");

if (state > 0) {
/* Turn on DCB */
if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
@@ -175,6 +171,8 @@ static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev,
struct ixgbe_adapter *adapter = netdev_priv(netdev);
int i, j;

memset(perm_addr, 0xff, MAX_ADDR_LEN);

for (i = 0; i < netdev->addr_len; i++)
perm_addr[i] = adapter->hw.mac.perm_addr[i];
@@ -134,8 +134,10 @@ int phy_scan_fixups(struct phy_device *phydev)
 
 err = fixup->run(phydev);
 
-if (err < 0)
+if (err < 0) {
+mutex_unlock(&phy_fixup_lock);
 return err;
+}
 }
 }
 mutex_unlock(&phy_fixup_lock);
@@ -270,6 +270,9 @@ static const struct net_device_ops plip_netdev_ops = {
 .ndo_stop = plip_close,
 .ndo_start_xmit = plip_tx_packet,
 .ndo_do_ioctl = plip_ioctl,
+.ndo_change_mtu = eth_change_mtu,
+.ndo_set_mac_address = eth_mac_addr,
+.ndo_validate_addr = eth_validate_addr,
 };
 
 /* Entry point of PLIP driver.
@@ -1411,6 +1411,7 @@ static const struct net_device_ops gelic_netdevice_ops = {
.ndo_set_multicast_list = gelic_net_set_multi,
.ndo_change_mtu = gelic_net_change_mtu,
.ndo_tx_timeout = gelic_net_tx_timeout,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = gelic_net_poll_controller,
@@ -2707,6 +2707,7 @@ static const struct net_device_ops gelic_wl_netdevice_ops = {
.ndo_set_multicast_list = gelic_net_set_multi,
.ndo_change_mtu = gelic_net_change_mtu,
.ndo_tx_timeout = gelic_net_tx_timeout,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = gelic_net_poll_controller,
@@ -1151,14 +1151,7 @@ static void sky2_rx_stop(struct sky2_port *sky2)

/* reset the Rx prefetch unit */
sky2_write32(hw, Y2_QADDR(rxq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);

/* Reset the RAM Buffer receive queue */
sky2_write8(hw, RB_ADDR(rxq, RB_CTRL), RB_RST_SET);

/* Reset Rx MAC FIFO */
sky2_write8(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), GMF_RST_SET);

sky2_read8(hw, B0_CTST);
mmiowb();
}

/* Clean out receive buffer area, assumes receiver hardware stopped */
@@ -1774,6 +1774,7 @@ static const struct net_device_ops smc_netdev_ops = {
.ndo_start_xmit = smc_hard_start_xmit,
.ndo_tx_timeout = smc_timeout,
.ndo_set_multicast_list = smc_set_multicast_list,
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1779,6 +1779,7 @@ static const struct net_device_ops smsc911x_netdev_ops = {
.ndo_get_stats = smsc911x_get_stats,
.ndo_set_multicast_list = smsc911x_set_multicast_list,
.ndo_do_ioctl = smsc911x_do_ioctl,
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = smsc911x_set_mac_address,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1016,7 +1016,9 @@ static const struct net_device_ops vnet_ops = {
.ndo_open = vnet_open,
.ndo_stop = vnet_close,
.ndo_set_multicast_list = vnet_set_rx_mode,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = vnet_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_tx_timeout = vnet_tx_timeout,
.ndo_change_mtu = vnet_change_mtu,
.ndo_start_xmit = vnet_start_xmit,
@@ -311,7 +311,7 @@ static int eem_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
 * bmCRC = 0 : CRC = 0xDEADBEEF
 */
 if (header & BIT(14))
-crc2 = ~crc32_le(~0, skb2->data, len);
+crc2 = ~crc32_le(~0, skb2->data, skb2->len);
 else
 crc2 = 0xdeadbeef;
 
@@ -999,6 +999,9 @@ static const struct net_device_ops kaweth_netdev_ops = {
 .ndo_tx_timeout = kaweth_tx_timeout,
 .ndo_set_multicast_list = kaweth_set_rx_mode,
 .ndo_get_stats = kaweth_netdev_stats,
+.ndo_change_mtu = eth_change_mtu,
+.ndo_set_mac_address = eth_mac_addr,
+.ndo_validate_addr = eth_validate_addr,
 };
 
 static int kaweth_probe(
@@ -1493,6 +1493,9 @@ static const struct net_device_ops pegasus_netdev_ops = {
 .ndo_set_multicast_list = pegasus_set_multicast,
 .ndo_get_stats = pegasus_netdev_stats,
 .ndo_tx_timeout = pegasus_tx_timeout,
+.ndo_change_mtu = eth_change_mtu,
+.ndo_set_mac_address = eth_mac_addr,
+.ndo_validate_addr = eth_validate_addr,
 };
 
 static struct usb_driver pegasus_driver = {
@@ -621,6 +621,7 @@ static const struct net_device_ops rhine_netdev_ops = {
.ndo_start_xmit = rhine_start_tx,
.ndo_get_stats = rhine_get_stats,
.ndo_set_multicast_list = rhine_set_rx_mode,
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_do_ioctl = netdev_ioctl,
@@ -2072,6 +2072,8 @@ static const struct net_device_ops orinoco_netdev_ops = {
 .ndo_start_xmit = orinoco_xmit,
 .ndo_set_multicast_list = orinoco_set_multicast_list,
 .ndo_change_mtu = orinoco_change_mtu,
+.ndo_set_mac_address = eth_mac_addr,
+.ndo_validate_addr = eth_validate_addr,
 .ndo_tx_timeout = orinoco_tx_timeout,
 .ndo_get_stats = orinoco_get_stats,
 };
@@ -1342,12 +1342,12 @@ static inline int skb_network_offset(const struct sk_buff *skb)
 * shifting the start of the packet by 2 bytes. Drivers should do this
 * with:
 *
-* skb_reserve(NET_IP_ALIGN);
+* skb_reserve(skb, NET_IP_ALIGN);
 *
 * The downside to this alignment of the IP header is that the DMA is now
 * unaligned. On some architectures the cost of an unaligned DMA is high
 * and this cost outweighs the gains made by aligning the IP header.
 *
 *
 * Since this trade off varies between architectures, we allow NET_IP_ALIGN
 * to be overridden.
 */
@@ -104,15 +104,15 @@ struct net;
 
 /**
 * struct sock_common - minimal network layer representation of sockets
+* @skc_node: main hash linkage for various protocol lookup tables
+* @skc_nulls_node: main hash linkage for UDP/UDP-Lite protocol
+* @skc_refcnt: reference count
+* @skc_hash: hash value used with various protocol lookup tables
 * @skc_family: network address family
 * @skc_state: Connection state
 * @skc_reuse: %SO_REUSEADDR setting
 * @skc_bound_dev_if: bound device index if != 0
-* @skc_node: main hash linkage for various protocol lookup tables
-* @skc_nulls_node: main hash linkage for UDP/UDP-Lite protocol
 * @skc_bind_node: bind hash linkage for various protocol lookup tables
-* @skc_refcnt: reference count
-* @skc_hash: hash value used with various protocol lookup tables
 * @skc_prot: protocol handlers inside a network family
 * @skc_net: reference to the network namespace of this socket
 *
@@ -120,17 +120,21 @@ struct net;
 * for struct sock and struct inet_timewait_sock.
 */
 struct sock_common {
-unsigned short skc_family;
-volatile unsigned char skc_state;
-unsigned char skc_reuse;
-int skc_bound_dev_if;
+/*
+ * first fields are not copied in sock_copy()
+ */
 union {
 struct hlist_node skc_node;
 struct hlist_nulls_node skc_nulls_node;
 };
-struct hlist_node skc_bind_node;
 atomic_t skc_refcnt;
+
 unsigned int skc_hash;
+unsigned short skc_family;
+volatile unsigned char skc_state;
+unsigned char skc_reuse;
+int skc_bound_dev_if;
+struct hlist_node skc_bind_node;
 struct proto *skc_prot;
 #ifdef CONFIG_NET_NS
 struct net *skc_net;
@@ -208,15 +212,17 @@ struct sock {
 * don't add nothing before this first member (__sk_common) --acme
 */
 struct sock_common __sk_common;
+#define sk_node __sk_common.skc_node
+#define sk_nulls_node __sk_common.skc_nulls_node
+#define sk_refcnt __sk_common.skc_refcnt
+
+#define sk_copy_start __sk_common.skc_hash
+#define sk_hash __sk_common.skc_hash
 #define sk_family __sk_common.skc_family
 #define sk_state __sk_common.skc_state
 #define sk_reuse __sk_common.skc_reuse
 #define sk_bound_dev_if __sk_common.skc_bound_dev_if
-#define sk_node __sk_common.skc_node
-#define sk_nulls_node __sk_common.skc_nulls_node
 #define sk_bind_node __sk_common.skc_bind_node
-#define sk_refcnt __sk_common.skc_refcnt
-#define sk_hash __sk_common.skc_hash
 #define sk_prot __sk_common.skc_prot
 #define sk_net __sk_common.skc_net
 kmemcheck_bitfield_begin(flags);
@@ -75,6 +75,7 @@ static __initdata const char banner[] = KERN_INFO
 MODULE_DESCRIPTION("PF_CAN broadcast manager protocol");
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
+MODULE_ALIAS("can-proto-2");
 
 /* easy access to can_frame payload */
 static inline u64 GET_U64(const struct can_frame *cp)
@@ -1469,6 +1470,9 @@ static int bcm_release(struct socket *sock)
 bo->ifindex = 0;
 }
 
+sock_orphan(sk);
+sock->sk = NULL;
+
 release_sock(sk);
 sock_put(sk);
 
@@ -62,6 +62,7 @@ static __initdata const char banner[] =
 MODULE_DESCRIPTION("PF_CAN raw protocol");
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Urs Thuermann <urs.thuermann@volkswagen.de>");
+MODULE_ALIAS("can-proto-1");
 
 #define MASK_ALL 0
 
@@ -306,6 +307,9 @@ static int raw_release(struct socket *sock)
 ro->bound = 0;
 ro->count = 0;
 
+sock_orphan(sk);
+sock->sk = NULL;
+
 release_sock(sk);
 sock_put(sk);
 
@@ -919,13 +919,19 @@ static inline void sock_lock_init(struct sock *sk)
 af_family_keys + sk->sk_family);
 }
 
+/*
+ * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
+ * even temporarly, because of RCU lookups. sk_node should also be left as is.
+ */
 static void sock_copy(struct sock *nsk, const struct sock *osk)
 {
 #ifdef CONFIG_SECURITY_NETWORK
 void *sptr = nsk->sk_security;
 #endif
 
-memcpy(nsk, osk, osk->sk_prot->obj_size);
+BUILD_BUG_ON(offsetof(struct sock, sk_copy_start) !=
+sizeof(osk->sk_node) + sizeof(osk->sk_refcnt));
+memcpy(&nsk->sk_copy_start, &osk->sk_copy_start,
+osk->sk_prot->obj_size - offsetof(struct sock, sk_copy_start));
 #ifdef CONFIG_SECURITY_NETWORK
 nsk->sk_security = sptr;
 security_sk_clone(osk, nsk);
@@ -939,8 +945,23 @@ static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
 struct kmem_cache *slab;
 
 slab = prot->slab;
-if (slab != NULL)
-sk = kmem_cache_alloc(slab, priority);
+if (slab != NULL) {
+sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
+if (!sk)
+return sk;
+if (priority & __GFP_ZERO) {
+/*
+ * caches using SLAB_DESTROY_BY_RCU should let
+ * sk_node.next un-modified. Special care is taken
+ * when initializing object to zero.
+ */
+if (offsetof(struct sock, sk_node.next) != 0)
+memset(sk, 0, offsetof(struct sock, sk_node.next));
+memset(&sk->sk_node.pprev, 0,
+prot->obj_size - offsetof(struct sock,
+sk_node.pprev));
+}
+}
 else
 sk = kmalloc(prot->obj_size, priority);
 
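The __GFP_ZERO handling above (and the nf_conntrack_alloc() change further down) rely on the same offsetof() trick: zero everything in the object except the RCU-visible linkage at a known offset, so lockless readers of a SLAB_DESTROY_BY_RCU cache never observe the ->next pointer being wiped. A self-contained userspace illustration of the idiom (the struct and field names are made up for the example):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Illustrative object: 'next' must survive re-initialisation, the rest not. */
struct obj {
	struct obj *next;	/* lockless readers may still follow this */
	int key;
	char payload[32];
};

/* Zero every field except 'next', mirroring the kernel's two-memset pattern. */
static void init_obj_preserving_next(struct obj *o)
{
	/* fields before 'next' (none here, so this memset is skipped) */
	if (offsetof(struct obj, next) != 0)
		memset(o, 0, offsetof(struct obj, next));
	/* everything from the field after 'next' to the end of the object */
	memset((char *)o + offsetof(struct obj, key), 0,
	       sizeof(*o) - offsetof(struct obj, key));
}

int main(void)
{
	struct obj o = { .next = (struct obj *)0x1234, .key = 42 };

	init_obj_preserving_next(&o);
	printf("next=%p key=%d\n", (void *)o.next, o.key);	/* next kept, key zeroed */
	return 0;
}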
@@ -1125,6 +1146,11 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
 
 newsk->sk_err = 0;
 newsk->sk_priority = 0;
+/*
+ * Before updating sk_refcnt, we must commit prior changes to memory
+ * (Documentation/RCU/rculist_nulls.txt for details)
+ */
+smp_wmb();
 atomic_set(&newsk->sk_refcnt, 2);
 
 /*
@@ -1840,6 +1866,11 @@ void sock_init_data(struct socket *sock, struct sock *sk)
 
 sk->sk_stamp = ktime_set(-1L, 0);
 
+/*
+ * Before updating sk_refcnt, we must commit prior changes to memory
+ * (Documentation/RCU/rculist_nulls.txt for details)
+ */
+smp_wmb();
 atomic_set(&sk->sk_refcnt, 1);
 atomic_set(&sk->sk_wmem_alloc, 1);
 atomic_set(&sk->sk_drops, 0);
@@ -735,10 +735,10 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 }
 
 tos = tiph->tos;
-if (tos&1) {
+if (tos == 1) {
+tos = 0;
 if (skb->protocol == htons(ETH_P_IP))
 tos = old_iph->tos;
-tos &= ~1;
 }
 
 {
@@ -1243,7 +1243,6 @@ int ip_push_pending_frames(struct sock *sk)
 skb->len += tmp_skb->len;
 skb->data_len += tmp_skb->len;
 skb->truesize += tmp_skb->truesize;
-__sock_put(tmp_skb->sk);
 tmp_skb->destructor = NULL;
 tmp_skb->sk = NULL;
 }
@@ -1474,7 +1474,6 @@ int ip6_push_pending_frames(struct sock *sk)
 skb->len += tmp_skb->len;
 skb->data_len += tmp_skb->len;
 skb->truesize += tmp_skb->truesize;
-__sock_put(tmp_skb->sk);
 tmp_skb->destructor = NULL;
 tmp_skb->sk = NULL;
 }
@@ -1018,6 +1018,7 @@ static void ipip6_tunnel_setup(struct net_device *dev)
 dev->hard_header_len = LL_MAX_HEADER + sizeof(struct iphdr);
 dev->mtu = ETH_DATA_LEN - sizeof(struct iphdr);
 dev->flags = IFF_NOARP;
+dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
 dev->iflink = 0;
 dev->addr_len = 4;
 dev->features |= NETIF_F_NETNS_LOCAL;
@@ -561,23 +561,38 @@ struct nf_conn *nf_conntrack_alloc(struct net *net,
 }
 }
 
-ct = kmem_cache_zalloc(nf_conntrack_cachep, gfp);
+/*
+ * Do not use kmem_cache_zalloc(), as this cache uses
+ * SLAB_DESTROY_BY_RCU.
+ */
+ct = kmem_cache_alloc(nf_conntrack_cachep, gfp);
 if (ct == NULL) {
 pr_debug("nf_conntrack_alloc: Can't alloc conntrack.\n");
 atomic_dec(&net->ct.count);
 return ERR_PTR(-ENOMEM);
 }
 
+/*
+ * Let ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.next
+ * and ct->tuplehash[IP_CT_DIR_REPLY].hnnode.next unchanged.
+ */
+memset(&ct->tuplehash[IP_CT_DIR_MAX], 0,
+sizeof(*ct) - offsetof(struct nf_conn, tuplehash[IP_CT_DIR_MAX]));
 spin_lock_init(&ct->lock);
-atomic_set(&ct->ct_general.use, 1);
 ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
+ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL;
 ct->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
+ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev = NULL;
 /* Don't set timer yet: wait for confirmation */
 setup_timer(&ct->timeout, death_by_timeout, (unsigned long)ct);
 #ifdef CONFIG_NET_NS
 ct->ct_net = net;
 #endif
 
+/*
+ * changes to lookup keys must be done before setting refcnt to 1
+ */
+smp_wmb();
+atomic_set(&ct->ct_general.use, 1);
 return ct;
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_alloc);
@@ -330,7 +330,8 @@ static bool xt_osf_match_packet(const struct sk_buff *skb,
 fcount++;
 
 if (info->flags & XT_OSF_LOG)
-nf_log_packet(p->hooknum, 0, skb, p->in, p->out, NULL,
+nf_log_packet(p->family, p->hooknum, skb,
+p->in, p->out, NULL,
 "%s [%s:%s] : %pi4:%d -> %pi4:%d hops=%d\n",
 f->genre, f->version, f->subtype,
 &ip->saddr, ntohs(tcp->source),
@@ -345,7 +346,7 @@ static bool xt_osf_match_packet(const struct sk_buff *skb,
 rcu_read_unlock();
 
 if (!fcount && (info->flags & XT_OSF_LOG))
-nf_log_packet(p->hooknum, 0, skb, p->in, p->out, NULL,
+nf_log_packet(p->family, p->hooknum, skb, p->in, p->out, NULL,
 "Remote OS is not known: %pi4:%u -> %pi4:%u\n",
 &ip->saddr, ntohs(tcp->source),
 &ip->daddr, ntohs(tcp->dest));