Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (68 commits)
  net: can: janz-ican3: world-writable sysfs termination file
  net: can: at91_can: world-writable sysfs files
  MAINTAINERS: update email ids of the be2net driver maintainers.
  bridge: Don't put partly initialized fdb into hash
  r8169: prevent RxFIFO induced loops in the irq handler.
  r8169: RxFIFO overflow oddities with 8168 chipsets.
  r8169: use RxFIFO overflow workaround for 8168c chipset.
  include/net/genetlink.h: Allow genlmsg_cancel to accept a NULL argument
  net: Provide compat support for SIOCGETMIFCNT_IN6 and SIOCGETSGCNT_IN6.
  net: Support compat SIOCGETVIFCNT ioctl in ipv4.
  net: Fix bug in compat SIOCGETSGCNT handling.
  niu: Fix races between up/down and get_stats.
  tcp_ecn is an integer not a boolean
  atl1c: Add missing PCI device ID
  s390: Fix possibly wrong size in strncmp (smsgiucv)
  s390: Fix wrong size in memcmp (netiucv)
  qeth: allow OSA CHPARM change in suspend state
  qeth: allow HiperSockets framesize change in suspend
  qeth: add more strict MTU checking
  qeth: show new mac-address if its setting fails
  ...
Linus Torvalds, 2011-02-04 13:20:01 -08:00
commit 44f2c5c841

60 changed files with 637 additions and 289 deletions


@ -187,7 +187,7 @@ tcp_cookie_size - INTEGER
tcp_dsack - BOOLEAN
Allows TCP to send "duplicate" SACKs.
tcp_ecn - BOOLEAN
tcp_ecn - INTEGER
Enable Explicit Congestion Notification (ECN) in TCP. ECN is only
used when both ends of the TCP flow support it. It is useful to
avoid losses due to congestion (when the bottleneck router supports


@ -5551,12 +5551,11 @@ S: Supported
F: drivers/scsi/be2iscsi/
SERVER ENGINES 10Gbps NIC - BladeEngine 2 DRIVER
M: Sathya Perla <sathyap@serverengines.com>
M: Subbu Seetharaman <subbus@serverengines.com>
M: Sarveshwar Bandi <sarveshwarb@serverengines.com>
M: Ajit Khaparde <ajitk@serverengines.com>
M: Sathya Perla <sathya.perla@emulex.com>
M: Subbu Seetharaman <subbu.seetharaman@emulex.com>
M: Ajit Khaparde <ajit.khaparde@emulex.com>
L: netdev@vger.kernel.org
W: http://www.serverengines.com
W: http://www.emulex.com
S: Supported
F: drivers/net/benet/
@ -6787,12 +6786,12 @@ S: Maintained
F: drivers/net/wireless/wl1251/*
WL1271 WIRELESS DRIVER
M: Luciano Coelho <luciano.coelho@nokia.com>
M: Luciano Coelho <coelho@ti.com>
L: linux-wireless@vger.kernel.org
W: http://wireless.kernel.org
W: http://wireless.kernel.org/en/users/Drivers/wl12xx
T: git git://git.kernel.org/pub/scm/linux/kernel/git/luca/wl12xx.git
S: Maintained
F: drivers/net/wireless/wl12xx/wl1271*
F: drivers/net/wireless/wl12xx/
F: include/linux/wl12xx.h
WL3501 WIRELESS PCMCIA CARD DRIVER


@ -1627,7 +1627,7 @@ __setup("icn=", icn_setup);
static int __init icn_init(void)
{
char *p;
char rev[20];
char rev[21];
memset(&dev, 0, sizeof(icn_dev));
dev.memaddr = (membase & 0x0ffc000);
@ -1638,6 +1638,7 @@ static int __init icn_init(void)
if ((p = strchr(revision, ':'))) {
strncpy(rev, p + 1, 20);
rev[20] = '\0';
p = strchr(rev, '$');
if (p)
*p = 0;
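
The icn hunk above grows the buffer from rev[20] to rev[21]: strncpy(rev, p + 1, 20) may copy a full 20 bytes without a terminator, and the explicit rev[20] = '\0' then needs a 21st byte. A minimal standalone sketch of the same pattern (the revision string below is made up for illustration, not the driver's real one):

#include <stdio.h>
#include <string.h>

int main(void)
{
        const char *revision = "$Revision: 1.65 $";   /* illustrative only */
        char rev[21];                  /* 20 copied chars + terminating NUL */
        char *p;

        if ((p = strchr(revision, ':'))) {
                strncpy(rev, p + 1, 20);   /* may fill all 20 bytes, no NUL */
                rev[20] = '\0';            /* needs the 21st byte           */
                if ((p = strchr(rev, '$')))
                        *p = 0;
                printf("rev='%s'\n", rev);
        }
        return 0;
}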


@ -48,6 +48,7 @@ static DEFINE_PCI_DEVICE_TABLE(atl1c_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L2C_B)},
{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L2C_B2)},
{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L1D)},
{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L1D_2_0)},
/* required last entry */
{ 0 }
};


@ -312,11 +312,9 @@ void be_link_status_update(struct be_adapter *adapter, bool link_up)
if (adapter->link_up != link_up) {
adapter->link_speed = -1;
if (link_up) {
netif_start_queue(netdev);
netif_carrier_on(netdev);
printk(KERN_INFO "%s: Link up\n", netdev->name);
} else {
netif_stop_queue(netdev);
netif_carrier_off(netdev);
printk(KERN_INFO "%s: Link down\n", netdev->name);
}
@ -2628,8 +2626,6 @@ static void be_netdev_init(struct net_device *netdev)
netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
BE_NAPI_WEIGHT);
netif_stop_queue(netdev);
}
static void be_unmap_pci_bars(struct be_adapter *adapter)


@ -22,8 +22,8 @@
* (you will need to reboot afterwards) */
/* #define BNX2X_STOP_ON_ERROR */
#define DRV_MODULE_VERSION "1.62.00-4"
#define DRV_MODULE_RELDATE "2011/01/18"
#define DRV_MODULE_VERSION "1.62.00-5"
#define DRV_MODULE_RELDATE "2011/01/30"
#define BNX2X_BC_VER 0x040200
#define BNX2X_MULTI_QUEUE


@ -3948,48 +3948,6 @@ static u8 bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
return rc;
}
static void bnx2x_8073_set_xaui_low_power_mode(struct bnx2x *bp,
struct bnx2x_phy *phy)
{
u16 val;
bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_8073_CHIP_REV, &val);
if (val == 0) {
/* Mustn't set low power mode in 8073 A0 */
return;
}
/* Disable PLL sequencer (use read-modify-write to clear bit 13) */
bnx2x_cl45_read(bp, phy,
MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, &val);
val &= ~(1<<13);
bnx2x_cl45_write(bp, phy,
MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, val);
/* PLL controls */
bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805E, 0x1077);
bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805D, 0x0000);
bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805C, 0x030B);
bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805B, 0x1240);
bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805A, 0x2490);
/* Tx Controls */
bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80A7, 0x0C74);
bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80A6, 0x9041);
bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80A5, 0x4640);
/* Rx Controls */
bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80FE, 0x01C4);
bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80FD, 0x9249);
bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80FC, 0x2015);
/* Enable PLL sequencer (use read-modify-write to set bit 13) */
bnx2x_cl45_read(bp, phy, MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, &val);
val |= (1<<13);
bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, val);
}
/******************************************************************/
/* BCM8073 PHY SECTION */
/******************************************************************/
@ -4148,8 +4106,6 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
bnx2x_8073_set_pause_cl37(params, phy, vars);
bnx2x_8073_set_xaui_low_power_mode(bp, phy);
bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1);
@ -6519,6 +6475,18 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_8481_LED1_MASK,
0x80);
/* Tell LED3 to blink on source */
bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_8481_LINK_SIGNAL,
&val);
val &= ~(7<<6);
val |= (1<<6); /* A83B[8:6]= 1 */
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_8481_LINK_SIGNAL,
val);
}
break;
}
@ -7720,10 +7688,13 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
struct bnx2x_phy phy[PORT_MAX];
struct bnx2x_phy *phy_blk[PORT_MAX];
u16 val;
s8 port;
s8 port = 0;
s8 port_of_path = 0;
bnx2x_ext_phy_hw_reset(bp, 0);
u32 swap_val, swap_override;
swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
port ^= (swap_val && swap_override);
bnx2x_ext_phy_hw_reset(bp, port);
/* PART1 - Reset both phys */
for (port = PORT_MAX - 1; port >= PORT_0; port--) {
u32 shmem_base, shmem2_base;


@ -2301,15 +2301,10 @@ static void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
/* accept matched ucast */
drop_all_ucast = 0;
}
if (filters & BNX2X_ACCEPT_MULTICAST) {
if (filters & BNX2X_ACCEPT_MULTICAST)
/* accept matched mcast */
drop_all_mcast = 0;
if (IS_MF_SI(bp))
/* since mcast addresses won't arrive with ovlan,
* fw needs to accept all of them in
* switch-independent mode */
accp_all_mcast = 1;
}
if (filters & BNX2X_ACCEPT_ALL_UNICAST) {
/* accept all mcast */
drop_all_ucast = 0;
@ -5296,10 +5291,6 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
}
}
bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
bp->common.shmem_base,
bp->common.shmem2_base);
bnx2x_setup_fan_failure_detection(bp);
/* clear PXP2 attentions */
@ -5503,9 +5494,6 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
bnx2x_init_block(bp, MCP_BLOCK, init_stage);
bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
bp->common.shmem_base,
bp->common.shmem2_base);
if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base,
bp->common.shmem2_base, port)) {
u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
@ -8379,6 +8367,17 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
(ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
bp->mdio.prtad =
XGXS_EXT_PHY_ADDR(ext_phy_config);
/*
* Check if hw lock is required to access MDC/MDIO bus to the PHY(s)
* In MF mode, it is set to cover self test cases
*/
if (IS_MF(bp))
bp->port.need_hw_lock = 1;
else
bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
bp->common.shmem_base,
bp->common.shmem2_base);
}
static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)


@ -23,7 +23,7 @@ config CAN_SLCAN
As only the sending and receiving of CAN frames is implemented, this
driver should work with the (serial/USB) CAN hardware from:
www.canusb.com / www.can232.com / www.mictronic.com / www.canhack.de
www.canusb.com / www.can232.com / www.mictronics.de / www.canhack.de
Userspace tools to attach the SLCAN line discipline (slcan_attach,
slcand) can be found in the can-utils at the SocketCAN SVN, see


@ -1109,7 +1109,7 @@ static ssize_t at91_sysfs_set_mb0_id(struct device *dev,
return ret;
}
static DEVICE_ATTR(mb0_id, S_IWUGO | S_IRUGO,
static DEVICE_ATTR(mb0_id, S_IWUSR | S_IRUGO,
at91_sysfs_show_mb0_id, at91_sysfs_set_mb0_id);
static struct attribute *at91_sysfs_attrs[] = {
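
The janz-ican3 hunk below makes the identical change. S_IWUGO grants write permission to user, group and other (0222), so combining it with S_IRUGO produced a 0666, world-writable sysfs attribute; S_IWUSR keeps the write bit for the owner only (0644 overall). A small userspace sketch of the mode arithmetic (S_IWUGO is a kernel-only macro, so it is spelled out here):

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
        unsigned int iwugo = S_IWUSR | S_IWGRP | S_IWOTH;   /* == S_IWUGO, 0222 */
        unsigned int irugo = S_IRUSR | S_IRGRP | S_IROTH;   /* == S_IRUGO, 0444 */

        printf("S_IWUGO | S_IRUGO = %04o (world-writable)\n", iwugo | irugo);
        printf("S_IWUSR | S_IRUGO = %04o (owner-writable)\n",
               (unsigned int)S_IWUSR | irugo);
        return 0;
}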


@ -1618,7 +1618,7 @@ static ssize_t ican3_sysfs_set_term(struct device *dev,
return count;
}
static DEVICE_ATTR(termination, S_IWUGO | S_IRUGO, ican3_sysfs_show_term,
static DEVICE_ATTR(termination, S_IWUSR | S_IRUGO, ican3_sysfs_show_term,
ican3_sysfs_set_term);
static struct attribute *ican3_sysfs_attrs[] = {


@ -1,6 +1,6 @@
config CAN_SOFTING
tristate "Softing Gmbh CAN generic support"
depends on CAN_DEV
depends on CAN_DEV && HAS_IOMEM
---help---
Support for CAN cards from Softing Gmbh & some cards
from Vector Gmbh.


@ -1094,7 +1094,7 @@ static int depca_rx(struct net_device *dev)
}
}
/* Change buffer ownership for this last frame, back to the adapter */
for (; lp->rx_old != entry; lp->rx_old = (++lp->rx_old) & lp->rxRingMask) {
for (; lp->rx_old != entry; lp->rx_old = (lp->rx_old + 1) & lp->rxRingMask) {
writel(readl(&lp->rx_ring[lp->rx_old].base) | R_OWN, &lp->rx_ring[lp->rx_old].base);
}
writel(readl(&lp->rx_ring[entry].base) | R_OWN, &lp->rx_ring[entry].base);
@ -1103,7 +1103,7 @@ static int depca_rx(struct net_device *dev)
/*
** Update entry information
*/
lp->rx_new = (++lp->rx_new) & lp->rxRingMask;
lp->rx_new = (lp->rx_new + 1) & lp->rxRingMask;
}
return 0;
@ -1148,7 +1148,7 @@ static int depca_tx(struct net_device *dev)
}
/* Update all the pointers */
lp->tx_old = (++lp->tx_old) & lp->txRingMask;
lp->tx_old = (lp->tx_old + 1) & lp->txRingMask;
}
return 0;
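
All three depca hunks remove the same construct: x = (++x) & mask modifies x twice without an intervening sequence point, which is undefined behavior in C (newer GCC warns "operation on ... may be undefined"), while (x + 1) & mask is the well-defined equivalent. A minimal sketch of the corrected ring-index update, assuming a power-of-two ring size:

#include <stdio.h>

/* Well-defined replacement for "idx = (++idx) & mask". */
static unsigned int ring_advance(unsigned int idx, unsigned int mask)
{
        return (idx + 1) & mask;
}

int main(void)
{
        unsigned int rx_new = 0;
        const unsigned int ring_mask = 7;        /* 8-entry ring, illustrative */

        for (int i = 0; i < 10; i++) {
                rx_new = ring_advance(rx_new, ring_mask);
                printf("%u ", rx_new);           /* 1 2 3 4 5 6 7 0 1 2 */
        }
        printf("\n");
        return 0;
}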


@ -1753,8 +1753,6 @@ rio_close (struct net_device *dev)
/* Free all the skbuffs in the queue. */
for (i = 0; i < RX_RING_SIZE; i++) {
np->rx_ring[i].status = 0;
np->rx_ring[i].fraginfo = 0;
skb = np->rx_skbuff[i];
if (skb) {
pci_unmap_single(np->pdev,
@ -1763,6 +1761,8 @@ rio_close (struct net_device *dev)
dev_kfree_skb (skb);
np->rx_skbuff[i] = NULL;
}
np->rx_ring[i].status = 0;
np->rx_ring[i].fraginfo = 0;
}
for (i = 0; i < TX_RING_SIZE; i++) {
skb = np->tx_skbuff[i];


@ -812,7 +812,7 @@ static void enc28j60_read_tsv(struct enc28j60_net *priv, u8 tsv[TSV_SIZE])
if (netif_msg_hw(priv))
printk(KERN_DEBUG DRV_NAME ": reading TSV at addr:0x%04x\n",
endptr + 1);
enc28j60_mem_read(priv, endptr + 1, sizeof(tsv), tsv);
enc28j60_mem_read(priv, endptr + 1, TSV_SIZE, tsv);
}
static void enc28j60_dump_tsv(struct enc28j60_net *priv, const char *msg,
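
The enc28j60 fix (and, in the same spirit, the s390 netiucv/smsgiucv sizeof fixes further down) addresses classic array-parameter decay: inside the callee, a parameter declared as u8 tsv[TSV_SIZE] is just a pointer, so sizeof(tsv) yields the pointer size rather than TSV_SIZE. A standalone illustration, with the size constant made up for the sketch:

#include <stdio.h>

#define TSV_SIZE 100                    /* illustrative value only */

static void read_tsv(unsigned char tsv[TSV_SIZE])
{
        /* tsv has decayed to unsigned char *, so this prints 4 or 8. */
        printf("sizeof in callee:    %zu\n", sizeof(tsv));
        printf("intended length:     %d\n", TSV_SIZE);
}

int main(void)
{
        unsigned char tsv[TSV_SIZE];

        printf("sizeof at call site: %zu\n", sizeof(tsv));   /* 100 */
        read_tsv(tsv);
        return 0;
}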


@ -4489,6 +4489,9 @@ static int niu_alloc_channels(struct niu *np)
{
struct niu_parent *parent = np->parent;
int first_rx_channel, first_tx_channel;
int num_rx_rings, num_tx_rings;
struct rx_ring_info *rx_rings;
struct tx_ring_info *tx_rings;
int i, port, err;
port = np->port;
@ -4498,18 +4501,21 @@ static int niu_alloc_channels(struct niu *np)
first_tx_channel += parent->txchan_per_port[i];
}
np->num_rx_rings = parent->rxchan_per_port[port];
np->num_tx_rings = parent->txchan_per_port[port];
num_rx_rings = parent->rxchan_per_port[port];
num_tx_rings = parent->txchan_per_port[port];
netif_set_real_num_rx_queues(np->dev, np->num_rx_rings);
netif_set_real_num_tx_queues(np->dev, np->num_tx_rings);
np->rx_rings = kcalloc(np->num_rx_rings, sizeof(struct rx_ring_info),
GFP_KERNEL);
rx_rings = kcalloc(num_rx_rings, sizeof(struct rx_ring_info),
GFP_KERNEL);
err = -ENOMEM;
if (!np->rx_rings)
if (!rx_rings)
goto out_err;
np->num_rx_rings = num_rx_rings;
smp_wmb();
np->rx_rings = rx_rings;
netif_set_real_num_rx_queues(np->dev, num_rx_rings);
for (i = 0; i < np->num_rx_rings; i++) {
struct rx_ring_info *rp = &np->rx_rings[i];
@ -4538,12 +4544,18 @@ static int niu_alloc_channels(struct niu *np)
return err;
}
np->tx_rings = kcalloc(np->num_tx_rings, sizeof(struct tx_ring_info),
GFP_KERNEL);
tx_rings = kcalloc(num_tx_rings, sizeof(struct tx_ring_info),
GFP_KERNEL);
err = -ENOMEM;
if (!np->tx_rings)
if (!tx_rings)
goto out_err;
np->num_tx_rings = num_tx_rings;
smp_wmb();
np->tx_rings = tx_rings;
netif_set_real_num_tx_queues(np->dev, num_tx_rings);
for (i = 0; i < np->num_tx_rings; i++) {
struct tx_ring_info *rp = &np->tx_rings[i];
@ -6246,11 +6258,17 @@ static void niu_sync_mac_stats(struct niu *np)
static void niu_get_rx_stats(struct niu *np)
{
unsigned long pkts, dropped, errors, bytes;
struct rx_ring_info *rx_rings;
int i;
pkts = dropped = errors = bytes = 0;
rx_rings = ACCESS_ONCE(np->rx_rings);
if (!rx_rings)
goto no_rings;
for (i = 0; i < np->num_rx_rings; i++) {
struct rx_ring_info *rp = &np->rx_rings[i];
struct rx_ring_info *rp = &rx_rings[i];
niu_sync_rx_discard_stats(np, rp, 0);
@ -6259,6 +6277,8 @@ static void niu_get_rx_stats(struct niu *np)
dropped += rp->rx_dropped;
errors += rp->rx_errors;
}
no_rings:
np->dev->stats.rx_packets = pkts;
np->dev->stats.rx_bytes = bytes;
np->dev->stats.rx_dropped = dropped;
@ -6268,16 +6288,24 @@ static void niu_get_rx_stats(struct niu *np)
static void niu_get_tx_stats(struct niu *np)
{
unsigned long pkts, errors, bytes;
struct tx_ring_info *tx_rings;
int i;
pkts = errors = bytes = 0;
tx_rings = ACCESS_ONCE(np->tx_rings);
if (!tx_rings)
goto no_rings;
for (i = 0; i < np->num_tx_rings; i++) {
struct tx_ring_info *rp = &np->tx_rings[i];
struct tx_ring_info *rp = &tx_rings[i];
pkts += rp->tx_packets;
bytes += rp->tx_bytes;
errors += rp->tx_errors;
}
no_rings:
np->dev->stats.tx_packets = pkts;
np->dev->stats.tx_bytes = bytes;
np->dev->stats.tx_errors = errors;
@ -6287,9 +6315,10 @@ static struct net_device_stats *niu_get_stats(struct net_device *dev)
{
struct niu *np = netdev_priv(dev);
niu_get_rx_stats(np);
niu_get_tx_stats(np);
if (netif_running(dev)) {
niu_get_rx_stats(np);
niu_get_tx_stats(np);
}
return &dev->stats;
}
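
The niu changes implement a publish-after-initialize scheme so that the stats path can run concurrently with up/down: the ring count is written first, smp_wmb() orders it before the pointer store, and the readers snapshot the pointer once via ACCESS_ONCE() and skip the pass if it is still NULL. Below is a userspace analog of that ordering, assuming C11 release/acquire atomics in place of the kernel primitives; struct ring, dev_state and the helper names are hypothetical, not the driver's:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct ring { unsigned long packets; };

struct dev_state {
        int num_rx_rings;
        _Atomic(struct ring *) rx_rings;         /* NULL until published */
};

/* Writer: make the count and ring contents visible before the pointer. */
static void publish_rings(struct dev_state *d, struct ring *r, int n)
{
        d->num_rx_rings = n;
        atomic_store_explicit(&d->rx_rings, r, memory_order_release);
}

/* Reader: tolerate a not-yet-published (or torn-down) ring array. */
static unsigned long sum_rx_packets(struct dev_state *d)
{
        struct ring *r = atomic_load_explicit(&d->rx_rings,
                                              memory_order_acquire);
        unsigned long total = 0;

        if (!r)
                return 0;                        /* device going up/down */
        for (int i = 0; i < d->num_rx_rings; i++)
                total += r[i].packets;
        return total;
}

int main(void)
{
        struct dev_state d = { 0, NULL };
        struct ring *r = calloc(4, sizeof(*r));

        if (!r)
                return 1;
        printf("before publish: %lu\n", sum_rx_packets(&d));   /* 0  */
        r[2].packets = 42;
        publish_rings(&d, r, 4);
        printf("after publish:  %lu\n", sum_rx_packets(&d));   /* 42 */
        free(r);
        return 0;
}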


@ -1488,12 +1488,10 @@ static void ei_rx_overrun(struct net_device *dev)
/*
* Wait a full Tx time (1.2ms) + some guard time, NS says 1.6ms total.
* Early datasheets said to poll the reset bit, but now they say that
* it "is not a reliable indicator and subsequently should be ignored."
* We wait at least 10ms.
* We wait at least 2ms.
*/
mdelay(10);
mdelay(2);
/*
* Reset RBCR[01] back to zero as per magic incantation.


@ -973,7 +973,8 @@ static void __rtl8169_check_link_status(struct net_device *dev,
if (pm)
pm_request_resume(&tp->pci_dev->dev);
netif_carrier_on(dev);
netif_info(tp, ifup, dev, "link up\n");
if (net_ratelimit())
netif_info(tp, ifup, dev, "link up\n");
} else {
netif_carrier_off(dev);
netif_info(tp, ifdown, dev, "link down\n");
@ -3757,7 +3758,8 @@ static void rtl_hw_start_8168(struct net_device *dev)
RTL_W16(IntrMitigate, 0x5151);
/* Work around for RxFIFO overflow. */
if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
if (tp->mac_version == RTL_GIGA_MAC_VER_11 ||
tp->mac_version == RTL_GIGA_MAC_VER_22) {
tp->intr_event |= RxFIFOOver | PCSTimeout;
tp->intr_event &= ~RxOverflow;
}
@ -4639,12 +4641,33 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
break;
}
/* Work around for rx fifo overflow */
if (unlikely(status & RxFIFOOver) &&
(tp->mac_version == RTL_GIGA_MAC_VER_11)) {
netif_stop_queue(dev);
rtl8169_tx_timeout(dev);
break;
if (unlikely(status & RxFIFOOver)) {
switch (tp->mac_version) {
/* Work around for rx fifo overflow */
case RTL_GIGA_MAC_VER_11:
case RTL_GIGA_MAC_VER_22:
case RTL_GIGA_MAC_VER_26:
netif_stop_queue(dev);
rtl8169_tx_timeout(dev);
goto done;
/* Testers needed. */
case RTL_GIGA_MAC_VER_17:
case RTL_GIGA_MAC_VER_19:
case RTL_GIGA_MAC_VER_20:
case RTL_GIGA_MAC_VER_21:
case RTL_GIGA_MAC_VER_23:
case RTL_GIGA_MAC_VER_24:
case RTL_GIGA_MAC_VER_27:
case RTL_GIGA_MAC_VER_28:
/* Experimental science. Pktgen proof. */
case RTL_GIGA_MAC_VER_12:
case RTL_GIGA_MAC_VER_25:
if (status == RxFIFOOver)
goto done;
break;
default:
break;
}
}
if (unlikely(status & SYSErr)) {
@ -4680,7 +4703,7 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
(status & RxFIFOOver) ? (status | RxOverflow) : status);
status = RTL_R16(IntrStatus);
}
done:
return IRQ_RETVAL(handled);
}


@ -3690,7 +3690,7 @@ __vxge_hw_vpath_rts_table_get(struct __vxge_hw_vpath_handle *vp,
if (status != VXGE_HW_OK)
goto exit;
if ((rts_table != VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) ||
if ((rts_table != VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) &&
(rts_table !=
VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT))
*data1 = 0;
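
The vxge fix is a plain logic-operator bug: when A and B differ, "x != A || x != B" is true for every x, so *data1 was zeroed unconditionally; the intended "x is neither A nor B" requires &&. A short truth-table check (the constants are stand-ins for the driver's selector values):

#include <stdio.h>

int main(void)
{
        enum { SEL_DA = 1, SEL_RTH_MULTI_IT = 2 };      /* stand-in values */

        for (int x = 0; x <= 3; x++)
                printf("x=%d  old(||)=%d  new(&&)=%d\n", x,
                       (x != SEL_DA) || (x != SEL_RTH_MULTI_IT),
                       (x != SEL_DA) && (x != SEL_RTH_MULTI_IT));
        return 0;
}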


@ -838,9 +838,9 @@ int ath5k_hw_dma_stop(struct ath5k_hw *ah)
for (i = 0; i < qmax; i++) {
err = ath5k_hw_stop_tx_dma(ah, i);
/* -EINVAL -> queue inactive */
if (err != -EINVAL)
if (err && err != -EINVAL)
return err;
}
return err;
return 0;
}


@ -86,7 +86,7 @@ int ath5k_hw_get_frame_duration(struct ath5k_hw *ah,
if (!ah->ah_bwmode) {
dur = ieee80211_generic_frame_duration(sc->hw,
NULL, len, rate);
return dur;
return le16_to_cpu(dur);
}
bitrate = rate->bitrate;
@ -265,8 +265,6 @@ static inline void ath5k_hw_write_rate_duration(struct ath5k_hw *ah)
* what rate we should choose to TX ACKs. */
tx_time = ath5k_hw_get_frame_duration(ah, 10, rate);
tx_time = le16_to_cpu(tx_time);
ath5k_hw_reg_write(ah, tx_time, reg);
if (!(rate->flags & IEEE80211_RATE_SHORT_PREAMBLE))


@ -426,9 +426,8 @@ static void ar9002_hw_configpcipowersave(struct ath_hw *ah,
}
/* WAR for ASPM system hang */
if (AR_SREV_9280(ah) || AR_SREV_9285(ah) || AR_SREV_9287(ah)) {
if (AR_SREV_9285(ah) || AR_SREV_9287(ah))
val |= (AR_WA_BIT6 | AR_WA_BIT7);
}
if (AR_SREV_9285E_20(ah))
val |= AR_WA_BIT23;


@ -142,9 +142,6 @@ static void ath9k_deinit_priv(struct ath9k_htc_priv *priv)
{
ath9k_htc_exit_debug(priv->ah);
ath9k_hw_deinit(priv->ah);
tasklet_kill(&priv->swba_tasklet);
tasklet_kill(&priv->rx_tasklet);
tasklet_kill(&priv->tx_tasklet);
kfree(priv->ah);
priv->ah = NULL;
}


@ -1025,12 +1025,6 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw)
int ret = 0;
u8 cmd_rsp;
/* Cancel all the running timers/work .. */
cancel_work_sync(&priv->fatal_work);
cancel_work_sync(&priv->ps_work);
cancel_delayed_work_sync(&priv->ath9k_led_blink_work);
ath9k_led_stop_brightness(priv);
mutex_lock(&priv->mutex);
if (priv->op_flags & OP_INVALID) {
@ -1044,8 +1038,23 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw)
WMI_CMD(WMI_DISABLE_INTR_CMDID);
WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID);
WMI_CMD(WMI_STOP_RECV_CMDID);
tasklet_kill(&priv->swba_tasklet);
tasklet_kill(&priv->rx_tasklet);
tasklet_kill(&priv->tx_tasklet);
skb_queue_purge(&priv->tx_queue);
mutex_unlock(&priv->mutex);
/* Cancel all the running timers/work .. */
cancel_work_sync(&priv->fatal_work);
cancel_work_sync(&priv->ps_work);
cancel_delayed_work_sync(&priv->ath9k_led_blink_work);
ath9k_led_stop_brightness(priv);
mutex_lock(&priv->mutex);
/* Remove monitor interface here */
if (ah->opmode == NL80211_IFTYPE_MONITOR) {
if (ath9k_htc_remove_monitor_interface(priv))


@ -598,8 +598,6 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
err_queues:
ath9k_hw_deinit(ah);
err_hw:
tasklet_kill(&sc->intr_tq);
tasklet_kill(&sc->bcon_tasklet);
kfree(ah);
sc->sc_ah = NULL;
@ -807,9 +805,6 @@ static void ath9k_deinit_softc(struct ath_softc *sc)
ath9k_hw_deinit(sc->sc_ah);
tasklet_kill(&sc->intr_tq);
tasklet_kill(&sc->bcon_tasklet);
kfree(sc->sc_ah);
sc->sc_ah = NULL;
}
@ -824,6 +819,8 @@ void ath9k_deinit_device(struct ath_softc *sc)
wiphy_rfkill_stop_polling(sc->hw->wiphy);
ath_deinit_leds(sc);
ath9k_ps_restore(sc);
for (i = 0; i < sc->num_sec_wiphy; i++) {
struct ath_wiphy *aphy = sc->sec_wiphy[i];
if (aphy == NULL)


@ -325,6 +325,8 @@ static bool ath_paprd_send_frame(struct ath_softc *sc, struct sk_buff *skb, int
{
struct ieee80211_hw *hw = sc->hw;
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
struct ath_tx_control txctl;
int time_left;
@ -342,8 +344,12 @@ static bool ath_paprd_send_frame(struct ath_softc *sc, struct sk_buff *skb, int
init_completion(&sc->paprd_complete);
sc->paprd_pending = true;
txctl.paprd = BIT(chain);
if (ath_tx_start(hw, skb, &txctl) != 0)
if (ath_tx_start(hw, skb, &txctl) != 0) {
ath_dbg(common, ATH_DBG_XMIT, "PAPRD TX failed\n");
dev_kfree_skb_any(skb);
return false;
}
time_left = wait_for_completion_timeout(&sc->paprd_complete,
msecs_to_jiffies(ATH_PAPRD_TIMEOUT));
@ -953,8 +959,6 @@ void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw)
spin_unlock_bh(&sc->sc_pcu_lock);
ath9k_ps_restore(sc);
ath9k_setpower(sc, ATH9K_PM_FULL_SLEEP);
}
int ath_reset(struct ath_softc *sc, bool retry_tx)
@ -1309,6 +1313,9 @@ static void ath9k_stop(struct ieee80211_hw *hw)
spin_lock_bh(&sc->sc_pcu_lock);
/* prevent tasklets to enable interrupts once we disable them */
ah->imask &= ~ATH9K_INT_GLOBAL;
/* make sure h/w will not generate any interrupt
* before setting the invalid flag. */
ath9k_hw_disable_interrupts(ah);
@ -1326,6 +1333,12 @@ static void ath9k_stop(struct ieee80211_hw *hw)
spin_unlock_bh(&sc->sc_pcu_lock);
/* we can now sync irq and kill any running tasklets, since we already
* disabled interrupts and not holding a spin lock */
synchronize_irq(sc->irq);
tasklet_kill(&sc->intr_tq);
tasklet_kill(&sc->bcon_tasklet);
ath9k_ps_restore(sc);
sc->ps_idle = true;


@ -726,9 +726,9 @@ static int efuse_pg_packet_read(struct ieee80211_hw *hw, u8 offset, u8 *data)
}
static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr,
u8 efuse_data, u8 offset, int *bcontinual,
u8 *write_state, struct pgpkt_struct target_pkt,
int *repeat_times, int *bresult, u8 word_en)
u8 efuse_data, u8 offset, int *bcontinual,
u8 *write_state, struct pgpkt_struct *target_pkt,
int *repeat_times, int *bresult, u8 word_en)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct pgpkt_struct tmp_pkt;
@ -744,8 +744,8 @@ static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr,
tmp_pkt.word_en = tmp_header & 0x0F;
tmp_word_cnts = efuse_calculate_word_cnts(tmp_pkt.word_en);
if (tmp_pkt.offset != target_pkt.offset) {
efuse_addr = efuse_addr + (tmp_word_cnts * 2) + 1;
if (tmp_pkt.offset != target_pkt->offset) {
*efuse_addr = *efuse_addr + (tmp_word_cnts * 2) + 1;
*write_state = PG_STATE_HEADER;
} else {
for (tmpindex = 0; tmpindex < (tmp_word_cnts * 2); tmpindex++) {
@ -756,23 +756,23 @@ static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr,
}
if (bdataempty == false) {
efuse_addr = efuse_addr + (tmp_word_cnts * 2) + 1;
*efuse_addr = *efuse_addr + (tmp_word_cnts * 2) + 1;
*write_state = PG_STATE_HEADER;
} else {
match_word_en = 0x0F;
if (!((target_pkt.word_en & BIT(0)) |
if (!((target_pkt->word_en & BIT(0)) |
(tmp_pkt.word_en & BIT(0))))
match_word_en &= (~BIT(0));
if (!((target_pkt.word_en & BIT(1)) |
if (!((target_pkt->word_en & BIT(1)) |
(tmp_pkt.word_en & BIT(1))))
match_word_en &= (~BIT(1));
if (!((target_pkt.word_en & BIT(2)) |
if (!((target_pkt->word_en & BIT(2)) |
(tmp_pkt.word_en & BIT(2))))
match_word_en &= (~BIT(2));
if (!((target_pkt.word_en & BIT(3)) |
if (!((target_pkt->word_en & BIT(3)) |
(tmp_pkt.word_en & BIT(3))))
match_word_en &= (~BIT(3));
@ -780,7 +780,7 @@ static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr,
badworden = efuse_word_enable_data_write(
hw, *efuse_addr + 1,
tmp_pkt.word_en,
target_pkt.data);
target_pkt->data);
if (0x0F != (badworden & 0x0F)) {
u8 reorg_offset = offset;
@ -791,26 +791,26 @@ static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr,
}
tmp_word_en = 0x0F;
if ((target_pkt.word_en & BIT(0)) ^
if ((target_pkt->word_en & BIT(0)) ^
(match_word_en & BIT(0)))
tmp_word_en &= (~BIT(0));
if ((target_pkt.word_en & BIT(1)) ^
if ((target_pkt->word_en & BIT(1)) ^
(match_word_en & BIT(1)))
tmp_word_en &= (~BIT(1));
if ((target_pkt.word_en & BIT(2)) ^
if ((target_pkt->word_en & BIT(2)) ^
(match_word_en & BIT(2)))
tmp_word_en &= (~BIT(2));
if ((target_pkt.word_en & BIT(3)) ^
if ((target_pkt->word_en & BIT(3)) ^
(match_word_en & BIT(3)))
tmp_word_en &= (~BIT(3));
if ((tmp_word_en & 0x0F) != 0x0F) {
*efuse_addr = efuse_get_current_size(hw);
target_pkt.offset = offset;
target_pkt.word_en = tmp_word_en;
target_pkt->offset = offset;
target_pkt->word_en = tmp_word_en;
} else
*bcontinual = false;
*write_state = PG_STATE_HEADER;
@ -821,8 +821,8 @@ static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr,
}
} else {
*efuse_addr += (2 * tmp_word_cnts) + 1;
target_pkt.offset = offset;
target_pkt.word_en = word_en;
target_pkt->offset = offset;
target_pkt->word_en = word_en;
*write_state = PG_STATE_HEADER;
}
}
@ -938,7 +938,7 @@ static int efuse_pg_packet_write(struct ieee80211_hw *hw,
efuse_write_data_case1(hw, &efuse_addr,
efuse_data, offset,
&bcontinual,
&write_state, target_pkt,
&write_state, &target_pkt,
&repeat_times, &bresult,
word_en);
else
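
The whole efuse diff is one mechanical change: target_pkt used to be passed by value, so the updates to target_pkt.offset and target_pkt.word_en made inside efuse_write_data_case1() were applied to a local copy and lost in the caller; passing a pointer makes them stick. A minimal reproduction of the difference (struct and field names here are illustrative, not the driver's):

#include <stdio.h>

struct pgpkt { int offset; int word_en; };

static void update_by_value(struct pgpkt p)    { p.offset = 7;  }  /* lost    */
static void update_by_pointer(struct pgpkt *p) { p->offset = 7; }  /* visible */

int main(void)
{
        struct pgpkt target = { .offset = 0, .word_en = 0x0f };

        update_by_value(target);
        printf("after by-value call:   offset=%d\n", target.offset);  /* 0 */

        update_by_pointer(&target);
        printf("after by-pointer call: offset=%d\n", target.offset);  /* 7 */
        return 0;
}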


@ -110,9 +110,8 @@ static void wl1271_spi_reset(struct wl1271 *wl)
spi_message_add_tail(&t, &m);
spi_sync(wl_to_spi(wl), &m);
kfree(cmd);
wl1271_dump(DEBUG_SPI, "spi reset -> ", cmd, WSPI_INIT_CMD_LEN);
kfree(cmd);
}
static void wl1271_spi_init(struct wl1271 *wl)


@ -120,6 +120,9 @@ struct netfront_info {
unsigned long rx_pfn_array[NET_RX_RING_SIZE];
struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
struct mmu_update rx_mmu[NET_RX_RING_SIZE];
/* Statistics */
int rx_gso_checksum_fixup;
};
struct netfront_rx_info {
@ -770,11 +773,29 @@ static RING_IDX xennet_fill_frags(struct netfront_info *np,
return cons;
}
static int skb_checksum_setup(struct sk_buff *skb)
static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
{
struct iphdr *iph;
unsigned char *th;
int err = -EPROTO;
int recalculate_partial_csum = 0;
/*
* A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
* peers can fail to set NETRXF_csum_blank when sending a GSO
* frame. In this case force the SKB to CHECKSUM_PARTIAL and
* recalculate the partial checksum.
*/
if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
struct netfront_info *np = netdev_priv(dev);
np->rx_gso_checksum_fixup++;
skb->ip_summed = CHECKSUM_PARTIAL;
recalculate_partial_csum = 1;
}
/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
if (skb->ip_summed != CHECKSUM_PARTIAL)
return 0;
if (skb->protocol != htons(ETH_P_IP))
goto out;
@ -788,9 +809,23 @@ static int skb_checksum_setup(struct sk_buff *skb)
switch (iph->protocol) {
case IPPROTO_TCP:
skb->csum_offset = offsetof(struct tcphdr, check);
if (recalculate_partial_csum) {
struct tcphdr *tcph = (struct tcphdr *)th;
tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
skb->len - iph->ihl*4,
IPPROTO_TCP, 0);
}
break;
case IPPROTO_UDP:
skb->csum_offset = offsetof(struct udphdr, check);
if (recalculate_partial_csum) {
struct udphdr *udph = (struct udphdr *)th;
udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
skb->len - iph->ihl*4,
IPPROTO_UDP, 0);
}
break;
default:
if (net_ratelimit())
@ -829,13 +864,11 @@ static int handle_incoming_queue(struct net_device *dev,
/* Ethernet work: Delayed to here as it peeks the header. */
skb->protocol = eth_type_trans(skb, dev);
if (skb->ip_summed == CHECKSUM_PARTIAL) {
if (skb_checksum_setup(skb)) {
kfree_skb(skb);
packets_dropped++;
dev->stats.rx_errors++;
continue;
}
if (checksum_setup(dev, skb)) {
kfree_skb(skb);
packets_dropped++;
dev->stats.rx_errors++;
continue;
}
dev->stats.rx_packets++;
@ -1632,12 +1665,59 @@ static void netback_changed(struct xenbus_device *dev,
}
}
static const struct xennet_stat {
char name[ETH_GSTRING_LEN];
u16 offset;
} xennet_stats[] = {
{
"rx_gso_checksum_fixup",
offsetof(struct netfront_info, rx_gso_checksum_fixup)
},
};
static int xennet_get_sset_count(struct net_device *dev, int string_set)
{
switch (string_set) {
case ETH_SS_STATS:
return ARRAY_SIZE(xennet_stats);
default:
return -EINVAL;
}
}
static void xennet_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 * data)
{
void *np = netdev_priv(dev);
int i;
for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
data[i] = *(int *)(np + xennet_stats[i].offset);
}
static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 * data)
{
int i;
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
memcpy(data + i * ETH_GSTRING_LEN,
xennet_stats[i].name, ETH_GSTRING_LEN);
break;
}
}
static const struct ethtool_ops xennet_ethtool_ops =
{
.set_tx_csum = ethtool_op_set_tx_csum,
.set_sg = xennet_set_sg,
.set_tso = xennet_set_tso,
.get_link = ethtool_op_get_link,
.get_sset_count = xennet_get_sset_count,
.get_ethtool_stats = xennet_get_ethtool_stats,
.get_strings = xennet_get_strings,
};
#ifdef CONFIG_SYSFS


@ -565,7 +565,7 @@ static int netiucv_callback_connreq(struct iucv_path *path,
struct iucv_event ev;
int rc;
if (memcmp(iucvMagic, ipuser, sizeof(ipuser)))
if (memcmp(iucvMagic, ipuser, 16))
/* ipuser must match iucvMagic. */
return -EINVAL;
rc = -EINVAL;


@ -988,16 +988,30 @@ static void qeth_get_channel_path_desc(struct qeth_card *card)
chp_dsc = (struct channelPath_dsc *)ccw_device_get_chp_desc(ccwdev, 0);
if (chp_dsc != NULL) {
/* CHPP field bit 6 == 1 -> single queue */
if ((chp_dsc->chpp & 0x02) == 0x02)
if ((chp_dsc->chpp & 0x02) == 0x02) {
if ((atomic_read(&card->qdio.state) !=
QETH_QDIO_UNINITIALIZED) &&
(card->qdio.no_out_queues == 4))
/* change from 4 to 1 outbound queues */
qeth_free_qdio_buffers(card);
card->qdio.no_out_queues = 1;
if (card->qdio.default_out_queue != 0)
dev_info(&card->gdev->dev,
"Priority Queueing not supported\n");
card->qdio.default_out_queue = 0;
} else {
if ((atomic_read(&card->qdio.state) !=
QETH_QDIO_UNINITIALIZED) &&
(card->qdio.no_out_queues == 1)) {
/* change from 1 to 4 outbound queues */
qeth_free_qdio_buffers(card);
card->qdio.default_out_queue = 2;
}
card->qdio.no_out_queues = 4;
}
card->info.func_level = 0x4100 + chp_dsc->desc;
kfree(chp_dsc);
}
if (card->qdio.no_out_queues == 1) {
card->qdio.default_out_queue = 0;
dev_info(&card->gdev->dev,
"Priority Queueing not supported\n");
}
QETH_DBF_TEXT_(SETUP, 2, "nr:%x", card->qdio.no_out_queues);
QETH_DBF_TEXT_(SETUP, 2, "lvl:%02x", card->info.func_level);
return;
@ -1832,33 +1846,6 @@ static inline int qeth_get_initial_mtu_for_card(struct qeth_card *card)
}
}
static inline int qeth_get_max_mtu_for_card(int cardtype)
{
switch (cardtype) {
case QETH_CARD_TYPE_UNKNOWN:
case QETH_CARD_TYPE_OSD:
case QETH_CARD_TYPE_OSN:
case QETH_CARD_TYPE_OSM:
case QETH_CARD_TYPE_OSX:
return 61440;
case QETH_CARD_TYPE_IQD:
return 57344;
default:
return 1500;
}
}
static inline int qeth_get_mtu_out_of_mpc(int cardtype)
{
switch (cardtype) {
case QETH_CARD_TYPE_IQD:
return 1;
default:
return 0;
}
}
static inline int qeth_get_mtu_outof_framesize(int framesize)
{
switch (framesize) {
@ -1881,10 +1868,9 @@ static inline int qeth_mtu_is_valid(struct qeth_card *card, int mtu)
case QETH_CARD_TYPE_OSD:
case QETH_CARD_TYPE_OSM:
case QETH_CARD_TYPE_OSX:
return ((mtu >= 576) && (mtu <= 61440));
case QETH_CARD_TYPE_IQD:
return ((mtu >= 576) &&
(mtu <= card->info.max_mtu + 4096 - 32));
(mtu <= card->info.max_mtu));
case QETH_CARD_TYPE_OSN:
case QETH_CARD_TYPE_UNKNOWN:
default:
@ -1907,7 +1893,7 @@ static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
memcpy(&card->token.ulp_filter_r,
QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data),
QETH_MPC_TOKEN_LENGTH);
if (qeth_get_mtu_out_of_mpc(card->info.type)) {
if (card->info.type == QETH_CARD_TYPE_IQD) {
memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2);
mtu = qeth_get_mtu_outof_framesize(framesize);
if (!mtu) {
@ -1915,12 +1901,21 @@ static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc);
return 0;
}
card->info.max_mtu = mtu;
if (card->info.initial_mtu && (card->info.initial_mtu != mtu)) {
/* frame size has changed */
if (card->dev &&
((card->dev->mtu == card->info.initial_mtu) ||
(card->dev->mtu > mtu)))
card->dev->mtu = mtu;
qeth_free_qdio_buffers(card);
}
card->info.initial_mtu = mtu;
card->info.max_mtu = mtu;
card->qdio.in_buf_size = mtu + 2 * PAGE_SIZE;
} else {
card->info.initial_mtu = qeth_get_initial_mtu_for_card(card);
card->info.max_mtu = qeth_get_max_mtu_for_card(card->info.type);
card->info.max_mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU(
iob->data);
card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
}
@ -3775,6 +3770,47 @@ static inline int qeth_get_qdio_q_format(struct qeth_card *card)
}
}
static void qeth_determine_capabilities(struct qeth_card *card)
{
int rc;
int length;
char *prcd;
struct ccw_device *ddev;
int ddev_offline = 0;
QETH_DBF_TEXT(SETUP, 2, "detcapab");
ddev = CARD_DDEV(card);
if (!ddev->online) {
ddev_offline = 1;
rc = ccw_device_set_online(ddev);
if (rc) {
QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
goto out;
}
}
rc = qeth_read_conf_data(card, (void **) &prcd, &length);
if (rc) {
QETH_DBF_MESSAGE(2, "%s qeth_read_conf_data returned %i\n",
dev_name(&card->gdev->dev), rc);
QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
goto out_offline;
}
qeth_configure_unitaddr(card, prcd);
qeth_configure_blkt_default(card, prcd);
kfree(prcd);
rc = qdio_get_ssqd_desc(ddev, &card->ssqd);
if (rc)
QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
out_offline:
if (ddev_offline == 1)
ccw_device_set_offline(ddev);
out:
return;
}
static int qeth_qdio_establish(struct qeth_card *card)
{
struct qdio_initialize init_data;
@ -3905,6 +3941,7 @@ int qeth_core_hardsetup_card(struct qeth_card *card)
QETH_DBF_TEXT(SETUP, 2, "hrdsetup");
atomic_set(&card->force_alloc_skb, 0);
qeth_get_channel_path_desc(card);
retry:
if (retries)
QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n",
@ -3933,6 +3970,7 @@ int qeth_core_hardsetup_card(struct qeth_card *card)
else
goto retry;
}
qeth_determine_capabilities(card);
qeth_init_tokens(card);
qeth_init_func_level(card);
rc = qeth_idx_activate_channel(&card->read, qeth_idx_read_cb);
@ -4202,41 +4240,6 @@ void qeth_core_free_discipline(struct qeth_card *card)
card->discipline.ccwgdriver = NULL;
}
static void qeth_determine_capabilities(struct qeth_card *card)
{
int rc;
int length;
char *prcd;
QETH_DBF_TEXT(SETUP, 2, "detcapab");
rc = ccw_device_set_online(CARD_DDEV(card));
if (rc) {
QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
goto out;
}
rc = qeth_read_conf_data(card, (void **) &prcd, &length);
if (rc) {
QETH_DBF_MESSAGE(2, "%s qeth_read_conf_data returned %i\n",
dev_name(&card->gdev->dev), rc);
QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
goto out_offline;
}
qeth_configure_unitaddr(card, prcd);
qeth_configure_blkt_default(card, prcd);
kfree(prcd);
rc = qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd);
if (rc)
QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
out_offline:
ccw_device_set_offline(CARD_DDEV(card));
out:
return;
}
static int qeth_core_probe_device(struct ccwgroup_device *gdev)
{
struct qeth_card *card;


@ -573,13 +573,13 @@ static int qeth_l2_send_setmac_cb(struct qeth_card *card,
case IPA_RC_L2_DUP_LAYER3_MAC:
dev_warn(&card->gdev->dev,
"MAC address %pM already exists\n",
card->dev->dev_addr);
cmd->data.setdelmac.mac);
break;
case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP:
case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP:
dev_warn(&card->gdev->dev,
"MAC address %pM is not authorized\n",
card->dev->dev_addr);
cmd->data.setdelmac.mac);
break;
default:
break;


@ -60,7 +60,7 @@ static struct iucv_handler smsg_handler = {
static int smsg_path_pending(struct iucv_path *path, u8 ipvmid[8],
u8 ipuser[16])
{
if (strncmp(ipvmid, "*MSG ", sizeof(ipvmid)) != 0)
if (strncmp(ipvmid, "*MSG ", 8) != 0)
return -EINVAL;
/* Path pending from *MSG. */
return iucv_path_accept(path, &smsg_handler, "SMSGIUCV ", NULL);


@ -128,8 +128,7 @@ static void handle_tx(struct vhost_net *net)
size_t hdr_size;
struct socket *sock;
/* TODO: check that we are running from vhost_worker?
* Not sure it's worth it, it's straight-forward enough. */
/* TODO: check that we are running from vhost_worker? */
sock = rcu_dereference_check(vq->private_data, 1);
if (!sock)
return;
@ -306,7 +305,8 @@ static void handle_rx_big(struct vhost_net *net)
size_t len, total_len = 0;
int err;
size_t hdr_size;
struct socket *sock = rcu_dereference(vq->private_data);
/* TODO: check that we are running from vhost_worker? */
struct socket *sock = rcu_dereference_check(vq->private_data, 1);
if (!sock || skb_queue_empty(&sock->sk->sk_receive_queue))
return;
@ -415,7 +415,8 @@ static void handle_rx_mergeable(struct vhost_net *net)
int err, headcount;
size_t vhost_hlen, sock_hlen;
size_t vhost_len, sock_len;
struct socket *sock = rcu_dereference(vq->private_data);
/* TODO: check that we are running from vhost_worker? */
struct socket *sock = rcu_dereference_check(vq->private_data, 1);
if (!sock || skb_queue_empty(&sock->sk->sk_receive_queue))
return;


@ -173,9 +173,9 @@ static inline int vhost_has_feature(struct vhost_dev *dev, int bit)
{
unsigned acked_features;
acked_features =
rcu_dereference_index_check(dev->acked_features,
lockdep_is_held(&dev->mutex));
/* TODO: check that we are running from vhost_worker or dev mutex is
* held? */
acked_features = rcu_dereference_index_check(dev->acked_features, 1);
return acked_features & (1 << bit);
}


@ -1,5 +1,6 @@
header-y += byteorder/
header-y += can/
header-y += caif/
header-y += dvb/
header-y += hdlc/
header-y += isdn/


@ -0,0 +1,2 @@
header-y += caif_socket.h
header-y += if_caif.h


@ -150,6 +150,7 @@ static inline int ip_mroute_opt(int opt)
extern int ip_mroute_setsockopt(struct sock *, int, char __user *, unsigned int);
extern int ip_mroute_getsockopt(struct sock *, int, char __user *, int __user *);
extern int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg);
extern int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg);
extern int ip_mr_init(void);
#else
static inline


@ -136,6 +136,7 @@ extern int ip6_mroute_setsockopt(struct sock *, int, char __user *, unsigned int
extern int ip6_mroute_getsockopt(struct sock *, int, char __user *, int __user *);
extern int ip6_mr_input(struct sk_buff *skb);
extern int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg);
extern int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg);
extern int ip6_mr_init(void);
extern void ip6_mr_cleanup(void);
#else


@ -195,7 +195,8 @@ static inline int genlmsg_end(struct sk_buff *skb, void *hdr)
*/
static inline void genlmsg_cancel(struct sk_buff *skb, void *hdr)
{
nlmsg_cancel(skb, hdr - GENL_HDRLEN - NLMSG_HDRLEN);
if (hdr)
nlmsg_cancel(skb, hdr - GENL_HDRLEN - NLMSG_HDRLEN);
}
/**


@ -77,9 +77,6 @@ nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct)
if (e == NULL)
return;
if (!(e->ctmask & (1 << event)))
return;
set_bit(event, &e->cache);
}


@ -753,6 +753,8 @@ struct proto {
int level,
int optname, char __user *optval,
int __user *option);
int (*compat_ioctl)(struct sock *sk,
unsigned int cmd, unsigned long arg);
#endif
int (*sendmsg)(struct kiocb *iocb, struct sock *sk,
struct msghdr *msg, size_t len);


@ -64,6 +64,7 @@ static void free_info(struct kref *ref)
spin_unlock_bh(&bat_priv->vis_list_lock);
kfree_skb(info->skb_packet);
kfree(info);
}
/* Compare two vis packets, used by the hashing algorithm */
@ -268,10 +269,10 @@ int vis_seq_print_text(struct seq_file *seq, void *offset)
buff_pos += sprintf(buff + buff_pos, "%pM,",
entry->addr);
for (i = 0; i < packet->entries; i++)
for (j = 0; j < packet->entries; j++)
buff_pos += vis_data_read_entry(
buff + buff_pos,
&entries[i],
&entries[j],
entry->addr,
entry->primary);
@ -444,7 +445,7 @@ static struct vis_info *add_packet(struct bat_priv *bat_priv,
info);
if (hash_added < 0) {
/* did not work (for some reason) */
kref_put(&old_info->refcount, free_info);
kref_put(&info->refcount, free_info);
info = NULL;
}
@ -815,7 +816,7 @@ static void send_vis_packets(struct work_struct *work)
container_of(work, struct delayed_work, work);
struct bat_priv *bat_priv =
container_of(delayed_work, struct bat_priv, vis_work);
struct vis_info *info, *temp;
struct vis_info *info;
spin_lock_bh(&bat_priv->vis_hash_lock);
purge_vis_packets(bat_priv);
@ -825,8 +826,9 @@ static void send_vis_packets(struct work_struct *work)
send_list_add(bat_priv, bat_priv->my_vis_info);
}
list_for_each_entry_safe(info, temp, &bat_priv->vis_send_list,
send_list) {
while (!list_empty(&bat_priv->vis_send_list)) {
info = list_first_entry(&bat_priv->vis_send_list,
typeof(*info), send_list);
kref_get(&info->refcount);
spin_unlock_bh(&bat_priv->vis_hash_lock);


@ -328,12 +328,12 @@ static struct net_bridge_fdb_entry *fdb_create(struct hlist_head *head,
fdb = kmem_cache_alloc(br_fdb_cache, GFP_ATOMIC);
if (fdb) {
memcpy(fdb->addr.addr, addr, ETH_ALEN);
hlist_add_head_rcu(&fdb->hlist, head);
fdb->dst = source;
fdb->is_local = is_local;
fdb->is_static = is_local;
fdb->ageing_timer = jiffies;
hlist_add_head_rcu(&fdb->hlist, head);
}
return fdb;
}


@ -2563,7 +2563,8 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
map = rcu_dereference(rxqueue->rps_map);
if (map) {
if (map->len == 1) {
if (map->len == 1 &&
!rcu_dereference_raw(rxqueue->rps_flow_table)) {
tcpu = map->cpus[0];
if (cpu_online(tcpu))
cpu = tcpu;
@ -3424,6 +3425,8 @@ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
__skb_pull(skb, skb_headlen(skb));
skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
skb->vlan_tci = 0;
skb->dev = napi->dev;
skb->skb_iif = 0;
napi->skb = skb;
}


@ -1121,8 +1121,7 @@ static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[])
return -EOPNOTSUPP;
if (af_ops->validate_link_af) {
err = af_ops->validate_link_af(dev,
tb[IFLA_AF_SPEC]);
err = af_ops->validate_link_af(dev, af);
if (err < 0)
return err;
}
@ -1672,6 +1671,9 @@ static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind);
dest_net = rtnl_link_get_net(net, tb);
if (IS_ERR(dest_net))
return PTR_ERR(dest_net);
dev = rtnl_create_link(net, dest_net, ifname, ops, tb);
if (IS_ERR(dev))


@ -210,6 +210,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
shinfo = skb_shinfo(skb);
memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
atomic_set(&shinfo->dataref, 1);
kmemcheck_annotate_variable(shinfo->destructor_arg);
if (fclone) {
struct sk_buff *child = skb + 1;


@ -265,13 +265,13 @@ static void ec_tx_done(struct sk_buff *skb, int result)
static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len)
{
struct sock *sk = sock->sk;
struct sockaddr_ec *saddr=(struct sockaddr_ec *)msg->msg_name;
struct net_device *dev;
struct ec_addr addr;
int err;
unsigned char port, cb;
#if defined(CONFIG_ECONET_AUNUDP) || defined(CONFIG_ECONET_NATIVE)
struct sock *sk = sock->sk;
struct sk_buff *skb;
struct ec_cb *eb;
#endif
@ -488,10 +488,10 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
error_free_buf:
vfree(userbuf);
error:
#else
err = -EPROTOTYPE;
#endif
error:
mutex_unlock(&econet_mutex);
return err;


@ -880,6 +880,19 @@ int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
}
EXPORT_SYMBOL(inet_ioctl);
#ifdef CONFIG_COMPAT
int inet_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
struct sock *sk = sock->sk;
int err = -ENOIOCTLCMD;
if (sk->sk_prot->compat_ioctl)
err = sk->sk_prot->compat_ioctl(sk, cmd, arg);
return err;
}
#endif
const struct proto_ops inet_stream_ops = {
.family = PF_INET,
.owner = THIS_MODULE,
@ -903,6 +916,7 @@ const struct proto_ops inet_stream_ops = {
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_sock_common_setsockopt,
.compat_getsockopt = compat_sock_common_getsockopt,
.compat_ioctl = inet_compat_ioctl,
#endif
};
EXPORT_SYMBOL(inet_stream_ops);
@ -929,6 +943,7 @@ const struct proto_ops inet_dgram_ops = {
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_sock_common_setsockopt,
.compat_getsockopt = compat_sock_common_getsockopt,
.compat_ioctl = inet_compat_ioctl,
#endif
};
EXPORT_SYMBOL(inet_dgram_ops);
@ -959,6 +974,7 @@ static const struct proto_ops inet_sockraw_ops = {
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_sock_common_setsockopt,
.compat_getsockopt = compat_sock_common_getsockopt,
.compat_ioctl = inet_compat_ioctl,
#endif
};


@ -60,6 +60,7 @@
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/compat.h>
#include <net/ipip.h>
#include <net/checksum.h>
#include <net/netlink.h>
@ -1434,6 +1435,81 @@ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
}
}
#ifdef CONFIG_COMPAT
struct compat_sioc_sg_req {
struct in_addr src;
struct in_addr grp;
compat_ulong_t pktcnt;
compat_ulong_t bytecnt;
compat_ulong_t wrong_if;
};
struct compat_sioc_vif_req {
vifi_t vifi; /* Which iface */
compat_ulong_t icount;
compat_ulong_t ocount;
compat_ulong_t ibytes;
compat_ulong_t obytes;
};
int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
{
struct compat_sioc_sg_req sr;
struct compat_sioc_vif_req vr;
struct vif_device *vif;
struct mfc_cache *c;
struct net *net = sock_net(sk);
struct mr_table *mrt;
mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
if (mrt == NULL)
return -ENOENT;
switch (cmd) {
case SIOCGETVIFCNT:
if (copy_from_user(&vr, arg, sizeof(vr)))
return -EFAULT;
if (vr.vifi >= mrt->maxvif)
return -EINVAL;
read_lock(&mrt_lock);
vif = &mrt->vif_table[vr.vifi];
if (VIF_EXISTS(mrt, vr.vifi)) {
vr.icount = vif->pkt_in;
vr.ocount = vif->pkt_out;
vr.ibytes = vif->bytes_in;
vr.obytes = vif->bytes_out;
read_unlock(&mrt_lock);
if (copy_to_user(arg, &vr, sizeof(vr)))
return -EFAULT;
return 0;
}
read_unlock(&mrt_lock);
return -EADDRNOTAVAIL;
case SIOCGETSGCNT:
if (copy_from_user(&sr, arg, sizeof(sr)))
return -EFAULT;
rcu_read_lock();
c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
if (c) {
sr.pktcnt = c->mfc_un.res.pkt;
sr.bytecnt = c->mfc_un.res.bytes;
sr.wrong_if = c->mfc_un.res.wrong_if;
rcu_read_unlock();
if (copy_to_user(arg, &sr, sizeof(sr)))
return -EFAULT;
return 0;
}
rcu_read_unlock();
return -EADDRNOTAVAIL;
default:
return -ENOIOCTLCMD;
}
}
#endif
static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
{
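
The new ipmr_compat_ioctl() (and its IPv6 twin further down) exists because struct sioc_vif_req and struct sioc_sg_req use unsigned long counters: a 32-bit process on a 64-bit kernel passes 4-byte fields where the native handler expects 8-byte ones, so the copy_from_user/copy_to_user sizes disagree. The compat structs pin the layout with compat_ulong_t. A rough userspace size comparison; the field layout is mirrored from the hunk above and the resulting sizes are whatever the host ABI gives:

#include <stdio.h>
#include <stdint.h>

typedef unsigned short vifi_t;

struct native_sioc_vif_req {            /* roughly as a 64-bit kernel sees it */
        vifi_t vifi;
        unsigned long icount, ocount, ibytes, obytes;
};

struct compat_sioc_vif_req {            /* fixed 32-bit userspace view        */
        vifi_t vifi;
        uint32_t icount, ocount, ibytes, obytes;
};

int main(void)
{
        printf("native struct: %zu bytes\n", sizeof(struct native_sioc_vif_req));
        printf("compat struct: %zu bytes\n", sizeof(struct compat_sioc_vif_req));
        return 0;
}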


@ -60,12 +60,12 @@ static int checkentry(const struct xt_tgchk_param *par)
if (mangle->flags & ~ARPT_MANGLE_MASK ||
!(mangle->flags & ARPT_MANGLE_MASK))
return false;
return -EINVAL;
if (mangle->target != NF_DROP && mangle->target != NF_ACCEPT &&
mangle->target != XT_CONTINUE)
return false;
return true;
return -EINVAL;
return 0;
}
static struct xt_target arpt_mangle_reg __read_mostly = {


@ -76,6 +76,7 @@
#include <linux/seq_file.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/compat.h>
static struct raw_hashinfo raw_v4_hashinfo = {
.lock = __RW_LOCK_UNLOCKED(raw_v4_hashinfo.lock),
@ -838,6 +839,23 @@ static int raw_ioctl(struct sock *sk, int cmd, unsigned long arg)
}
}
#ifdef CONFIG_COMPAT
static int compat_raw_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg)
{
switch (cmd) {
case SIOCOUTQ:
case SIOCINQ:
return -ENOIOCTLCMD;
default:
#ifdef CONFIG_IP_MROUTE
return ipmr_compat_ioctl(sk, cmd, compat_ptr(arg));
#else
return -ENOIOCTLCMD;
#endif
}
}
#endif
struct proto raw_prot = {
.name = "RAW",
.owner = THIS_MODULE,
@ -860,6 +878,7 @@ struct proto raw_prot = {
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_raw_setsockopt,
.compat_getsockopt = compat_raw_getsockopt,
.compat_ioctl = compat_raw_ioctl,
#endif
};


@ -2707,6 +2707,11 @@ static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 coo
return NULL;
}
static unsigned int ipv4_blackhole_default_mtu(const struct dst_entry *dst)
{
return 0;
}
static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
{
}
@ -2716,6 +2721,7 @@ static struct dst_ops ipv4_dst_blackhole_ops = {
.protocol = cpu_to_be16(ETH_P_IP),
.destroy = ipv4_dst_destroy,
.check = ipv4_blackhole_dst_check,
.default_mtu = ipv4_blackhole_default_mtu,
.update_pmtu = ipv4_rt_blackhole_update_pmtu,
};


@ -34,6 +34,7 @@
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/compat.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
@ -1804,6 +1805,80 @@ int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
}
}
#ifdef CONFIG_COMPAT
struct compat_sioc_sg_req6 {
struct sockaddr_in6 src;
struct sockaddr_in6 grp;
compat_ulong_t pktcnt;
compat_ulong_t bytecnt;
compat_ulong_t wrong_if;
};
struct compat_sioc_mif_req6 {
mifi_t mifi;
compat_ulong_t icount;
compat_ulong_t ocount;
compat_ulong_t ibytes;
compat_ulong_t obytes;
};
int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
{
struct compat_sioc_sg_req6 sr;
struct compat_sioc_mif_req6 vr;
struct mif_device *vif;
struct mfc6_cache *c;
struct net *net = sock_net(sk);
struct mr6_table *mrt;
mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
if (mrt == NULL)
return -ENOENT;
switch (cmd) {
case SIOCGETMIFCNT_IN6:
if (copy_from_user(&vr, arg, sizeof(vr)))
return -EFAULT;
if (vr.mifi >= mrt->maxvif)
return -EINVAL;
read_lock(&mrt_lock);
vif = &mrt->vif6_table[vr.mifi];
if (MIF_EXISTS(mrt, vr.mifi)) {
vr.icount = vif->pkt_in;
vr.ocount = vif->pkt_out;
vr.ibytes = vif->bytes_in;
vr.obytes = vif->bytes_out;
read_unlock(&mrt_lock);
if (copy_to_user(arg, &vr, sizeof(vr)))
return -EFAULT;
return 0;
}
read_unlock(&mrt_lock);
return -EADDRNOTAVAIL;
case SIOCGETSGCNT_IN6:
if (copy_from_user(&sr, arg, sizeof(sr)))
return -EFAULT;
read_lock(&mrt_lock);
c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
if (c) {
sr.pktcnt = c->mfc_un.res.pkt;
sr.bytecnt = c->mfc_un.res.bytes;
sr.wrong_if = c->mfc_un.res.wrong_if;
read_unlock(&mrt_lock);
if (copy_to_user(arg, &sr, sizeof(sr)))
return -EFAULT;
return 0;
}
read_unlock(&mrt_lock);
return -EADDRNOTAVAIL;
default:
return -ENOIOCTLCMD;
}
}
#endif
static inline int ip6mr_forward2_finish(struct sk_buff *skb)
{


@ -31,6 +31,7 @@
#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>
#include <linux/skbuff.h>
#include <linux/compat.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
@ -1157,6 +1158,23 @@ static int rawv6_ioctl(struct sock *sk, int cmd, unsigned long arg)
}
}
#ifdef CONFIG_COMPAT
static int compat_rawv6_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg)
{
switch (cmd) {
case SIOCOUTQ:
case SIOCINQ:
return -ENOIOCTLCMD;
default:
#ifdef CONFIG_IPV6_MROUTE
return ip6mr_compat_ioctl(sk, cmd, compat_ptr(arg));
#else
return -ENOIOCTLCMD;
#endif
}
}
#endif
static void rawv6_close(struct sock *sk, long timeout)
{
if (inet_sk(sk)->inet_num == IPPROTO_RAW)
@ -1215,6 +1233,7 @@ struct proto rawv6_prot = {
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_rawv6_setsockopt,
.compat_getsockopt = compat_rawv6_getsockopt,
.compat_ioctl = compat_rawv6_ioctl,
#endif
};


@ -113,6 +113,11 @@ static struct dst_ops ip6_dst_ops_template = {
.local_out = __ip6_local_out,
};
static unsigned int ip6_blackhole_default_mtu(const struct dst_entry *dst)
{
return 0;
}
static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
{
}
@ -122,6 +127,7 @@ static struct dst_ops ip6_dst_blackhole_ops = {
.protocol = cpu_to_be16(ETH_P_IPV6),
.destroy = ip6_dst_destroy,
.check = ip6_dst_check,
.default_mtu = ip6_blackhole_default_mtu,
.update_pmtu = ip6_rt_blackhole_update_pmtu,
};
@ -194,7 +200,6 @@ static void ip6_dst_destroy(struct dst_entry *dst)
in6_dev_put(idev);
}
if (peer) {
BUG_ON(!(rt->rt6i_flags & RTF_CACHE));
rt->rt6i_peer = NULL;
inet_putpeer(peer);
}
@ -204,9 +209,6 @@ void rt6_bind_peer(struct rt6_info *rt, int create)
{
struct inet_peer *peer;
if (WARN_ON(!(rt->rt6i_flags & RTF_CACHE)))
return;
peer = inet_getpeer_v6(&rt->rt6i_dst.addr, create);
if (peer && cmpxchg(&rt->rt6i_peer, NULL, peer) != NULL)
inet_putpeer(peer);


@ -15,6 +15,8 @@
#include <net/addrconf.h>
#include <net/inet_frag.h>
static struct ctl_table empty[1];
static ctl_table ipv6_table_template[] = {
{
.procname = "route",
@ -35,6 +37,12 @@ static ctl_table ipv6_table_template[] = {
.mode = 0644,
.proc_handler = proc_dointvec
},
{
.procname = "neigh",
.maxlen = 0,
.mode = 0555,
.child = empty,
},
{ }
};
@ -152,7 +160,6 @@ static struct ctl_table_header *ip6_base;
int ipv6_static_sysctl_register(void)
{
static struct ctl_table empty[1];
ip6_base = register_sysctl_paths(net_ipv6_ctl_path, empty);
if (ip6_base == NULL)
return -ENOMEM;


@ -63,6 +63,9 @@ void nf_ct_deliver_cached_events(struct nf_conn *ct)
* this does not harm and it happens very rarely. */
unsigned long missed = e->missed;
if (!((events | missed) & e->ctmask))
goto out_unlock;
ret = notify->fcn(events | missed, &item);
if (unlikely(ret < 0 || missed)) {
spin_lock_bh(&ct->lock);


@ -667,6 +667,7 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq,
IPCTNL_MSG_CT_NEW, ct) < 0) {
nf_conntrack_get(&ct->ct_general);
cb->args[1] = (unsigned long)ct;
goto out;
}


@ -53,15 +53,13 @@ iprange_mt4(const struct sk_buff *skb, struct xt_action_param *par)
}
static inline int
iprange_ipv6_sub(const struct in6_addr *a, const struct in6_addr *b)
iprange_ipv6_lt(const struct in6_addr *a, const struct in6_addr *b)
{
unsigned int i;
int r;
for (i = 0; i < 4; ++i) {
r = ntohl(a->s6_addr32[i]) - ntohl(b->s6_addr32[i]);
if (r != 0)
return r;
if (a->s6_addr32[i] != b->s6_addr32[i])
return ntohl(a->s6_addr32[i]) < ntohl(b->s6_addr32[i]);
}
return 0;
@ -75,15 +73,15 @@ iprange_mt6(const struct sk_buff *skb, struct xt_action_param *par)
bool m;
if (info->flags & IPRANGE_SRC) {
m = iprange_ipv6_sub(&iph->saddr, &info->src_min.in6) < 0;
m |= iprange_ipv6_sub(&iph->saddr, &info->src_max.in6) > 0;
m = iprange_ipv6_lt(&iph->saddr, &info->src_min.in6);
m |= iprange_ipv6_lt(&info->src_max.in6, &iph->saddr);
m ^= !!(info->flags & IPRANGE_SRC_INV);
if (m)
return false;
}
if (info->flags & IPRANGE_DST) {
m = iprange_ipv6_sub(&iph->daddr, &info->dst_min.in6) < 0;
m |= iprange_ipv6_sub(&iph->daddr, &info->dst_max.in6) > 0;
m = iprange_ipv6_lt(&iph->daddr, &info->dst_min.in6);
m |= iprange_ipv6_lt(&info->dst_max.in6, &iph->daddr);
m ^= !!(info->flags & IPRANGE_DST_INV);
if (m)
return false;
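
The iprange rewrite replaces a subtraction-based three-way comparison with a direct less-than test: ntohl(a) - ntohl(b) is computed in 32-bit unsigned arithmetic and then interpreted as a signed int, so for sufficiently distant address words the sign comes out wrong and addresses land on the wrong side of the range. A short demonstration of the failure mode, with values chosen to trip it:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t a = 0x00000001;        /* small host-order address word */
        uint32_t b = 0x90000000;        /* large host-order address word */

        int old_style = (int)(a - b);   /* wraps to 0x70000001: positive */
        int new_style = (a < b);        /* 1: a really is below b        */

        printf("old test says a < b: %d\n", old_style < 0);   /* 0, wrong */
        printf("new test says a < b: %d\n", new_style);       /* 1, right */
        return 0;
}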