Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (82 commits)
  [NET]: Make sure sockets implement splice_read
  netconsole: avoid null pointer dereference at show_local_mac()
  [IPV6]: Fix reversed local_df test in ip6_fragment
  [XFRM]: Avoid bogus BUG() when throwing new policy away.
  [AF_KEY]: Fix bug in spdadd
  [NETFILTER] nf_conntrack_proto_tcp.c: Mistyped state corrected.
  net: xfrm statistics depend on INET
  [NETFILTER]: make secmark_tg_destroy() static
  [INET]: Unexport inet_listen_wlock
  [INET]: Unexport __inet_hash_connect
  [NET]: Improve cache line coherency of ingress qdisc
  [NET]: Fix race in dev_close(). (Bug 9750)
  [IPSEC]: Fix bogus usage of u64 on input sequence number
  [RTNETLINK]: Send a single notification on device state changes.
  [NETLABLE]: Hide netlbl_unlabel_audit_addr6 under ifdef CONFIG_IPV6.
  [NETLABEL]: Don't produce unused variables when IPv6 is off.
  [NETLABEL]: Compilation for CONFIG_AUDIT=n case.
  [GENETLINK]: Relax dances with genl_lock.
  [NETLABEL]: Fix lookup logic of netlbl_domhsh_search_def.
  [IPV6]: remove unused method declaration (net/ndisc.h).
  ...
commit f6866fecd6
60 changed files with 5664 additions and 1954 deletions
@@ -208,6 +208,7 @@ static int hci_uart_close(struct hci_dev *hdev)
		return 0;

	hci_uart_flush(hdev);
	hdev->flush = NULL;
	return 0;
}
@@ -168,7 +168,7 @@ static int debug = -1;
 * Warning: 64K ring has hardware issues and may lock up.
 */
#if defined(CONFIG_SH_DREAMCAST)
#define RX_BUF_IDX 1 /* 16K ring */
#define RX_BUF_IDX 0 /* 8K ring */
#else
#define RX_BUF_IDX 2 /* 32K ring */
#endif
@@ -931,6 +931,14 @@ config ENC28J60_WRITEVERIFY
	  Enable the verify after the buffer write useful for debugging purpose.
	  If unsure, say N.

config DM9000_DEBUGLEVEL
	int "DM9000 maximum debug level"
	depends on DM9000
	default 4
	help
	  The maximum level of debugging code compiled into the DM9000
	  driver.

config SMC911X
	tristate "SMSC LAN911[5678] support"
	select CRC32
@@ -2352,6 +2360,16 @@ config GELIC_NET
	  To compile this driver as a module, choose M here: the
	  module will be called ps3_gelic.

config GELIC_WIRELESS
	bool "PS3 Wireless support"
	depends on GELIC_NET
	help
	  This option adds the support for the wireless feature of PS3.
	  If you have the wireless-less model of PS3 or have no plan to
	  use wireless feature, disabling this option saves memory. As
	  the driver automatically distinguishes the models, you can
	  safely enable this option even if you have a wireless-less model.

config GIANFAR
	tristate "Gianfar Ethernet"
	depends on FSL_SOC
@@ -70,7 +70,8 @@ obj-$(CONFIG_BNX2X) += bnx2x.o
spidernet-y += spider_net.o spider_net_ethtool.o
obj-$(CONFIG_SPIDER_NET) += spidernet.o sungem_phy.o
obj-$(CONFIG_GELIC_NET) += ps3_gelic.o
ps3_gelic-objs += ps3_gelic_net.o
gelic_wireless-$(CONFIG_GELIC_WIRELESS) += ps3_gelic_wireless.o
ps3_gelic-objs += ps3_gelic_net.o $(gelic_wireless-y)
obj-$(CONFIG_TC35815) += tc35815.o
obj-$(CONFIG_SKGE) += skge.o
obj-$(CONFIG_SKY2) += sky2.o
@@ -404,7 +404,7 @@ void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh)
	if (neigh->nud_state & NUD_FAILED) {
		arpq = e->arpq_head;
		e->arpq_head = e->arpq_tail = NULL;
	} else if (neigh_is_connected(neigh))
	} else if (neigh->nud_state & (NUD_CONNECTED|NUD_STALE))
		setup_l2e_send_pending(dev, NULL, e);
	} else {
		e->state = neigh_is_connected(neigh) ?
@@ -1059,6 +1059,14 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
		htonl(V_WR_TID(q->token)));
}

static inline void t3_stop_queue(struct net_device *dev, struct sge_qset *qs,
				 struct sge_txq *q)
{
	netif_stop_queue(dev);
	set_bit(TXQ_ETH, &qs->txq_stopped);
	q->stops++;
}

/**
 * eth_xmit - add a packet to the Ethernet Tx queue
 * @skb: the packet
@@ -1090,31 +1098,18 @@ int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
	ndesc = calc_tx_descs(skb);

	if (unlikely(credits < ndesc)) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);
			set_bit(TXQ_ETH, &qs->txq_stopped);
			q->stops++;
			dev_err(&adap->pdev->dev,
				"%s: Tx ring %u full while queue awake!\n",
				dev->name, q->cntxt_id & 7);
		}
		t3_stop_queue(dev, qs, q);
		dev_err(&adap->pdev->dev,
			"%s: Tx ring %u full while queue awake!\n",
			dev->name, q->cntxt_id & 7);
		spin_unlock(&q->lock);
		return NETDEV_TX_BUSY;
	}

	q->in_use += ndesc;
	if (unlikely(credits - ndesc < q->stop_thres)) {
		q->stops++;
		netif_stop_queue(dev);
		set_bit(TXQ_ETH, &qs->txq_stopped);
#if !USE_GTS
		if (should_restart_tx(q) &&
		    test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
			q->restarts++;
			netif_wake_queue(dev);
		}
#endif
	}
	if (unlikely(credits - ndesc < q->stop_thres))
		if (USE_GTS || !should_restart_tx(q))
			t3_stop_queue(dev, qs, q);

	gen = q->gen;
	q->unacked += ndesc;
File diff suppressed because it is too large
@@ -926,8 +926,6 @@ e1000_probe(struct pci_dev *pdev,
{
	struct net_device *netdev;
	struct e1000_adapter *adapter;
	unsigned long mmio_start, mmio_len;
	unsigned long flash_start, flash_len;

	static int cards_found = 0;
	static int global_quad_port_a = 0; /* global ksp3 port a indication */
@@ -970,11 +968,9 @@ e1000_probe(struct pci_dev *pdev,
	adapter->hw.back = adapter;
	adapter->msg_enable = (1 << debug) - 1;

	mmio_start = pci_resource_start(pdev, BAR_0);
	mmio_len = pci_resource_len(pdev, BAR_0);

	err = -EIO;
	adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
	adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, BAR_0),
				      pci_resource_len(pdev, BAR_0));
	if (!adapter->hw.hw_addr)
		goto err_ioremap;
@@ -1009,10 +1005,6 @@ e1000_probe(struct pci_dev *pdev,
#endif
	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	netdev->mem_start = mmio_start;
	netdev->mem_end = mmio_start + mmio_len;
	netdev->base_addr = adapter->hw.io_base;

	adapter->bd_number = cards_found;

	/* setup the private structure */
@@ -1025,9 +1017,9 @@ e1000_probe(struct pci_dev *pdev,
	 * because it depends on mac_type */
	if ((adapter->hw.mac_type == e1000_ich8lan) &&
	    (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
		flash_start = pci_resource_start(pdev, 1);
		flash_len = pci_resource_len(pdev, 1);
		adapter->hw.flash_address = ioremap(flash_start, flash_len);
		adapter->hw.flash_address =
			ioremap(pci_resource_start(pdev, 1),
				pci_resource_len(pdev, 1));
		if (!adapter->hw.flash_address)
			goto err_flashmap;
	}
@@ -166,21 +166,24 @@
 * Hardware access:
 */

#define DEV_NEED_TIMERIRQ 0x0001 /* set the timer irq flag in the irq mask */
#define DEV_NEED_LINKTIMER 0x0002 /* poll link settings. Relies on the timer irq */
#define DEV_HAS_LARGEDESC 0x0004 /* device supports jumbo frames and needs packet format 2 */
#define DEV_HAS_HIGH_DMA 0x0008 /* device supports 64bit dma */
#define DEV_HAS_CHECKSUM 0x0010 /* device supports tx and rx checksum offloads */
#define DEV_HAS_VLAN 0x0020 /* device supports vlan tagging and striping */
#define DEV_HAS_MSI 0x0040 /* device supports MSI */
#define DEV_HAS_MSI_X 0x0080 /* device supports MSI-X */
#define DEV_HAS_POWER_CNTRL 0x0100 /* device supports power savings */
#define DEV_HAS_PAUSEFRAME_TX 0x0200 /* device supports tx pause frames */
#define DEV_HAS_STATISTICS_V1 0x0400 /* device supports hw statistics version 1 */
#define DEV_HAS_STATISTICS_V2 0x0800 /* device supports hw statistics version 2 */
#define DEV_HAS_TEST_EXTENDED 0x1000 /* device supports extended diagnostic test */
#define DEV_HAS_MGMT_UNIT 0x2000 /* device supports management unit */
#define DEV_HAS_CORRECT_MACADDR 0x4000 /* device supports correct mac address order */
#define DEV_NEED_TIMERIRQ 0x00001 /* set the timer irq flag in the irq mask */
#define DEV_NEED_LINKTIMER 0x00002 /* poll link settings. Relies on the timer irq */
#define DEV_HAS_LARGEDESC 0x00004 /* device supports jumbo frames and needs packet format 2 */
#define DEV_HAS_HIGH_DMA 0x00008 /* device supports 64bit dma */
#define DEV_HAS_CHECKSUM 0x00010 /* device supports tx and rx checksum offloads */
#define DEV_HAS_VLAN 0x00020 /* device supports vlan tagging and striping */
#define DEV_HAS_MSI 0x00040 /* device supports MSI */
#define DEV_HAS_MSI_X 0x00080 /* device supports MSI-X */
#define DEV_HAS_POWER_CNTRL 0x00100 /* device supports power savings */
#define DEV_HAS_STATISTICS_V1 0x00200 /* device supports hw statistics version 1 */
#define DEV_HAS_STATISTICS_V2 0x00400 /* device supports hw statistics version 2 */
#define DEV_HAS_TEST_EXTENDED 0x00800 /* device supports extended diagnostic test */
#define DEV_HAS_MGMT_UNIT 0x01000 /* device supports management unit */
#define DEV_HAS_CORRECT_MACADDR 0x02000 /* device supports correct mac address order */
#define DEV_HAS_COLLISION_FIX 0x04000 /* device supports tx collision fix */
#define DEV_HAS_PAUSEFRAME_TX_V1 0x08000 /* device supports tx pause frames version 1 */
#define DEV_HAS_PAUSEFRAME_TX_V2 0x10000 /* device supports tx pause frames version 2 */
#define DEV_HAS_PAUSEFRAME_TX_V3 0x20000 /* device supports tx pause frames version 3 */

enum {
	NvRegIrqStatus = 0x000,
@@ -266,9 +269,12 @@ enum {
#define NVREG_RNDSEED_FORCE3 0x7400

	NvRegTxDeferral = 0xA0,
#define NVREG_TX_DEFERRAL_DEFAULT 0x15050f
#define NVREG_TX_DEFERRAL_RGMII_10_100 0x16070f
#define NVREG_TX_DEFERRAL_RGMII_1000 0x14050f
#define NVREG_TX_DEFERRAL_DEFAULT 0x15050f
#define NVREG_TX_DEFERRAL_RGMII_10_100 0x16070f
#define NVREG_TX_DEFERRAL_RGMII_1000 0x14050f
#define NVREG_TX_DEFERRAL_RGMII_STRETCH_10 0x16190f
#define NVREG_TX_DEFERRAL_RGMII_STRETCH_100 0x16300f
#define NVREG_TX_DEFERRAL_MII_STRETCH 0x152000
	NvRegRxDeferral = 0xA4,
#define NVREG_RX_DEFERRAL_DEFAULT 0x16
	NvRegMacAddrA = 0xA8,
@@ -318,8 +324,10 @@ enum {
	NvRegTxRingPhysAddrHigh = 0x148,
	NvRegRxRingPhysAddrHigh = 0x14C,
	NvRegTxPauseFrame = 0x170,
#define NVREG_TX_PAUSEFRAME_DISABLE 0x01ff0080
#define NVREG_TX_PAUSEFRAME_ENABLE 0x01800010
#define NVREG_TX_PAUSEFRAME_DISABLE 0x0fff0080
#define NVREG_TX_PAUSEFRAME_ENABLE_V1 0x01800010
#define NVREG_TX_PAUSEFRAME_ENABLE_V2 0x056003f0
#define NVREG_TX_PAUSEFRAME_ENABLE_V3 0x09f00880
	NvRegMIIStatus = 0x180,
#define NVREG_MIISTAT_ERROR 0x0001
#define NVREG_MIISTAT_LINKCHANGE 0x0008
@@ -2751,7 +2759,12 @@ static void nv_update_pause(struct net_device *dev, u32 pause_flags)
	if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) {
		u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX;
		if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) {
			writel(NVREG_TX_PAUSEFRAME_ENABLE, base + NvRegTxPauseFrame);
			u32 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V1;
			if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V2)
				pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V2;
			if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V3)
				pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V3;
			writel(pause_enable, base + NvRegTxPauseFrame);
			writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1);
			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
		} else {
@@ -2785,6 +2798,7 @@ static int nv_update_linkspeed(struct net_device *dev)
	int retval = 0;
	u32 control_1000, status_1000, phyreg, pause_flags, txreg;
	u32 txrxFlags = 0;
	u32 phy_exp;

	/* BMSR_LSTATUS is latched, read it twice:
	 * we want the current value.
@@ -2912,13 +2926,25 @@ static int nv_update_linkspeed(struct net_device *dev)
		phyreg |= PHY_1000;
	writel(phyreg, base + NvRegPhyInterface);

	phy_exp = mii_rw(dev, np->phyaddr, MII_EXPANSION, MII_READ) & EXPANSION_NWAY; /* autoneg capable */
	if (phyreg & PHY_RGMII) {
		if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
		if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) {
			txreg = NVREG_TX_DEFERRAL_RGMII_1000;
		else
			txreg = NVREG_TX_DEFERRAL_RGMII_10_100;
		} else {
			if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX)) {
				if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_10)
					txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_10;
				else
					txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_100;
			} else {
				txreg = NVREG_TX_DEFERRAL_RGMII_10_100;
			}
		}
	} else {
		txreg = NVREG_TX_DEFERRAL_DEFAULT;
		if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX))
			txreg = NVREG_TX_DEFERRAL_MII_STRETCH;
		else
			txreg = NVREG_TX_DEFERRAL_DEFAULT;
	}
	writel(txreg, base + NvRegTxDeferral);
@@ -5155,7 +5181,9 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
	}

	np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG;
	if (id->driver_data & DEV_HAS_PAUSEFRAME_TX) {
	if ((id->driver_data & DEV_HAS_PAUSEFRAME_TX_V1) ||
	    (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) ||
	    (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V3)) {
		np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ;
	}
@ -5559,107 +5587,107 @@ static struct pci_device_id pci_tbl[] = {
|
|||
},
|
||||
{ /* MCP55 Ethernet Controller */
|
||||
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
|
||||
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
|
||||
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
|
||||
},
|
||||
{ /* MCP55 Ethernet Controller */
|
||||
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
|
||||
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
|
||||
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
|
||||
},
|
||||
{ /* MCP61 Ethernet Controller */
|
||||
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_16),
|
||||
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
|
||||
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
|
||||
},
|
||||
{ /* MCP61 Ethernet Controller */
|
||||
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_17),
|
||||
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
|
||||
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
|
||||
},
|
||||
{ /* MCP61 Ethernet Controller */
|
||||
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_18),
|
||||
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
|
||||
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
|
||||
},
|
||||
{ /* MCP61 Ethernet Controller */
|
||||
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_19),
|
||||
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
|
||||
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
|
||||
},
|
||||
{ /* MCP65 Ethernet Controller */
|
||||
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20),
|
||||
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
|
||||
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
|
||||
},
|
||||
{ /* MCP65 Ethernet Controller */
|
||||
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21),
|
||||
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
|
||||
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
|
||||
},
|
||||
{ /* MCP65 Ethernet Controller */
|
||||
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22),
|
||||
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
|
||||
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
|
||||
},
|
||||
{ /* MCP65 Ethernet Controller */
|
||||
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23),
|
||||
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
|
||||
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
|
||||
},
|
||||
{ /* MCP67 Ethernet Controller */
|
||||
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_24),
|
||||
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
|
||||
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
|
||||
},
|
||||
{ /* MCP67 Ethernet Controller */
|
||||
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_25),
|
||||
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
|
||||
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
|
||||
},
|
||||
{ /* MCP67 Ethernet Controller */
|
||||
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_26),
|
||||
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
|
||||
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
|
||||
},
|
||||
{ /* MCP67 Ethernet Controller */
|
||||
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_27),
|
||||
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
|
||||
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
|
||||
},
|
||||
{ /* MCP73 Ethernet Controller */
|
||||
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_28),
|
||||
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
|
||||
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
|
||||
},
|
||||
{ /* MCP73 Ethernet Controller */
|
||||
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_29),
|
||||
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
|
||||
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
|
||||
},
|
||||
{ /* MCP73 Ethernet Controller */
|
||||
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_30),
|
||||
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
|
||||
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
|
||||
},
|
||||
{ /* MCP73 Ethernet Controller */
|
||||
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_31),
|
||||
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
|
||||
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
|
||||
},
|
||||
{ /* MCP77 Ethernet Controller */
|
||||
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_32),
|
||||
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
|
||||
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
|
||||
},
|
||||
{ /* MCP77 Ethernet Controller */
|
||||
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_33),
|
||||
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
|
||||
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
|
||||
},
|
||||
{ /* MCP77 Ethernet Controller */
|
||||
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_34),
|
||||
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
|
||||
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
|
||||
},
|
||||
{ /* MCP77 Ethernet Controller */
|
||||
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_35),
|
||||
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
|
||||
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
|
||||
},
|
||||
{ /* MCP79 Ethernet Controller */
|
||||
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_36),
|
||||
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
|
||||
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
|
||||
},
|
||||
{ /* MCP79 Ethernet Controller */
|
||||
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_37),
|
||||
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
|
||||
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
|
||||
},
|
||||
{ /* MCP79 Ethernet Controller */
|
||||
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_38),
|
||||
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
|
||||
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
|
||||
},
|
||||
{ /* MCP79 Ethernet Controller */
|
||||
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_39),
|
||||
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
|
||||
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
|
||||
},
|
||||
{0,},
|
||||
};
|
||||
|
|
|
@@ -309,8 +309,8 @@ static ssize_t show_local_mac(struct netconsole_target *nt, char *buf)
	struct net_device *dev = nt->np.dev;

	DECLARE_MAC_BUF(mac);
	return snprintf(buf, PAGE_SIZE, "%s\n",
			print_mac(mac, dev->dev_addr));
	return snprintf(buf, PAGE_SIZE, "%s\n", dev ?
			print_mac(mac, dev->dev_addr) : "ff:ff:ff:ff:ff:ff");
}

static ssize_t show_remote_mac(struct netconsole_target *nt, char *buf)
1140
drivers/net/ni52.c
File diff suppressed because it is too large
@ -36,12 +36,12 @@
|
|||
|
||||
struct scp_struct
|
||||
{
|
||||
unsigned short zero_dum0; /* has to be zero */
|
||||
unsigned char sysbus; /* 0=16Bit,1=8Bit */
|
||||
unsigned char zero_dum1; /* has to be zero for 586 */
|
||||
unsigned short zero_dum2;
|
||||
unsigned short zero_dum3;
|
||||
char *iscp; /* pointer to the iscp-block */
|
||||
u16 zero_dum0; /* has to be zero */
|
||||
u8 sysbus; /* 0=16Bit,1=8Bit */
|
||||
u8 zero_dum1; /* has to be zero for 586 */
|
||||
u8 zero_dum2;
|
||||
u8 zero_dum3;
|
||||
u32 iscp; /* pointer to the iscp-block */
|
||||
};
|
||||
|
||||
|
||||
|
@ -50,10 +50,10 @@ struct scp_struct
|
|||
*/
|
||||
struct iscp_struct
|
||||
{
|
||||
unsigned char busy; /* 586 clears after successful init */
|
||||
unsigned char zero_dummy; /* has to be zero */
|
||||
unsigned short scb_offset; /* pointeroffset to the scb_base */
|
||||
char *scb_base; /* base-address of all 16-bit offsets */
|
||||
u8 busy; /* 586 clears after successful init */
|
||||
u8 zero_dummy; /* has to be zero */
|
||||
u16 scb_offset; /* pointeroffset to the scb_base */
|
||||
u32 scb_base; /* base-address of all 16-bit offsets */
|
||||
};
|
||||
|
||||
/*
|
||||
|
@ -61,16 +61,16 @@ struct iscp_struct
|
|||
*/
|
||||
struct scb_struct
|
||||
{
|
||||
unsigned char rus;
|
||||
unsigned char cus;
|
||||
unsigned char cmd_ruc; /* command word: RU part */
|
||||
unsigned char cmd_cuc; /* command word: CU part & ACK */
|
||||
unsigned short cbl_offset; /* pointeroffset, command block list */
|
||||
unsigned short rfa_offset; /* pointeroffset, receive frame area */
|
||||
unsigned short crc_errs; /* CRC-Error counter */
|
||||
unsigned short aln_errs; /* alignmenterror counter */
|
||||
unsigned short rsc_errs; /* Resourceerror counter */
|
||||
unsigned short ovrn_errs; /* OVerrunerror counter */
|
||||
u8 rus;
|
||||
u8 cus;
|
||||
u8 cmd_ruc; /* command word: RU part */
|
||||
u8 cmd_cuc; /* command word: CU part & ACK */
|
||||
u16 cbl_offset; /* pointeroffset, command block list */
|
||||
u16 rfa_offset; /* pointeroffset, receive frame area */
|
||||
u16 crc_errs; /* CRC-Error counter */
|
||||
u16 aln_errs; /* alignmenterror counter */
|
||||
u16 rsc_errs; /* Resourceerror counter */
|
||||
u16 ovrn_errs; /* OVerrunerror counter */
|
||||
};
|
||||
|
||||
/*
|
||||
|
@ -119,16 +119,16 @@ struct scb_struct
|
|||
*/
|
||||
struct rfd_struct
|
||||
{
|
||||
unsigned char stat_low; /* status word */
|
||||
unsigned char stat_high; /* status word */
|
||||
unsigned char rfd_sf; /* 82596 mode only */
|
||||
unsigned char last; /* Bit15,Last Frame on List / Bit14,suspend */
|
||||
unsigned short next; /* linkoffset to next RFD */
|
||||
unsigned short rbd_offset; /* pointeroffset to RBD-buffer */
|
||||
unsigned char dest[6]; /* ethernet-address, destination */
|
||||
unsigned char source[6]; /* ethernet-address, source */
|
||||
unsigned short length; /* 802.3 frame-length */
|
||||
unsigned short zero_dummy; /* dummy */
|
||||
u8 stat_low; /* status word */
|
||||
u8 stat_high; /* status word */
|
||||
u8 rfd_sf; /* 82596 mode only */
|
||||
u8 last; /* Bit15,Last Frame on List / Bit14,suspend */
|
||||
u16 next; /* linkoffset to next RFD */
|
||||
u16 rbd_offset; /* pointeroffset to RBD-buffer */
|
||||
u8 dest[6]; /* ethernet-address, destination */
|
||||
u8 source[6]; /* ethernet-address, source */
|
||||
u16 length; /* 802.3 frame-length */
|
||||
u16 zero_dummy; /* dummy */
|
||||
};
|
||||
|
||||
#define RFD_LAST 0x80 /* last: last rfd in the list */
|
||||
|
@ -153,11 +153,11 @@ struct rfd_struct
|
|||
*/
|
||||
struct rbd_struct
|
||||
{
|
||||
unsigned short status; /* status word,number of used bytes in buff */
|
||||
unsigned short next; /* pointeroffset to next RBD */
|
||||
char *buffer; /* receive buffer address pointer */
|
||||
unsigned short size; /* size of this buffer */
|
||||
unsigned short zero_dummy; /* dummy */
|
||||
u16 status; /* status word,number of used bytes in buff */
|
||||
u16 next; /* pointeroffset to next RBD */
|
||||
u32 buffer; /* receive buffer address pointer */
|
||||
u16 size; /* size of this buffer */
|
||||
u16 zero_dummy; /* dummy */
|
||||
};
|
||||
|
||||
#define RBD_LAST 0x8000 /* last buffer */
|
||||
|
@ -195,9 +195,9 @@ struct rbd_struct
|
|||
*/
|
||||
struct nop_cmd_struct
|
||||
{
|
||||
unsigned short cmd_status; /* status of this command */
|
||||
unsigned short cmd_cmd; /* the command itself (+bits) */
|
||||
unsigned short cmd_link; /* offsetpointer to next command */
|
||||
u16 cmd_status; /* status of this command */
|
||||
u16 cmd_cmd; /* the command itself (+bits) */
|
||||
u16 cmd_link; /* offsetpointer to next command */
|
||||
};
|
||||
|
||||
/*
|
||||
|
@ -205,10 +205,10 @@ struct nop_cmd_struct
|
|||
*/
|
||||
struct iasetup_cmd_struct
|
||||
{
|
||||
unsigned short cmd_status;
|
||||
unsigned short cmd_cmd;
|
||||
unsigned short cmd_link;
|
||||
unsigned char iaddr[6];
|
||||
u16 cmd_status;
|
||||
u16 cmd_cmd;
|
||||
u16 cmd_link;
|
||||
u8 iaddr[6];
|
||||
};
|
||||
|
||||
/*
|
||||
|
@ -216,21 +216,21 @@ struct iasetup_cmd_struct
|
|||
*/
|
||||
struct configure_cmd_struct
|
||||
{
|
||||
unsigned short cmd_status;
|
||||
unsigned short cmd_cmd;
|
||||
unsigned short cmd_link;
|
||||
unsigned char byte_cnt; /* size of the config-cmd */
|
||||
unsigned char fifo; /* fifo/recv monitor */
|
||||
unsigned char sav_bf; /* save bad frames (bit7=1)*/
|
||||
unsigned char adr_len; /* adr_len(0-2),al_loc(3),pream(4-5),loopbak(6-7)*/
|
||||
unsigned char priority; /* lin_prio(0-2),exp_prio(4-6),bof_metd(7) */
|
||||
unsigned char ifs; /* inter frame spacing */
|
||||
unsigned char time_low; /* slot time low */
|
||||
unsigned char time_high; /* slot time high(0-2) and max. retries(4-7) */
|
||||
unsigned char promisc; /* promisc-mode(0) , et al (1-7) */
|
||||
unsigned char carr_coll; /* carrier(0-3)/collision(4-7) stuff */
|
||||
unsigned char fram_len; /* minimal frame len */
|
||||
unsigned char dummy; /* dummy */
|
||||
u16 cmd_status;
|
||||
u16 cmd_cmd;
|
||||
u16 cmd_link;
|
||||
u8 byte_cnt; /* size of the config-cmd */
|
||||
u8 fifo; /* fifo/recv monitor */
|
||||
u8 sav_bf; /* save bad frames (bit7=1)*/
|
||||
u8 adr_len; /* adr_len(0-2),al_loc(3),pream(4-5),loopbak(6-7)*/
|
||||
u8 priority; /* lin_prio(0-2),exp_prio(4-6),bof_metd(7) */
|
||||
u8 ifs; /* inter frame spacing */
|
||||
u8 time_low; /* slot time low */
|
||||
u8 time_high; /* slot time high(0-2) and max. retries(4-7) */
|
||||
u8 promisc; /* promisc-mode(0) , et al (1-7) */
|
||||
u8 carr_coll; /* carrier(0-3)/collision(4-7) stuff */
|
||||
u8 fram_len; /* minimal frame len */
|
||||
u8 dummy; /* dummy */
|
||||
};
|
||||
|
||||
/*
|
||||
|
@ -238,11 +238,11 @@ struct configure_cmd_struct
|
|||
*/
|
||||
struct mcsetup_cmd_struct
|
||||
{
|
||||
unsigned short cmd_status;
|
||||
unsigned short cmd_cmd;
|
||||
unsigned short cmd_link;
|
||||
unsigned short mc_cnt; /* number of bytes in the MC-List */
|
||||
unsigned char mc_list[0][6]; /* pointer to 6 bytes entries */
|
||||
u16 cmd_status;
|
||||
u16 cmd_cmd;
|
||||
u16 cmd_link;
|
||||
u16 mc_cnt; /* number of bytes in the MC-List */
|
||||
u8 mc_list[0][6]; /* pointer to 6 bytes entries */
|
||||
};
|
||||
|
||||
/*
|
||||
|
@ -250,10 +250,10 @@ struct mcsetup_cmd_struct
|
|||
*/
|
||||
struct dump_cmd_struct
|
||||
{
|
||||
unsigned short cmd_status;
|
||||
unsigned short cmd_cmd;
|
||||
unsigned short cmd_link;
|
||||
unsigned short dump_offset; /* pointeroffset to DUMP space */
|
||||
u16 cmd_status;
|
||||
u16 cmd_cmd;
|
||||
u16 cmd_link;
|
||||
u16 dump_offset; /* pointeroffset to DUMP space */
|
||||
};
|
||||
|
||||
/*
|
||||
|
@ -261,12 +261,12 @@ struct dump_cmd_struct
|
|||
*/
|
||||
struct transmit_cmd_struct
|
||||
{
|
||||
unsigned short cmd_status;
|
||||
unsigned short cmd_cmd;
|
||||
unsigned short cmd_link;
|
||||
unsigned short tbd_offset; /* pointeroffset to TBD */
|
||||
unsigned char dest[6]; /* destination address of the frame */
|
||||
unsigned short length; /* user defined: 802.3 length / Ether type */
|
||||
u16 cmd_status;
|
||||
u16 cmd_cmd;
|
||||
u16 cmd_link;
|
||||
u16 tbd_offset; /* pointeroffset to TBD */
|
||||
u8 dest[6]; /* destination address of the frame */
|
||||
u16 length; /* user defined: 802.3 length / Ether type */
|
||||
};
|
||||
|
||||
#define TCMD_ERRMASK 0x0fa0
|
||||
|
@ -281,10 +281,10 @@ struct transmit_cmd_struct
|
|||
|
||||
struct tdr_cmd_struct
|
||||
{
|
||||
unsigned short cmd_status;
|
||||
unsigned short cmd_cmd;
|
||||
unsigned short cmd_link;
|
||||
unsigned short status;
|
||||
u16 cmd_status;
|
||||
u16 cmd_cmd;
|
||||
u16 cmd_link;
|
||||
u16 status;
|
||||
};
|
||||
|
||||
#define TDR_LNK_OK 0x8000 /* No link problem identified */
|
||||
|
@ -298,9 +298,9 @@ struct tdr_cmd_struct
|
|||
*/
|
||||
struct tbd_struct
|
||||
{
|
||||
unsigned short size; /* size + EOF-Flag(15) */
|
||||
unsigned short next; /* pointeroffset to next TBD */
|
||||
char *buffer; /* pointer to buffer */
|
||||
u16 size; /* size + EOF-Flag(15) */
|
||||
u16 next; /* pointeroffset to next TBD */
|
||||
u32 buffer; /* pointer to buffer */
|
||||
};
|
||||
|
||||
#define TBD_LAST 0x8000 /* EOF-Flag, indicates last buffer in list */
|
||||
|
|
|
@ -174,7 +174,11 @@ static int homepna[MAX_UNITS];
|
|||
#define RX_RING_SIZE (1 << (PCNET32_LOG_RX_BUFFERS))
|
||||
#define RX_MAX_RING_SIZE (1 << (PCNET32_LOG_MAX_RX_BUFFERS))
|
||||
|
||||
#define PKT_BUF_SZ 1544
|
||||
#define PKT_BUF_SKB 1544
|
||||
/* actual buffer length after being aligned */
|
||||
#define PKT_BUF_SIZE (PKT_BUF_SKB - NET_IP_ALIGN)
|
||||
/* chip wants twos complement of the (aligned) buffer length */
|
||||
#define NEG_BUF_SIZE (NET_IP_ALIGN - PKT_BUF_SKB)
|
||||
|
||||
/* Offsets from base I/O address. */
|
||||
#define PCNET32_WIO_RDP 0x10
|
||||
|
@ -604,7 +608,7 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
|
|||
/* now allocate any new buffers needed */
|
||||
for (; new < size; new++ ) {
|
||||
struct sk_buff *rx_skbuff;
|
||||
new_skb_list[new] = dev_alloc_skb(PKT_BUF_SZ);
|
||||
new_skb_list[new] = dev_alloc_skb(PKT_BUF_SKB);
|
||||
if (!(rx_skbuff = new_skb_list[new])) {
|
||||
/* keep the original lists and buffers */
|
||||
if (netif_msg_drv(lp))
|
||||
|
@ -613,20 +617,20 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
|
|||
dev->name);
|
||||
goto free_all_new;
|
||||
}
|
||||
skb_reserve(rx_skbuff, 2);
|
||||
skb_reserve(rx_skbuff, NET_IP_ALIGN);
|
||||
|
||||
new_dma_addr_list[new] =
|
||||
pci_map_single(lp->pci_dev, rx_skbuff->data,
|
||||
PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE);
|
||||
PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
|
||||
new_rx_ring[new].base = cpu_to_le32(new_dma_addr_list[new]);
|
||||
new_rx_ring[new].buf_length = cpu_to_le16(2 - PKT_BUF_SZ);
|
||||
new_rx_ring[new].buf_length = cpu_to_le16(NEG_BUF_SIZE);
|
||||
new_rx_ring[new].status = cpu_to_le16(0x8000);
|
||||
}
|
||||
/* and free any unneeded buffers */
|
||||
for (; new < lp->rx_ring_size; new++) {
|
||||
if (lp->rx_skbuff[new]) {
|
||||
pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[new],
|
||||
PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE);
|
||||
PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
|
||||
dev_kfree_skb(lp->rx_skbuff[new]);
|
||||
}
|
||||
}
|
||||
|
@ -651,7 +655,7 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
|
|||
for (; --new >= lp->rx_ring_size; ) {
|
||||
if (new_skb_list[new]) {
|
||||
pci_unmap_single(lp->pci_dev, new_dma_addr_list[new],
|
||||
PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE);
|
||||
PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
|
||||
dev_kfree_skb(new_skb_list[new]);
|
||||
}
|
||||
}
|
||||
|
@ -678,7 +682,7 @@ static void pcnet32_purge_rx_ring(struct net_device *dev)
|
|||
wmb(); /* Make sure adapter sees owner change */
|
||||
if (lp->rx_skbuff[i]) {
|
||||
pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i],
|
||||
PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE);
|
||||
PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
|
||||
dev_kfree_skb_any(lp->rx_skbuff[i]);
|
||||
}
|
||||
lp->rx_skbuff[i] = NULL;
|
||||
|
@ -1201,7 +1205,7 @@ static void pcnet32_rx_entry(struct net_device *dev,
|
|||
pkt_len = (le32_to_cpu(rxp->msg_length) & 0xfff) - 4;
|
||||
|
||||
/* Discard oversize frames. */
|
||||
if (unlikely(pkt_len > PKT_BUF_SZ - 2)) {
|
||||
if (unlikely(pkt_len > PKT_BUF_SIZE)) {
|
||||
if (netif_msg_drv(lp))
|
||||
printk(KERN_ERR "%s: Impossible packet size %d!\n",
|
||||
dev->name, pkt_len);
|
||||
|
@ -1218,26 +1222,26 @@ static void pcnet32_rx_entry(struct net_device *dev,
|
|||
if (pkt_len > rx_copybreak) {
|
||||
struct sk_buff *newskb;
|
||||
|
||||
if ((newskb = dev_alloc_skb(PKT_BUF_SZ))) {
|
||||
skb_reserve(newskb, 2);
|
||||
if ((newskb = dev_alloc_skb(PKT_BUF_SKB))) {
|
||||
skb_reserve(newskb, NET_IP_ALIGN);
|
||||
skb = lp->rx_skbuff[entry];
|
||||
pci_unmap_single(lp->pci_dev,
|
||||
lp->rx_dma_addr[entry],
|
||||
PKT_BUF_SZ - 2,
|
||||
PKT_BUF_SIZE,
|
||||
PCI_DMA_FROMDEVICE);
|
||||
skb_put(skb, pkt_len);
|
||||
lp->rx_skbuff[entry] = newskb;
|
||||
lp->rx_dma_addr[entry] =
|
||||
pci_map_single(lp->pci_dev,
|
||||
newskb->data,
|
||||
PKT_BUF_SZ - 2,
|
||||
PKT_BUF_SIZE,
|
||||
PCI_DMA_FROMDEVICE);
|
||||
rxp->base = cpu_to_le32(lp->rx_dma_addr[entry]);
|
||||
rx_in_place = 1;
|
||||
} else
|
||||
skb = NULL;
|
||||
} else {
|
||||
skb = dev_alloc_skb(pkt_len + 2);
|
||||
skb = dev_alloc_skb(pkt_len + NET_IP_ALIGN);
|
||||
}
|
||||
|
||||
if (skb == NULL) {
|
||||
|
@ -1250,7 +1254,7 @@ static void pcnet32_rx_entry(struct net_device *dev,
|
|||
}
|
||||
skb->dev = dev;
|
||||
if (!rx_in_place) {
|
||||
skb_reserve(skb, 2); /* 16 byte align */
|
||||
skb_reserve(skb, NET_IP_ALIGN);
|
||||
skb_put(skb, pkt_len); /* Make room */
|
||||
pci_dma_sync_single_for_cpu(lp->pci_dev,
|
||||
lp->rx_dma_addr[entry],
|
||||
|
@ -1291,7 +1295,7 @@ static int pcnet32_rx(struct net_device *dev, int budget)
|
|||
* The docs say that the buffer length isn't touched, but Andrew
|
||||
* Boyd of QNX reports that some revs of the 79C965 clear it.
|
||||
*/
|
||||
rxp->buf_length = cpu_to_le16(2 - PKT_BUF_SZ);
|
||||
rxp->buf_length = cpu_to_le16(NEG_BUF_SIZE);
|
||||
wmb(); /* Make sure owner changes after others are visible */
|
||||
rxp->status = cpu_to_le16(0x8000);
|
||||
entry = (++lp->cur_rx) & lp->rx_mod_mask;
|
||||
|
@ -1774,8 +1778,8 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
|
|||
memset(dev->dev_addr, 0, sizeof(dev->dev_addr));
|
||||
|
||||
if (pcnet32_debug & NETIF_MSG_PROBE) {
|
||||
for (i = 0; i < 6; i++)
|
||||
printk(" %2.2x", dev->dev_addr[i]);
|
||||
DECLARE_MAC_BUF(mac);
|
||||
printk(" %s", print_mac(mac, dev->dev_addr));
|
||||
|
||||
/* Version 0x2623 and 0x2624 */
|
||||
if (((chip_version + 1) & 0xfffe) == 0x2624) {
|
||||
|
@ -2396,7 +2400,7 @@ static int pcnet32_init_ring(struct net_device *dev)
|
|||
if (rx_skbuff == NULL) {
|
||||
if (!
|
||||
(rx_skbuff = lp->rx_skbuff[i] =
|
||||
dev_alloc_skb(PKT_BUF_SZ))) {
|
||||
dev_alloc_skb(PKT_BUF_SKB))) {
|
||||
/* there is not much, we can do at this point */
|
||||
if (netif_msg_drv(lp))
|
||||
printk(KERN_ERR
|
||||
|
@ -2404,16 +2408,16 @@ static int pcnet32_init_ring(struct net_device *dev)
|
|||
dev->name);
|
||||
return -1;
|
||||
}
|
||||
skb_reserve(rx_skbuff, 2);
|
||||
skb_reserve(rx_skbuff, NET_IP_ALIGN);
|
||||
}
|
||||
|
||||
rmb();
|
||||
if (lp->rx_dma_addr[i] == 0)
|
||||
lp->rx_dma_addr[i] =
|
||||
pci_map_single(lp->pci_dev, rx_skbuff->data,
|
||||
PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE);
|
||||
PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
|
||||
lp->rx_ring[i].base = cpu_to_le32(lp->rx_dma_addr[i]);
|
||||
lp->rx_ring[i].buf_length = cpu_to_le16(2 - PKT_BUF_SZ);
|
||||
lp->rx_ring[i].buf_length = cpu_to_le16(NEG_BUF_SIZE);
|
||||
wmb(); /* Make sure owner changes after all others are visible */
|
||||
lp->rx_ring[i].status = cpu_to_le16(0x8000);
|
||||
}
|
||||
|
|
|
@ -236,12 +236,12 @@ module_init(fixed_mdio_bus_init);
|
|||
static void __exit fixed_mdio_bus_exit(void)
|
||||
{
|
||||
struct fixed_mdio_bus *fmb = &platform_fmb;
|
||||
struct fixed_phy *fp;
|
||||
struct fixed_phy *fp, *tmp;
|
||||
|
||||
mdiobus_unregister(&fmb->mii_bus);
|
||||
platform_device_unregister(pdev);
|
||||
|
||||
list_for_each_entry(fp, &fmb->phys, node) {
|
||||
list_for_each_entry_safe(fp, tmp, &fmb->phys, node) {
|
||||
list_del(&fp->node);
|
||||
kfree(fp);
|
||||
}
|
||||
|
|
File diff suppressed because it is too large
@ -35,198 +35,323 @@
|
|||
#define GELIC_NET_MAX_MTU VLAN_ETH_FRAME_LEN
|
||||
#define GELIC_NET_MIN_MTU VLAN_ETH_ZLEN
|
||||
#define GELIC_NET_RXBUF_ALIGN 128
|
||||
#define GELIC_NET_RX_CSUM_DEFAULT 1 /* hw chksum */
|
||||
#define GELIC_CARD_RX_CSUM_DEFAULT 1 /* hw chksum */
|
||||
#define GELIC_NET_WATCHDOG_TIMEOUT 5*HZ
|
||||
#define GELIC_NET_NAPI_WEIGHT (GELIC_NET_RX_DESCRIPTORS)
|
||||
#define GELIC_NET_BROADCAST_ADDR 0xffffffffffffL
|
||||
#define GELIC_NET_VLAN_POS (VLAN_ETH_ALEN * 2)
|
||||
#define GELIC_NET_VLAN_MAX 4
|
||||
|
||||
#define GELIC_NET_MC_COUNT_MAX 32 /* multicast address list */
|
||||
|
||||
enum gelic_net_int0_status {
|
||||
GELIC_NET_GDTDCEINT = 24,
|
||||
GELIC_NET_GRFANMINT = 28,
|
||||
};
|
||||
/* virtual interrupt status register bits */
|
||||
/* INT1 */
|
||||
#define GELIC_CARD_TX_RAM_FULL_ERR 0x0000000000000001L
|
||||
#define GELIC_CARD_RX_RAM_FULL_ERR 0x0000000000000002L
|
||||
#define GELIC_CARD_TX_SHORT_FRAME_ERR 0x0000000000000004L
|
||||
#define GELIC_CARD_TX_INVALID_DESCR_ERR 0x0000000000000008L
|
||||
#define GELIC_CARD_RX_FIFO_FULL_ERR 0x0000000000002000L
|
||||
#define GELIC_CARD_RX_DESCR_CHAIN_END 0x0000000000004000L
|
||||
#define GELIC_CARD_RX_INVALID_DESCR_ERR 0x0000000000008000L
|
||||
#define GELIC_CARD_TX_RESPONCE_ERR 0x0000000000010000L
|
||||
#define GELIC_CARD_RX_RESPONCE_ERR 0x0000000000100000L
|
||||
#define GELIC_CARD_TX_PROTECTION_ERR 0x0000000000400000L
|
||||
#define GELIC_CARD_RX_PROTECTION_ERR 0x0000000004000000L
|
||||
#define GELIC_CARD_TX_TCP_UDP_CHECKSUM_ERR 0x0000000008000000L
|
||||
#define GELIC_CARD_PORT_STATUS_CHANGED 0x0000000020000000L
|
||||
#define GELIC_CARD_WLAN_EVENT_RECEIVED 0x0000000040000000L
|
||||
#define GELIC_CARD_WLAN_COMMAND_COMPLETED 0x0000000080000000L
|
||||
/* INT 0 */
|
||||
#define GELIC_CARD_TX_FLAGGED_DESCR 0x0004000000000000L
|
||||
#define GELIC_CARD_RX_FLAGGED_DESCR 0x0040000000000000L
|
||||
#define GELIC_CARD_TX_TRANSFER_END 0x0080000000000000L
|
||||
#define GELIC_CARD_TX_DESCR_CHAIN_END 0x0100000000000000L
|
||||
#define GELIC_CARD_NUMBER_OF_RX_FRAME 0x1000000000000000L
|
||||
#define GELIC_CARD_ONE_TIME_COUNT_TIMER 0x4000000000000000L
|
||||
#define GELIC_CARD_FREE_RUN_COUNT_TIMER 0x8000000000000000L
|
||||
|
||||
/* GHIINT1STS bits */
|
||||
enum gelic_net_int1_status {
|
||||
GELIC_NET_GDADCEINT = 14,
|
||||
};
|
||||
/* initial interrupt mask */
|
||||
#define GELIC_CARD_TXINT GELIC_CARD_TX_DESCR_CHAIN_END
|
||||
|
||||
/* interrupt mask */
|
||||
#define GELIC_NET_TXINT (1L << (GELIC_NET_GDTDCEINT + 32))
|
||||
|
||||
#define GELIC_NET_RXINT0 (1L << (GELIC_NET_GRFANMINT + 32))
|
||||
#define GELIC_NET_RXINT1 (1L << GELIC_NET_GDADCEINT)
|
||||
#define GELIC_NET_RXINT (GELIC_NET_RXINT0 | GELIC_NET_RXINT1)
|
||||
#define GELIC_CARD_RXINT (GELIC_CARD_RX_DESCR_CHAIN_END | \
|
||||
GELIC_CARD_NUMBER_OF_RX_FRAME)
|
||||
|
||||
/* RX descriptor data_status bits */
|
||||
#define GELIC_NET_RXDMADU 0x80000000 /* destination MAC addr unknown */
|
||||
#define GELIC_NET_RXLSTFBF 0x40000000 /* last frame buffer */
|
||||
#define GELIC_NET_RXIPCHK 0x20000000 /* IP checksum performed */
|
||||
#define GELIC_NET_RXTCPCHK 0x10000000 /* TCP/UDP checksup performed */
|
||||
#define GELIC_NET_RXIPSPKT 0x08000000 /* IPsec packet */
|
||||
#define GELIC_NET_RXIPSAHPRT 0x04000000 /* IPsec AH protocol performed */
|
||||
#define GELIC_NET_RXIPSESPPRT 0x02000000 /* IPsec ESP protocol performed */
|
||||
#define GELIC_NET_RXSESPAH 0x01000000 /*
|
||||
* IPsec ESP protocol auth
|
||||
* performed
|
||||
*/
|
||||
enum gelic_descr_rx_status {
|
||||
GELIC_DESCR_RXDMADU = 0x80000000, /* destination MAC addr unknown */
|
||||
GELIC_DESCR_RXLSTFBF = 0x40000000, /* last frame buffer */
|
||||
GELIC_DESCR_RXIPCHK = 0x20000000, /* IP checksum performed */
|
||||
GELIC_DESCR_RXTCPCHK = 0x10000000, /* TCP/UDP checksup performed */
|
||||
GELIC_DESCR_RXWTPKT = 0x00C00000, /*
|
||||
* wakeup trigger packet
|
||||
* 01: Magic Packet (TM)
|
||||
* 10: ARP packet
|
||||
* 11: Multicast MAC addr
|
||||
*/
|
||||
GELIC_DESCR_RXVLNPKT = 0x00200000, /* VLAN packet */
|
||||
/* bit 20..16 reserved */
|
||||
GELIC_DESCR_RXRRECNUM = 0x0000ff00, /* reception receipt number */
|
||||
/* bit 7..0 reserved */
|
||||
};
|
||||
|
||||
#define GELIC_NET_RXWTPKT 0x00C00000 /*
|
||||
* wakeup trigger packet
|
||||
* 01: Magic Packet (TM)
|
||||
* 10: ARP packet
|
||||
* 11: Multicast MAC addr
|
||||
*/
|
||||
#define GELIC_NET_RXVLNPKT 0x00200000 /* VLAN packet */
|
||||
/* bit 20..16 reserved */
|
||||
#define GELIC_NET_RXRRECNUM 0x0000ff00 /* reception receipt number */
|
||||
#define GELIC_NET_RXRRECNUM_SHIFT 8
|
||||
/* bit 7..0 reserved */
|
||||
#define GELIC_DESCR_DATA_STATUS_CHK_MASK \
|
||||
(GELIC_DESCR_RXIPCHK | GELIC_DESCR_RXTCPCHK)
|
||||
|
||||
#define GELIC_NET_TXDESC_TAIL 0
|
||||
#define GELIC_NET_DATA_STATUS_CHK_MASK (GELIC_NET_RXIPCHK | GELIC_NET_RXTCPCHK)
|
||||
/* TX descriptor data_status bits */
|
||||
enum gelic_descr_tx_status {
|
||||
GELIC_DESCR_TX_TAIL = 0x00000001, /* gelic treated this
|
||||
* descriptor was end of
|
||||
* a tx frame
|
||||
*/
|
||||
};
|
||||
|
||||
/* RX descriptor data_error bits */
|
||||
/* bit 31 reserved */
|
||||
#define GELIC_NET_RXALNERR 0x40000000 /* alignement error 10/100M */
|
||||
#define GELIC_NET_RXOVERERR 0x20000000 /* oversize error */
|
||||
#define GELIC_NET_RXRNTERR 0x10000000 /* Runt error */
|
||||
#define GELIC_NET_RXIPCHKERR 0x08000000 /* IP checksum error */
|
||||
#define GELIC_NET_RXTCPCHKERR 0x04000000 /* TCP/UDP checksum error */
|
||||
#define GELIC_NET_RXUMCHSP 0x02000000 /* unmatched sp on sp */
|
||||
#define GELIC_NET_RXUMCHSPI 0x01000000 /* unmatched SPI on SAD */
|
||||
#define GELIC_NET_RXUMCHSAD 0x00800000 /* unmatched SAD */
|
||||
#define GELIC_NET_RXIPSAHERR 0x00400000 /* auth error on AH protocol
|
||||
* processing */
|
||||
#define GELIC_NET_RXIPSESPAHERR 0x00200000 /* auth error on ESP protocol
|
||||
* processing */
|
||||
#define GELIC_NET_RXDRPPKT 0x00100000 /* drop packet */
|
||||
#define GELIC_NET_RXIPFMTERR 0x00080000 /* IP packet format error */
|
||||
/* bit 18 reserved */
|
||||
#define GELIC_NET_RXDATAERR 0x00020000 /* IP packet format error */
|
||||
#define GELIC_NET_RXCALERR 0x00010000 /* cariier extension length
|
||||
* error */
|
||||
#define GELIC_NET_RXCREXERR 0x00008000 /* carrier extention error */
|
||||
#define GELIC_NET_RXMLTCST 0x00004000 /* multicast address frame */
|
||||
/* bit 13..0 reserved */
|
||||
#define GELIC_NET_DATA_ERROR_CHK_MASK \
|
||||
(GELIC_NET_RXIPCHKERR | GELIC_NET_RXTCPCHKERR)
|
||||
/* RX descriptor data error bits */
|
||||
enum gelic_descr_rx_error {
|
||||
/* bit 31 reserved */
|
||||
GELIC_DESCR_RXALNERR = 0x40000000, /* alignement error 10/100M */
|
||||
GELIC_DESCR_RXOVERERR = 0x20000000, /* oversize error */
|
||||
GELIC_DESCR_RXRNTERR = 0x10000000, /* Runt error */
|
||||
GELIC_DESCR_RXIPCHKERR = 0x08000000, /* IP checksum error */
|
||||
GELIC_DESCR_RXTCPCHKERR = 0x04000000, /* TCP/UDP checksum error */
|
||||
GELIC_DESCR_RXDRPPKT = 0x00100000, /* drop packet */
|
||||
GELIC_DESCR_RXIPFMTERR = 0x00080000, /* IP packet format error */
|
||||
/* bit 18 reserved */
|
||||
GELIC_DESCR_RXDATAERR = 0x00020000, /* IP packet format error */
|
||||
GELIC_DESCR_RXCALERR = 0x00010000, /* cariier extension length
|
||||
* error */
|
||||
GELIC_DESCR_RXCREXERR = 0x00008000, /* carrier extention error */
|
||||
GELIC_DESCR_RXMLTCST = 0x00004000, /* multicast address frame */
|
||||
/* bit 13..0 reserved */
|
||||
};
|
||||
#define GELIC_DESCR_DATA_ERROR_CHK_MASK \
|
||||
(GELIC_DESCR_RXIPCHKERR | GELIC_DESCR_RXTCPCHKERR)
|
||||
|
||||
/* DMA command and status (RX and TX)*/
|
||||
enum gelic_descr_dma_status {
|
||||
GELIC_DESCR_DMA_COMPLETE = 0x00000000, /* used in tx */
|
||||
GELIC_DESCR_DMA_BUFFER_FULL = 0x00000000, /* used in rx */
|
||||
GELIC_DESCR_DMA_RESPONSE_ERROR = 0x10000000, /* used in rx, tx */
|
||||
GELIC_DESCR_DMA_PROTECTION_ERROR = 0x20000000, /* used in rx, tx */
|
||||
GELIC_DESCR_DMA_FRAME_END = 0x40000000, /* used in rx */
|
||||
GELIC_DESCR_DMA_FORCE_END = 0x50000000, /* used in rx, tx */
|
||||
GELIC_DESCR_DMA_CARDOWNED = 0xa0000000, /* used in rx, tx */
|
||||
GELIC_DESCR_DMA_NOT_IN_USE = 0xb0000000, /* any other value */
|
||||
};
|
||||
|
||||
#define GELIC_DESCR_DMA_STAT_MASK (0xf0000000)
|
||||
|
||||
/* tx descriptor command and status */
|
||||
#define GELIC_NET_DMAC_CMDSTAT_NOCS 0xa0080000 /* middle of frame */
|
||||
#define GELIC_NET_DMAC_CMDSTAT_TCPCS 0xa00a0000
|
||||
#define GELIC_NET_DMAC_CMDSTAT_UDPCS 0xa00b0000
|
||||
#define GELIC_NET_DMAC_CMDSTAT_END_FRAME 0x00040000 /* end of frame */
|
||||
enum gelic_descr_tx_dma_status {
|
||||
/* [19] */
|
||||
GELIC_DESCR_TX_DMA_IKE = 0x00080000, /* IPSEC off */
|
||||
/* [18] */
|
||||
GELIC_DESCR_TX_DMA_FRAME_TAIL = 0x00040000, /* last descriptor of
|
||||
* the packet
|
||||
*/
|
||||
/* [17..16] */
|
||||
GELIC_DESCR_TX_DMA_TCP_CHKSUM = 0x00020000, /* TCP packet */
|
||||
GELIC_DESCR_TX_DMA_UDP_CHKSUM = 0x00030000, /* UDP packet */
|
||||
GELIC_DESCR_TX_DMA_NO_CHKSUM = 0x00000000, /* no checksum */
|
||||
|
||||
#define GELIC_NET_DMAC_CMDSTAT_RXDCEIS 0x00000002 /* descriptor chain end
|
||||
* interrupt status */
|
||||
|
||||
#define GELIC_NET_DMAC_CMDSTAT_CHAIN_END 0x00000002 /* RXDCEIS:DMA stopped */
|
||||
#define GELIC_NET_DESCR_IND_PROC_SHIFT 28
|
||||
#define GELIC_NET_DESCR_IND_PROC_MASKO 0x0fffffff
|
||||
|
||||
|
||||
enum gelic_net_descr_status {
|
||||
GELIC_NET_DESCR_COMPLETE = 0x00, /* used in tx */
|
||||
GELIC_NET_DESCR_BUFFER_FULL = 0x00, /* used in rx */
|
||||
GELIC_NET_DESCR_RESPONSE_ERROR = 0x01, /* used in rx and tx */
|
||||
GELIC_NET_DESCR_PROTECTION_ERROR = 0x02, /* used in rx and tx */
|
||||
GELIC_NET_DESCR_FRAME_END = 0x04, /* used in rx */
|
||||
GELIC_NET_DESCR_FORCE_END = 0x05, /* used in rx and tx */
|
||||
GELIC_NET_DESCR_CARDOWNED = 0x0a, /* used in rx and tx */
|
||||
GELIC_NET_DESCR_NOT_IN_USE = 0x0b /* any other value */
|
||||
/* [1] */
|
||||
GELIC_DESCR_TX_DMA_CHAIN_END = 0x00000002, /* DMA terminated
|
||||
* due to chain end
|
||||
*/
|
||||
};
|
||||
|
||||
#define GELIC_DESCR_DMA_CMD_NO_CHKSUM \
|
||||
(GELIC_DESCR_DMA_CARDOWNED | GELIC_DESCR_TX_DMA_IKE | \
|
||||
GELIC_DESCR_TX_DMA_NO_CHKSUM)
|
||||
|
||||
#define GELIC_DESCR_DMA_CMD_TCP_CHKSUM \
|
||||
(GELIC_DESCR_DMA_CARDOWNED | GELIC_DESCR_TX_DMA_IKE | \
|
||||
GELIC_DESCR_TX_DMA_TCP_CHKSUM)
|
||||
|
||||
#define GELIC_DESCR_DMA_CMD_UDP_CHKSUM \
|
||||
(GELIC_DESCR_DMA_CARDOWNED | GELIC_DESCR_TX_DMA_IKE | \
|
||||
GELIC_DESCR_TX_DMA_UDP_CHKSUM)
|
||||
|
||||
enum gelic_descr_rx_dma_status {
|
||||
/* [ 1 ] */
|
||||
GELIC_DESCR_RX_DMA_CHAIN_END = 0x00000002, /* DMA terminated
|
||||
* due to chain end
|
||||
*/
|
||||
};
|
||||
|
||||
/* for lv1_net_control */
|
||||
#define GELIC_NET_GET_MAC_ADDRESS 0x0000000000000001
|
||||
#define GELIC_NET_GET_ETH_PORT_STATUS 0x0000000000000002
|
||||
#define GELIC_NET_SET_NEGOTIATION_MODE 0x0000000000000003
|
||||
#define GELIC_NET_GET_VLAN_ID 0x0000000000000004
|
||||
enum gelic_lv1_net_control_code {
|
||||
GELIC_LV1_GET_MAC_ADDRESS = 1,
|
||||
GELIC_LV1_GET_ETH_PORT_STATUS = 2,
|
||||
GELIC_LV1_SET_NEGOTIATION_MODE = 3,
|
||||
GELIC_LV1_GET_VLAN_ID = 4,
|
||||
GELIC_LV1_GET_CHANNEL = 6,
|
||||
GELIC_LV1_POST_WLAN_CMD = 9,
|
||||
GELIC_LV1_GET_WLAN_CMD_RESULT = 10,
|
||||
GELIC_LV1_GET_WLAN_EVENT = 11
|
||||
};
|
||||
|
||||
#define GELIC_NET_LINK_UP 0x0000000000000001
|
||||
#define GELIC_NET_FULL_DUPLEX 0x0000000000000002
|
||||
#define GELIC_NET_AUTO_NEG 0x0000000000000004
|
||||
#define GELIC_NET_SPEED_10 0x0000000000000010
|
||||
#define GELIC_NET_SPEED_100 0x0000000000000020
|
||||
#define GELIC_NET_SPEED_1000 0x0000000000000040
|
||||
/* status returned from GET_ETH_PORT_STATUS */
|
||||
enum gelic_lv1_ether_port_status {
|
||||
GELIC_LV1_ETHER_LINK_UP = 0x0000000000000001L,
|
||||
GELIC_LV1_ETHER_FULL_DUPLEX = 0x0000000000000002L,
|
||||
GELIC_LV1_ETHER_AUTO_NEG = 0x0000000000000004L,
|
||||
|
||||
#define GELIC_NET_VLAN_ALL 0x0000000000000001
|
||||
#define GELIC_NET_VLAN_WIRED 0x0000000000000002
|
||||
#define GELIC_NET_VLAN_WIRELESS 0x0000000000000003
|
||||
#define GELIC_NET_VLAN_PSP 0x0000000000000004
|
||||
#define GELIC_NET_VLAN_PORT0 0x0000000000000010
|
||||
#define GELIC_NET_VLAN_PORT1 0x0000000000000011
|
||||
#define GELIC_NET_VLAN_PORT2 0x0000000000000012
|
||||
#define GELIC_NET_VLAN_DAEMON_CLIENT_BSS 0x0000000000000013
|
||||
#define GELIC_NET_VLAN_LIBERO_CLIENT_BSS 0x0000000000000014
|
||||
#define GELIC_NET_VLAN_NO_ENTRY -6
|
||||
GELIC_LV1_ETHER_SPEED_10 = 0x0000000000000010L,
|
||||
GELIC_LV1_ETHER_SPEED_100 = 0x0000000000000020L,
|
||||
GELIC_LV1_ETHER_SPEED_1000 = 0x0000000000000040L,
|
||||
GELIC_LV1_ETHER_SPEED_MASK = 0x0000000000000070L
|
||||
};
|
||||
|
||||
#define GELIC_NET_PORT 2 /* for port status */
|
||||
enum gelic_lv1_vlan_index {
|
||||
/* for outgoing packets */
|
||||
GELIC_LV1_VLAN_TX_ETHERNET = 0x0000000000000002L,
|
||||
GELIC_LV1_VLAN_TX_WIRELESS = 0x0000000000000003L,
|
||||
/* for incoming packets */
|
||||
GELIC_LV1_VLAN_RX_ETHERNET = 0x0000000000000012L,
|
||||
GELIC_LV1_VLAN_RX_WIRELESS = 0x0000000000000013L
|
||||
};
|
||||
|
||||
/* size of hardware part of gelic descriptor */
|
||||
#define GELIC_NET_DESCR_SIZE (32)
|
||||
struct gelic_net_descr {
|
||||
#define GELIC_DESCR_SIZE (32)
|
||||
|
||||
enum gelic_port_type {
|
||||
GELIC_PORT_ETHERNET = 0,
|
||||
GELIC_PORT_WIRELESS = 1,
|
||||
GELIC_PORT_MAX
|
||||
};
|
||||
|
||||
struct gelic_descr {
|
||||
/* as defined by the hardware */
|
||||
u32 buf_addr;
|
||||
u32 buf_size;
|
||||
u32 next_descr_addr;
|
||||
u32 dmac_cmd_status;
|
||||
u32 result_size;
|
||||
u32 valid_size; /* all zeroes for tx */
|
||||
u32 data_status;
|
||||
u32 data_error; /* all zeroes for tx */
|
||||
__be32 buf_addr;
|
||||
__be32 buf_size;
|
||||
__be32 next_descr_addr;
|
||||
__be32 dmac_cmd_status;
|
||||
__be32 result_size;
|
||||
__be32 valid_size; /* all zeroes for tx */
|
||||
__be32 data_status;
|
||||
__be32 data_error; /* all zeroes for tx */
|
||||
|
||||
/* used in the driver */
|
||||
struct sk_buff *skb;
|
||||
dma_addr_t bus_addr;
|
||||
struct gelic_net_descr *next;
|
||||
struct gelic_net_descr *prev;
|
||||
struct vlan_ethhdr vlan;
|
||||
struct gelic_descr *next;
|
||||
struct gelic_descr *prev;
|
||||
} __attribute__((aligned(32)));
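For orientation only, a minimal sketch (not part of this commit) of how a driver would test descriptor ownership using the definitions above. The helper names are hypothetical; the sketch assumes the big-endian dmac_cmd_status field of struct gelic_descr together with GELIC_DESCR_DMA_STAT_MASK and GELIC_DESCR_DMA_CARDOWNED from this header.

/* Hypothetical helpers, for illustration only -- not from the patch. */
static inline enum gelic_descr_dma_status
gelic_descr_dma_status_of(const struct gelic_descr *descr)
{
	/* The DMA status nibble lives in the top four bits of dmac_cmd_status. */
	return be32_to_cpu(descr->dmac_cmd_status) & GELIC_DESCR_DMA_STAT_MASK;
}

static inline int gelic_descr_card_owned(const struct gelic_descr *descr)
{
	/* While the card owns the descriptor the driver must not touch it. */
	return gelic_descr_dma_status_of(descr) == GELIC_DESCR_DMA_CARDOWNED;
}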
|
||||
|
||||
struct gelic_net_descr_chain {
|
||||
struct gelic_descr_chain {
|
||||
/* we walk from tail to head */
|
||||
struct gelic_net_descr *head;
|
||||
struct gelic_net_descr *tail;
|
||||
struct gelic_descr *head;
|
||||
struct gelic_descr *tail;
|
||||
};
|
||||
|
||||
struct gelic_net_card {
|
||||
struct net_device *netdev;
|
||||
struct gelic_vlan_id {
|
||||
u16 tx;
|
||||
u16 rx;
|
||||
};
|
||||
|
||||
struct gelic_card {
|
||||
struct napi_struct napi;
|
||||
struct net_device *netdev[GELIC_PORT_MAX];
|
||||
/*
|
||||
* the hypervisor requires irq_status to be
* 8-byte aligned; a u64 member is always
* placed with that alignment
|
||||
*/
|
||||
u64 irq_status;
|
||||
u64 ghiintmask;
|
||||
u64 irq_mask;
|
||||
|
||||
struct ps3_system_bus_device *dev;
|
||||
u32 vlan_id[GELIC_NET_VLAN_MAX];
|
||||
int vlan_index;
|
||||
struct gelic_vlan_id vlan[GELIC_PORT_MAX];
|
||||
int vlan_required;
|
||||
|
||||
struct gelic_net_descr_chain tx_chain;
|
||||
struct gelic_net_descr_chain rx_chain;
|
||||
struct gelic_descr_chain tx_chain;
|
||||
struct gelic_descr_chain rx_chain;
|
||||
int rx_dma_restart_required;
|
||||
/* guard dmac descriptor chain */
|
||||
spinlock_t chain_lock;
|
||||
|
||||
int rx_csum;
|
||||
/* guard tx_dma_progress */
|
||||
spinlock_t tx_dma_lock;
|
||||
/*
|
||||
* tx_lock guards tx descriptor list and
|
||||
* tx_dma_progress.
|
||||
*/
|
||||
spinlock_t tx_lock;
|
||||
int tx_dma_progress;
|
||||
|
||||
struct work_struct tx_timeout_task;
|
||||
atomic_t tx_timeout_task_counter;
|
||||
wait_queue_head_t waitq;
|
||||
|
||||
struct gelic_net_descr *tx_top, *rx_top;
|
||||
struct gelic_net_descr descr[0];
|
||||
/* only first user should up the card */
|
||||
struct semaphore updown_lock;
|
||||
atomic_t users;
|
||||
|
||||
u64 ether_port_status;
|
||||
/* original address returned by kzalloc */
|
||||
void *unalign;
|
||||
|
||||
/*
|
||||
* each netdevice has copy of irq
|
||||
*/
|
||||
unsigned int irq;
|
||||
struct gelic_descr *tx_top, *rx_top;
|
||||
struct gelic_descr descr[0]; /* must be the last */
|
||||
};
|
||||
|
||||
struct gelic_port {
|
||||
struct gelic_card *card;
|
||||
struct net_device *netdev;
|
||||
enum gelic_port_type type;
|
||||
long priv[0]; /* long for alignment */
|
||||
};
|
||||
|
||||
extern unsigned long p_to_lp(long pa);
|
||||
static inline struct gelic_card *port_to_card(struct gelic_port *p)
|
||||
{
|
||||
return p->card;
|
||||
}
|
||||
static inline struct net_device *port_to_netdev(struct gelic_port *p)
|
||||
{
|
||||
return p->netdev;
|
||||
}
|
||||
static inline struct gelic_card *netdev_card(struct net_device *d)
|
||||
{
|
||||
return ((struct gelic_port *)netdev_priv(d))->card;
|
||||
}
|
||||
static inline struct gelic_port *netdev_port(struct net_device *d)
|
||||
{
|
||||
return (struct gelic_port *)netdev_priv(d);
|
||||
}
|
||||
static inline struct device *ctodev(struct gelic_card *card)
|
||||
{
|
||||
return &card->dev->core;
|
||||
}
|
||||
static inline u64 bus_id(struct gelic_card *card)
|
||||
{
|
||||
return card->dev->bus_id;
|
||||
}
|
||||
static inline u64 dev_id(struct gelic_card *card)
|
||||
{
|
||||
return card->dev->dev_id;
|
||||
}
|
||||
|
||||
static inline void *port_priv(struct gelic_port *port)
|
||||
{
|
||||
return port->priv;
|
||||
}
|
||||
|
||||
extern int gelic_card_set_irq_mask(struct gelic_card *card, u64 mask);
|
||||
/* shared netdev ops */
|
||||
extern void gelic_card_up(struct gelic_card *card);
|
||||
extern void gelic_card_down(struct gelic_card *card);
|
||||
extern int gelic_net_open(struct net_device *netdev);
|
||||
extern int gelic_net_stop(struct net_device *netdev);
|
||||
extern int gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev);
|
||||
extern void gelic_net_set_multi(struct net_device *netdev);
|
||||
extern void gelic_net_tx_timeout(struct net_device *netdev);
|
||||
extern int gelic_net_change_mtu(struct net_device *netdev, int new_mtu);
|
||||
extern int gelic_net_setup_netdev(struct net_device *netdev,
|
||||
struct gelic_card *card);
|
||||
|
||||
/* shared ethtool ops */
|
||||
extern void gelic_net_get_drvinfo(struct net_device *netdev,
|
||||
struct ethtool_drvinfo *info);
|
||||
extern u32 gelic_net_get_rx_csum(struct net_device *netdev);
|
||||
extern int gelic_net_set_rx_csum(struct net_device *netdev, u32 data);
|
||||
extern void gelic_net_poll_controller(struct net_device *netdev);
|
||||
|
||||
#endif /* _GELIC_NET_H */
|
||||
2753	drivers/net/ps3_gelic_wireless.c	(new file; diff suppressed because it is too large)
329	drivers/net/ps3_gelic_wireless.h	(new file)
|
@ -0,0 +1,329 @@
|
|||
/*
|
||||
* PS3 gelic network driver.
|
||||
*
|
||||
* Copyright (C) 2007 Sony Computer Entertainment Inc.
|
||||
* Copyright 2007 Sony Corporation
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation version 2.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
|
||||
*/
|
||||
#ifndef _GELIC_WIRELESS_H
|
||||
#define _GELIC_WIRELESS_H
|
||||
|
||||
#include <linux/wireless.h>
|
||||
#include <net/iw_handler.h>
|
||||
|
||||
|
||||
/* return value from GELIC_LV1_GET_WLAN_EVENT netcontrol */
|
||||
enum gelic_lv1_wl_event {
|
||||
GELIC_LV1_WL_EVENT_DEVICE_READY = 0x01, /* Eurus ready */
|
||||
GELIC_LV1_WL_EVENT_SCAN_COMPLETED = 0x02, /* Scan has completed */
|
||||
GELIC_LV1_WL_EVENT_DEAUTH = 0x04, /* Deauthed by the AP */
|
||||
GELIC_LV1_WL_EVENT_BEACON_LOST = 0x08, /* Beacon lost detected */
|
||||
GELIC_LV1_WL_EVENT_CONNECTED = 0x10, /* Connected to AP */
|
||||
GELIC_LV1_WL_EVENT_WPA_CONNECTED = 0x20, /* WPA connection */
|
||||
GELIC_LV1_WL_EVENT_WPA_ERROR = 0x40, /* MIC error */
|
||||
};
|
||||
|
||||
/* arguments for GELIC_LV1_POST_WLAN_COMMAND netcontrol */
|
||||
enum gelic_eurus_command {
|
||||
GELIC_EURUS_CMD_ASSOC = 1, /* association start */
|
||||
GELIC_EURUS_CMD_DISASSOC = 2, /* disassociate */
|
||||
GELIC_EURUS_CMD_START_SCAN = 3, /* scan start */
|
||||
GELIC_EURUS_CMD_GET_SCAN = 4, /* get scan result */
|
||||
GELIC_EURUS_CMD_SET_COMMON_CFG = 5, /* set common config */
|
||||
GELIC_EURUS_CMD_GET_COMMON_CFG = 6, /* get common config */
|
||||
GELIC_EURUS_CMD_SET_WEP_CFG = 7, /* set WEP config */
|
||||
GELIC_EURUS_CMD_GET_WEP_CFG = 8, /* get WEP config */
|
||||
GELIC_EURUS_CMD_SET_WPA_CFG = 9, /* set WPA config */
|
||||
GELIC_EURUS_CMD_GET_WPA_CFG = 10, /* get WPA config */
|
||||
GELIC_EURUS_CMD_GET_RSSI_CFG = 11, /* get RSSI info. */
|
||||
GELIC_EURUS_CMD_MAX_INDEX
|
||||
};
|
||||
|
||||
/* for GELIC_EURUS_CMD_COMMON_CFG */
|
||||
enum gelic_eurus_bss_type {
|
||||
GELIC_EURUS_BSS_INFRA = 0,
|
||||
GELIC_EURUS_BSS_ADHOC = 1, /* not supported */
|
||||
};
|
||||
|
||||
enum gelic_eurus_auth_method {
|
||||
GELIC_EURUS_AUTH_OPEN = 0, /* FIXME: WLAN_AUTH_OPEN */
|
||||
GELIC_EURUS_AUTH_SHARED = 1, /* not supported */
|
||||
};
|
||||
|
||||
enum gelic_eurus_opmode {
|
||||
GELIC_EURUS_OPMODE_11BG = 0, /* 802.11b/g */
|
||||
GELIC_EURUS_OPMODE_11B = 1, /* 802.11b only */
|
||||
GELIC_EURUS_OPMODE_11G = 2, /* 802.11g only */
|
||||
};
|
||||
|
||||
struct gelic_eurus_common_cfg {
|
||||
/* all fields are big endian */
|
||||
u16 scan_index;
|
||||
u16 bss_type; /* infra or adhoc */
|
||||
u16 auth_method; /* shared key or open */
|
||||
u16 op_mode; /* B/G */
|
||||
} __attribute__((packed));
|
||||
|
||||
|
||||
/* for GELIC_EURUS_CMD_WEP_CFG */
|
||||
enum gelic_eurus_wep_security {
|
||||
GELIC_EURUS_WEP_SEC_NONE = 0,
|
||||
GELIC_EURUS_WEP_SEC_40BIT = 1,
|
||||
GELIC_EURUS_WEP_SEC_104BIT = 2,
|
||||
};
|
||||
|
||||
struct gelic_eurus_wep_cfg {
|
||||
/* all fields are big endian */
|
||||
u16 security;
|
||||
u8 key[4][16];
|
||||
} __attribute__((packed));
|
||||
|
||||
/* for GELIC_EURUS_CMD_WPA_CFG */
|
||||
enum gelic_eurus_wpa_security {
|
||||
GELIC_EURUS_WPA_SEC_NONE = 0x0000,
|
||||
/* group=TKIP, pairwise=TKIP */
|
||||
GELIC_EURUS_WPA_SEC_WPA_TKIP_TKIP = 0x0001,
|
||||
/* group=AES, pairwise=AES */
|
||||
GELIC_EURUS_WPA_SEC_WPA_AES_AES = 0x0002,
|
||||
/* group=TKIP, pairwise=TKIP */
|
||||
GELIC_EURUS_WPA_SEC_WPA2_TKIP_TKIP = 0x0004,
|
||||
/* group=AES, pairwise=AES */
|
||||
GELIC_EURUS_WPA_SEC_WPA2_AES_AES = 0x0008,
|
||||
/* group=TKIP, pairwise=AES */
|
||||
GELIC_EURUS_WPA_SEC_WPA_TKIP_AES = 0x0010,
|
||||
/* group=TKIP, pairwise=AES */
|
||||
GELIC_EURUS_WPA_SEC_WPA2_TKIP_AES = 0x0020,
|
||||
};
|
||||
|
||||
enum gelic_eurus_wpa_psk_type {
|
||||
GELIC_EURUS_WPA_PSK_PASSPHRASE = 0, /* passphrase string */
|
||||
GELIC_EURUS_WPA_PSK_BIN = 1, /* 32 bytes binary key */
|
||||
};
|
||||
|
||||
#define GELIC_WL_EURUS_PSK_MAX_LEN 64
|
||||
#define WPA_PSK_LEN 32 /* WPA spec says 256bit */
|
||||
|
||||
struct gelic_eurus_wpa_cfg {
|
||||
/* all fields are big endian */
|
||||
u16 security;
|
||||
u16 psk_type; /* psk key encoding type */
|
||||
u8 psk[GELIC_WL_EURUS_PSK_MAX_LEN]; /* psk key; hex or passphrase */
|
||||
} __attribute__((packed));
|
||||
|
||||
/* for GELIC_EURUS_CMD_{START,GET}_SCAN */
|
||||
enum gelic_eurus_scan_capability {
|
||||
GELIC_EURUS_SCAN_CAP_ADHOC = 0x0000,
|
||||
GELIC_EURUS_SCAN_CAP_INFRA = 0x0001,
|
||||
GELIC_EURUS_SCAN_CAP_MASK = 0x0001,
|
||||
};
|
||||
|
||||
enum gelic_eurus_scan_sec_type {
|
||||
GELIC_EURUS_SCAN_SEC_NONE = 0x0000,
|
||||
GELIC_EURUS_SCAN_SEC_WEP = 0x0100,
|
||||
GELIC_EURUS_SCAN_SEC_WPA = 0x0200,
|
||||
GELIC_EURUS_SCAN_SEC_WPA2 = 0x0400,
|
||||
GELIC_EURUS_SCAN_SEC_MASK = 0x0f00,
|
||||
};
|
||||
|
||||
enum gelic_eurus_scan_sec_wep_type {
|
||||
GELIC_EURUS_SCAN_SEC_WEP_UNKNOWN = 0x0000,
|
||||
GELIC_EURUS_SCAN_SEC_WEP_40 = 0x0001,
|
||||
GELIC_EURUS_SCAN_SEC_WEP_104 = 0x0002,
|
||||
GELIC_EURUS_SCAN_SEC_WEP_MASK = 0x0003,
|
||||
};
|
||||
|
||||
enum gelic_eurus_scan_sec_wpa_type {
|
||||
GELIC_EURUS_SCAN_SEC_WPA_UNKNOWN = 0x0000,
|
||||
GELIC_EURUS_SCAN_SEC_WPA_TKIP = 0x0001,
|
||||
GELIC_EURUS_SCAN_SEC_WPA_AES = 0x0002,
|
||||
GELIC_EURUS_SCAN_SEC_WPA_MASK = 0x0003,
|
||||
};
|
||||
|
||||
/*
|
||||
* hw BSS information structure returned from GELIC_EURUS_CMD_GET_SCAN
|
||||
*/
|
||||
struct gelic_eurus_scan_info {
|
||||
/* all fields are big endian */
|
||||
__be16 size;
|
||||
__be16 rssi; /* percentage */
|
||||
__be16 channel; /* channel number */
|
||||
__be16 beacon_period; /* FIXME: in msec unit */
|
||||
__be16 capability;
|
||||
__be16 security;
|
||||
u8 bssid[8]; /* last ETH_ALEN are valid. bssid[0],[1] are unused */
|
||||
u8 essid[32]; /* IW_ESSID_MAX_SIZE */
|
||||
u8 rate[16]; /* first MAX_RATES_LENGTH(12) are valid */
|
||||
u8 ext_rate[16]; /* first MAX_RATES_EX_LENGTH(16) are valid */
|
||||
__be32 reserved1;
|
||||
__be32 reserved2;
|
||||
__be32 reserved3;
|
||||
__be32 reserved4;
|
||||
u8 elements[0]; /* ie */
|
||||
} __attribute__ ((packed));
|
||||
|
||||
/* the hypervisor returns up to 16 BSS entries */
|
||||
#define GELIC_EURUS_MAX_SCAN (16)
|
||||
struct gelic_wl_scan_info {
|
||||
struct list_head list;
|
||||
struct gelic_eurus_scan_info *hwinfo;
|
||||
|
||||
int valid; /* set 1 if this entry was in latest scanned list
|
||||
* from Eurus */
|
||||
unsigned int eurus_index; /* index in the Eurus list */
|
||||
unsigned long last_scanned; /* acquired time */
|
||||
|
||||
unsigned int rate_len;
|
||||
unsigned int rate_ext_len;
|
||||
unsigned int essid_len;
|
||||
};
|
||||
|
||||
/* for GELIC_EURUS_CMD_GET_RSSI */
|
||||
struct gelic_eurus_rssi_info {
|
||||
/* big endian */
|
||||
__be16 rssi;
|
||||
} __attribute__ ((packed));
|
||||
|
||||
|
||||
/* for 'stat' member of gelic_wl_info */
|
||||
enum gelic_wl_info_status_bit {
|
||||
GELIC_WL_STAT_CONFIGURED,
|
||||
GELIC_WL_STAT_CH_INFO, /* ch info acquired */
|
||||
GELIC_WL_STAT_ESSID_SET, /* ESSID specified by userspace */
|
||||
GELIC_WL_STAT_BSSID_SET, /* BSSID specified by userspace */
|
||||
GELIC_WL_STAT_WPA_PSK_SET, /* PMK specified by userspace */
|
||||
GELIC_WL_STAT_WPA_LEVEL_SET, /* WEP or WPA[2] selected */
|
||||
};
|
||||
|
||||
/* for 'scan_stat' member of gelic_wl_info */
|
||||
enum gelic_wl_scan_state {
|
||||
/* just initialized or get last scan result failed */
|
||||
GELIC_WL_SCAN_STAT_INIT,
|
||||
/* scan request issued, accepted or chip is scanning */
|
||||
GELIC_WL_SCAN_STAT_SCANNING,
|
||||
/* scan results retrieved */
|
||||
GELIC_WL_SCAN_STAT_GOT_LIST,
|
||||
};
|
||||
|
||||
/* for 'cipher_method' */
|
||||
enum gelic_wl_cipher_method {
|
||||
GELIC_WL_CIPHER_NONE,
|
||||
GELIC_WL_CIPHER_WEP,
|
||||
GELIC_WL_CIPHER_TKIP,
|
||||
GELIC_WL_CIPHER_AES,
|
||||
};
|
||||
|
||||
/* for 'wpa_level' */
|
||||
enum gelic_wl_wpa_level {
|
||||
GELIC_WL_WPA_LEVEL_NONE,
|
||||
GELIC_WL_WPA_LEVEL_WPA,
|
||||
GELIC_WL_WPA_LEVEL_WPA2,
|
||||
};
|
||||
|
||||
/* for 'assoc_stat' */
|
||||
enum gelic_wl_assoc_state {
|
||||
GELIC_WL_ASSOC_STAT_DISCONN,
|
||||
GELIC_WL_ASSOC_STAT_ASSOCIATING,
|
||||
GELIC_WL_ASSOC_STAT_ASSOCIATED,
|
||||
};
|
||||
/* part of private data alloc_etherdev() allocated */
|
||||
#define GELIC_WEP_KEYS 4
|
||||
struct gelic_wl_info {
|
||||
/* bss list */
|
||||
struct semaphore scan_lock;
|
||||
struct list_head network_list;
|
||||
struct list_head network_free_list;
|
||||
struct gelic_wl_scan_info *networks;
|
||||
|
||||
unsigned long scan_age; /* last scanned time */
|
||||
enum gelic_wl_scan_state scan_stat;
|
||||
struct completion scan_done;
|
||||
|
||||
/* eurus command queue */
|
||||
struct workqueue_struct *eurus_cmd_queue;
|
||||
struct completion cmd_done_intr;
|
||||
|
||||
/* eurus event handling */
|
||||
struct workqueue_struct *event_queue;
|
||||
struct delayed_work event_work;
|
||||
|
||||
/* wl status bits */
|
||||
unsigned long stat;
|
||||
enum gelic_eurus_auth_method auth_method; /* open/shared */
|
||||
enum gelic_wl_cipher_method group_cipher_method;
|
||||
enum gelic_wl_cipher_method pairwise_cipher_method;
|
||||
enum gelic_wl_wpa_level wpa_level; /* wpa/wpa2 */
|
||||
|
||||
/* association handling */
|
||||
struct semaphore assoc_stat_lock;
|
||||
struct delayed_work assoc_work;
|
||||
enum gelic_wl_assoc_state assoc_stat;
|
||||
struct completion assoc_done;
|
||||
|
||||
spinlock_t lock;
|
||||
u16 ch_info; /* available channels. bit0 = ch1 */
|
||||
/* WEP keys */
|
||||
u8 key[GELIC_WEP_KEYS][IW_ENCODING_TOKEN_MAX];
|
||||
unsigned long key_enabled;
|
||||
unsigned int key_len[GELIC_WEP_KEYS];
|
||||
unsigned int current_key;
|
||||
/* WPA PSK */
|
||||
u8 psk[GELIC_WL_EURUS_PSK_MAX_LEN];
|
||||
enum gelic_eurus_wpa_psk_type psk_type;
|
||||
unsigned int psk_len;
|
||||
|
||||
u8 essid[IW_ESSID_MAX_SIZE];
|
||||
u8 bssid[ETH_ALEN]; /* userland requested */
|
||||
u8 active_bssid[ETH_ALEN]; /* associated bssid */
|
||||
unsigned int essid_len;
|
||||
|
||||
/* buffer for hypervisor IO */
|
||||
void *buf;
|
||||
|
||||
struct iw_public_data wireless_data;
|
||||
struct iw_statistics iwstat;
|
||||
};
|
||||
|
||||
#define GELIC_WL_BSS_MAX_ENT 32
|
||||
#define GELIC_WL_ASSOC_RETRY 50
|
||||
static inline struct gelic_port *wl_port(struct gelic_wl_info *wl)
|
||||
{
|
||||
return container_of((void *)wl, struct gelic_port, priv);
|
||||
}
|
||||
static inline struct gelic_wl_info *port_wl(struct gelic_port *port)
|
||||
{
|
||||
return port_priv(port);
|
||||
}
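A brief sketch of the layout these helpers assume (inferred from the headers only, since the .c diff is suppressed above): the wireless state sits directly behind struct gelic_port in the netdev private area, so port_priv()/wl_port() reduce to pointer arithmetic over the priv[0] member. The names below are hypothetical.

/* Illustration only; the real driver's allocation may differ. */
static struct net_device *demo_alloc_wireless_netdev(void)
{
	/* One private area: struct gelic_port immediately followed by
	 * the struct gelic_wl_info that port->priv refers to.
	 */
	return alloc_etherdev(sizeof(struct gelic_port) +
			      sizeof(struct gelic_wl_info));
}
/*
 * netdev_priv(netdev) -> struct gelic_port
 * port_priv(port)     -> struct gelic_wl_info  (port->priv)
 * wl_port(wl)         -> container_of(wl, struct gelic_port, priv)
 */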
|
||||
|
||||
struct gelic_eurus_cmd {
|
||||
struct work_struct work;
|
||||
struct gelic_wl_info *wl;
|
||||
unsigned int cmd; /* command code */
|
||||
u64 tag;
|
||||
u64 size;
|
||||
void *buffer;
|
||||
unsigned int buf_size;
|
||||
struct completion done;
|
||||
int status;
|
||||
u64 cmd_status;
|
||||
};
|
||||
|
||||
/* private ioctls to pass PSK */
|
||||
#define GELIC_WL_PRIV_SET_PSK (SIOCIWFIRSTPRIV + 0)
|
||||
#define GELIC_WL_PRIV_GET_PSK (SIOCIWFIRSTPRIV + 1)
|
||||
|
||||
extern int gelic_wl_driver_probe(struct gelic_card *card);
|
||||
extern int gelic_wl_driver_remove(struct gelic_card *card);
|
||||
extern void gelic_wl_interrupt(struct net_device *netdev, u64 status);
|
||||
#endif /* _GELIC_WIRELESS_H */
|
|
@ -61,7 +61,6 @@
|
|||
|
||||
/* Time in jiffies before concluding the transmitter is hung. */
|
||||
#define TX_TIMEOUT (6000 * HZ / 1000)
|
||||
#define TIMER_WUT (jiffies + HZ * 1)/* timer wakeup time : 1 second */
|
||||
|
||||
/* RDC MAC I/O Size */
|
||||
#define R6040_IO_SIZE 256
|
||||
|
@ -174,8 +173,6 @@ struct r6040_private {
|
|||
struct net_device *dev;
|
||||
struct mii_if_info mii_if;
|
||||
struct napi_struct napi;
|
||||
struct net_device_stats stats;
|
||||
u16 napi_rx_running;
|
||||
void __iomem *base;
|
||||
};
|
||||
|
||||
|
@ -235,17 +232,53 @@ static void mdio_write(struct net_device *dev, int mii_id, int reg, int val)
|
|||
phy_write(ioaddr, lp->phy_addr, reg, val);
|
||||
}
|
||||
|
||||
static void r6040_tx_timeout(struct net_device *dev)
|
||||
static void r6040_free_txbufs(struct net_device *dev)
|
||||
{
|
||||
struct r6040_private *priv = netdev_priv(dev);
|
||||
struct r6040_private *lp = netdev_priv(dev);
|
||||
int i;
|
||||
|
||||
disable_irq(dev->irq);
|
||||
napi_disable(&priv->napi);
|
||||
spin_lock(&priv->lock);
|
||||
dev->stats.tx_errors++;
|
||||
spin_unlock(&priv->lock);
|
||||
for (i = 0; i < TX_DCNT; i++) {
|
||||
if (lp->tx_insert_ptr->skb_ptr) {
|
||||
pci_unmap_single(lp->pdev, lp->tx_insert_ptr->buf,
|
||||
MAX_BUF_SIZE, PCI_DMA_TODEVICE);
|
||||
dev_kfree_skb(lp->tx_insert_ptr->skb_ptr);
|
||||
lp->rx_insert_ptr->skb_ptr = NULL;
|
||||
}
|
||||
lp->tx_insert_ptr = lp->tx_insert_ptr->vndescp;
|
||||
}
|
||||
}
|
||||
|
||||
netif_stop_queue(dev);
|
||||
static void r6040_free_rxbufs(struct net_device *dev)
|
||||
{
|
||||
struct r6040_private *lp = netdev_priv(dev);
|
||||
int i;
|
||||
|
||||
for (i = 0; i < RX_DCNT; i++) {
|
||||
if (lp->rx_insert_ptr->skb_ptr) {
|
||||
pci_unmap_single(lp->pdev, lp->rx_insert_ptr->buf,
|
||||
MAX_BUF_SIZE, PCI_DMA_FROMDEVICE);
|
||||
dev_kfree_skb(lp->rx_insert_ptr->skb_ptr);
|
||||
lp->rx_insert_ptr->skb_ptr = NULL;
|
||||
}
|
||||
lp->rx_insert_ptr = lp->rx_insert_ptr->vndescp;
|
||||
}
|
||||
}
|
||||
|
||||
static void r6040_init_ring_desc(struct r6040_descriptor *desc_ring,
|
||||
dma_addr_t desc_dma, int size)
|
||||
{
|
||||
struct r6040_descriptor *desc = desc_ring;
|
||||
dma_addr_t mapping = desc_dma;
|
||||
|
||||
while (size-- > 0) {
|
||||
mapping += sizeof(*desc);
|
||||
desc->ndesc = cpu_to_le32(mapping);
|
||||
desc->vndescp = desc + 1;
|
||||
desc++;
|
||||
}
|
||||
desc--;
|
||||
desc->ndesc = cpu_to_le32(desc_dma);
|
||||
desc->vndescp = desc_ring;
|
||||
}
|
||||
|
||||
/* Allocate skb buffer for rx descriptor */
|
||||
|
@ -256,7 +289,7 @@ static void rx_buf_alloc(struct r6040_private *lp, struct net_device *dev)
|
|||
|
||||
descptr = lp->rx_insert_ptr;
|
||||
while (lp->rx_free_desc < RX_DCNT) {
|
||||
descptr->skb_ptr = dev_alloc_skb(MAX_BUF_SIZE);
|
||||
descptr->skb_ptr = netdev_alloc_skb(dev, MAX_BUF_SIZE);
|
||||
|
||||
if (!descptr->skb_ptr)
|
||||
break;
|
||||
|
@ -272,6 +305,63 @@ static void rx_buf_alloc(struct r6040_private *lp, struct net_device *dev)
|
|||
lp->rx_insert_ptr = descptr;
|
||||
}
|
||||
|
||||
static void r6040_alloc_txbufs(struct net_device *dev)
|
||||
{
|
||||
struct r6040_private *lp = netdev_priv(dev);
|
||||
void __iomem *ioaddr = lp->base;
|
||||
|
||||
lp->tx_free_desc = TX_DCNT;
|
||||
|
||||
lp->tx_remove_ptr = lp->tx_insert_ptr = lp->tx_ring;
|
||||
r6040_init_ring_desc(lp->tx_ring, lp->tx_ring_dma, TX_DCNT);
|
||||
|
||||
iowrite16(lp->tx_ring_dma, ioaddr + MTD_SA0);
|
||||
iowrite16(lp->tx_ring_dma >> 16, ioaddr + MTD_SA1);
|
||||
}
|
||||
|
||||
static void r6040_alloc_rxbufs(struct net_device *dev)
|
||||
{
|
||||
struct r6040_private *lp = netdev_priv(dev);
|
||||
void __iomem *ioaddr = lp->base;
|
||||
|
||||
lp->rx_free_desc = 0;
|
||||
|
||||
lp->rx_remove_ptr = lp->rx_insert_ptr = lp->rx_ring;
|
||||
r6040_init_ring_desc(lp->rx_ring, lp->rx_ring_dma, RX_DCNT);
|
||||
|
||||
rx_buf_alloc(lp, dev);
|
||||
|
||||
iowrite16(lp->rx_ring_dma, ioaddr + MRD_SA0);
|
||||
iowrite16(lp->rx_ring_dma >> 16, ioaddr + MRD_SA1);
|
||||
}
|
||||
|
||||
static void r6040_tx_timeout(struct net_device *dev)
|
||||
{
|
||||
struct r6040_private *priv = netdev_priv(dev);
|
||||
void __iomem *ioaddr = priv->base;
|
||||
|
||||
printk(KERN_WARNING "%s: transmit timed out, status %4.4x, PHY status "
|
||||
"%4.4x\n",
|
||||
dev->name, ioread16(ioaddr + MIER),
|
||||
mdio_read(dev, priv->mii_if.phy_id, MII_BMSR));
|
||||
|
||||
disable_irq(dev->irq);
|
||||
napi_disable(&priv->napi);
|
||||
spin_lock(&priv->lock);
|
||||
/* Clear all descriptors */
|
||||
r6040_free_txbufs(dev);
|
||||
r6040_free_rxbufs(dev);
|
||||
r6040_alloc_txbufs(dev);
|
||||
r6040_alloc_rxbufs(dev);
|
||||
|
||||
/* Reset MAC */
|
||||
iowrite16(MAC_RST, ioaddr + MCR1);
|
||||
spin_unlock(&priv->lock);
|
||||
enable_irq(dev->irq);
|
||||
|
||||
dev->stats.tx_errors++;
|
||||
netif_wake_queue(dev);
|
||||
}
|
||||
|
||||
static struct net_device_stats *r6040_get_stats(struct net_device *dev)
|
||||
{
|
||||
|
@ -280,11 +370,11 @@ static struct net_device_stats *r6040_get_stats(struct net_device *dev)
|
|||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&priv->lock, flags);
|
||||
priv->stats.rx_crc_errors += ioread8(ioaddr + ME_CNT1);
|
||||
priv->stats.multicast += ioread8(ioaddr + ME_CNT0);
|
||||
dev->stats.rx_crc_errors += ioread8(ioaddr + ME_CNT1);
|
||||
dev->stats.multicast += ioread8(ioaddr + ME_CNT0);
|
||||
spin_unlock_irqrestore(&priv->lock, flags);
|
||||
|
||||
return &priv->stats;
|
||||
return &dev->stats;
|
||||
}
|
||||
|
||||
/* Stop RDC MAC and Free the allocated resource */
|
||||
|
@ -293,7 +383,6 @@ static void r6040_down(struct net_device *dev)
|
|||
struct r6040_private *lp = netdev_priv(dev);
|
||||
void __iomem *ioaddr = lp->base;
|
||||
struct pci_dev *pdev = lp->pdev;
|
||||
int i;
|
||||
int limit = 2048;
|
||||
u16 *adrp;
|
||||
u16 cmd;
|
||||
|
@ -313,27 +402,12 @@ static void r6040_down(struct net_device *dev)
|
|||
iowrite16(adrp[1], ioaddr + MID_0M);
|
||||
iowrite16(adrp[2], ioaddr + MID_0H);
|
||||
free_irq(dev->irq, dev);
|
||||
|
||||
/* Free RX buffer */
|
||||
for (i = 0; i < RX_DCNT; i++) {
|
||||
if (lp->rx_insert_ptr->skb_ptr) {
|
||||
pci_unmap_single(lp->pdev, lp->rx_insert_ptr->buf,
|
||||
MAX_BUF_SIZE, PCI_DMA_FROMDEVICE);
|
||||
dev_kfree_skb(lp->rx_insert_ptr->skb_ptr);
|
||||
lp->rx_insert_ptr->skb_ptr = NULL;
|
||||
}
|
||||
lp->rx_insert_ptr = lp->rx_insert_ptr->vndescp;
|
||||
}
|
||||
r6040_free_rxbufs(dev);
|
||||
|
||||
/* Free TX buffer */
|
||||
for (i = 0; i < TX_DCNT; i++) {
|
||||
if (lp->tx_insert_ptr->skb_ptr) {
|
||||
pci_unmap_single(lp->pdev, lp->tx_insert_ptr->buf,
|
||||
MAX_BUF_SIZE, PCI_DMA_TODEVICE);
|
||||
dev_kfree_skb(lp->tx_insert_ptr->skb_ptr);
|
||||
lp->rx_insert_ptr->skb_ptr = NULL;
|
||||
}
|
||||
lp->tx_insert_ptr = lp->tx_insert_ptr->vndescp;
|
||||
}
|
||||
r6040_free_txbufs(dev);
|
||||
|
||||
/* Free Descriptor memory */
|
||||
pci_free_consistent(pdev, RX_DESC_SIZE, lp->rx_ring, lp->rx_ring_dma);
|
||||
|
@ -432,19 +506,24 @@ static int r6040_rx(struct net_device *dev, int limit)
|
|||
|
||||
/* Check for errors */
|
||||
err = ioread16(ioaddr + MLSR);
|
||||
if (err & 0x0400) priv->stats.rx_errors++;
|
||||
if (err & 0x0400)
|
||||
dev->stats.rx_errors++;
|
||||
/* RX FIFO over-run */
|
||||
if (err & 0x8000) priv->stats.rx_fifo_errors++;
|
||||
if (err & 0x8000)
|
||||
dev->stats.rx_fifo_errors++;
|
||||
/* RX descriptor unavailable */
|
||||
if (err & 0x0080) priv->stats.rx_frame_errors++;
|
||||
if (err & 0x0080)
|
||||
dev->stats.rx_frame_errors++;
|
||||
/* Received packet with length over buffer length */
|
||||
if (err & 0x0020) priv->stats.rx_over_errors++;
|
||||
if (err & 0x0020)
|
||||
dev->stats.rx_over_errors++;
|
||||
/* Received packet too long or too short */
|
||||
if (err & (0x0010|0x0008)) priv->stats.rx_length_errors++;
|
||||
if (err & (0x0010 | 0x0008))
|
||||
dev->stats.rx_length_errors++;
|
||||
/* Received packet with CRC errors */
|
||||
if (err & 0x0004) {
|
||||
spin_lock(&priv->lock);
|
||||
priv->stats.rx_crc_errors++;
|
||||
dev->stats.rx_crc_errors++;
|
||||
spin_unlock(&priv->lock);
|
||||
}
|
||||
|
||||
|
@ -469,8 +548,8 @@ static int r6040_rx(struct net_device *dev, int limit)
|
|||
/* Send to upper layer */
|
||||
netif_receive_skb(skb_ptr);
|
||||
dev->last_rx = jiffies;
|
||||
priv->dev->stats.rx_packets++;
|
||||
priv->dev->stats.rx_bytes += descptr->len;
|
||||
dev->stats.rx_packets++;
|
||||
dev->stats.rx_bytes += descptr->len;
|
||||
/* To next descriptor */
|
||||
descptr = descptr->vndescp;
|
||||
priv->rx_free_desc--;
|
||||
|
@ -498,11 +577,13 @@ static void r6040_tx(struct net_device *dev)
|
|||
/* Check for errors */
|
||||
err = ioread16(ioaddr + MLSR);
|
||||
|
||||
if (err & 0x0200) priv->stats.rx_fifo_errors++;
|
||||
if (err & (0x2000 | 0x4000)) priv->stats.tx_carrier_errors++;
|
||||
if (err & 0x0200)
|
||||
dev->stats.rx_fifo_errors++;
|
||||
if (err & (0x2000 | 0x4000))
|
||||
dev->stats.tx_carrier_errors++;
|
||||
|
||||
if (descptr->status & 0x8000)
|
||||
break; /* Not complte */
|
||||
break; /* Not complete */
|
||||
skb_ptr = descptr->skb_ptr;
|
||||
pci_unmap_single(priv->pdev, descptr->buf,
|
||||
skb_ptr->len, PCI_DMA_TODEVICE);
|
||||
|
@ -545,7 +626,6 @@ static irqreturn_t r6040_interrupt(int irq, void *dev_id)
|
|||
struct r6040_private *lp = netdev_priv(dev);
|
||||
void __iomem *ioaddr = lp->base;
|
||||
u16 status;
|
||||
int handled = 1;
|
||||
|
||||
/* Mask off RDC MAC interrupt */
|
||||
iowrite16(MSK_INT, ioaddr + MIER);
|
||||
|
@ -565,7 +645,7 @@ static irqreturn_t r6040_interrupt(int irq, void *dev_id)
|
|||
if (status & 0x10)
|
||||
r6040_tx(dev);
|
||||
|
||||
return IRQ_RETVAL(handled);
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_NET_POLL_CONTROLLER
|
||||
|
@ -577,53 +657,15 @@ static void r6040_poll_controller(struct net_device *dev)
|
|||
}
|
||||
#endif
|
||||
|
||||
static void r6040_init_ring_desc(struct r6040_descriptor *desc_ring,
|
||||
dma_addr_t desc_dma, int size)
|
||||
{
|
||||
struct r6040_descriptor *desc = desc_ring;
|
||||
dma_addr_t mapping = desc_dma;
|
||||
|
||||
while (size-- > 0) {
|
||||
mapping += sizeof(*desc);
|
||||
desc->ndesc = cpu_to_le32(mapping);
|
||||
desc->vndescp = desc + 1;
|
||||
desc++;
|
||||
}
|
||||
desc--;
|
||||
desc->ndesc = cpu_to_le32(desc_dma);
|
||||
desc->vndescp = desc_ring;
|
||||
}
|
||||
|
||||
/* Init RDC MAC */
|
||||
static void r6040_up(struct net_device *dev)
|
||||
{
|
||||
struct r6040_private *lp = netdev_priv(dev);
|
||||
void __iomem *ioaddr = lp->base;
|
||||
|
||||
/* Initialize */
|
||||
lp->tx_free_desc = TX_DCNT;
|
||||
lp->rx_free_desc = 0;
|
||||
/* Init descriptor */
|
||||
lp->tx_remove_ptr = lp->tx_insert_ptr = lp->tx_ring;
|
||||
lp->rx_remove_ptr = lp->rx_insert_ptr = lp->rx_ring;
|
||||
/* Init TX descriptor */
|
||||
r6040_init_ring_desc(lp->tx_ring, lp->tx_ring_dma, TX_DCNT);
|
||||
|
||||
/* Init RX descriptor */
|
||||
r6040_init_ring_desc(lp->rx_ring, lp->rx_ring_dma, RX_DCNT);
|
||||
|
||||
/* Allocate buffer for RX descriptor */
|
||||
rx_buf_alloc(lp, dev);
|
||||
|
||||
/*
|
||||
* TX and RX descriptor start registers.
|
||||
* Lower 16-bits to MxD_SA0. Higher 16-bits to MxD_SA1.
|
||||
*/
|
||||
iowrite16(lp->tx_ring_dma, ioaddr + MTD_SA0);
|
||||
iowrite16(lp->tx_ring_dma >> 16, ioaddr + MTD_SA1);
|
||||
|
||||
iowrite16(lp->rx_ring_dma, ioaddr + MRD_SA0);
|
||||
iowrite16(lp->rx_ring_dma >> 16, ioaddr + MRD_SA1);
|
||||
/* Initialise and alloc RX/TX buffers */
|
||||
r6040_alloc_txbufs(dev);
|
||||
r6040_alloc_rxbufs(dev);
|
||||
|
||||
/* Buffer Size Register */
|
||||
iowrite16(MAX_BUF_SIZE, ioaddr + MR_BSR);
|
||||
|
@ -689,8 +731,7 @@ static void r6040_timer(unsigned long data)
|
|||
}
|
||||
|
||||
/* Timer active again */
|
||||
lp->timer.expires = TIMER_WUT;
|
||||
add_timer(&lp->timer);
|
||||
mod_timer(&lp->timer, jiffies + round_jiffies(HZ));
|
||||
}
|
||||
|
||||
/* Read/set MAC address routines */
|
||||
|
@ -746,14 +787,10 @@ static int r6040_open(struct net_device *dev)
|
|||
napi_enable(&lp->napi);
|
||||
netif_start_queue(dev);
|
||||
|
||||
if (lp->switch_sig != ICPLUS_PHY_ID) {
|
||||
/* set and active a timer process */
|
||||
init_timer(&lp->timer);
|
||||
lp->timer.expires = TIMER_WUT;
|
||||
lp->timer.data = (unsigned long)dev;
|
||||
lp->timer.function = &r6040_timer;
|
||||
add_timer(&lp->timer);
|
||||
}
|
||||
/* set and active a timer process */
|
||||
setup_timer(&lp->timer, r6040_timer, (unsigned long) dev);
|
||||
if (lp->switch_sig != ICPLUS_PHY_ID)
|
||||
mod_timer(&lp->timer, jiffies + HZ);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -1630,7 +1630,8 @@ static inline void sis190_init_rxfilter(struct net_device *dev)
|
|||
SIS_PCI_COMMIT();
|
||||
}
|
||||
|
||||
static int sis190_get_mac_addr(struct pci_dev *pdev, struct net_device *dev)
|
||||
static int __devinit sis190_get_mac_addr(struct pci_dev *pdev,
|
||||
struct net_device *dev)
|
||||
{
|
||||
u8 from;
|
||||
|
||||
|
|
|
@ -114,11 +114,20 @@ do { \
|
|||
debug_event(claw_dbf_##name,level,(void*)(addr),len); \
|
||||
} while (0)
|
||||
|
||||
/* Allow to sort out low debug levels early to avoid wasted sprints */
|
||||
static inline int claw_dbf_passes(debug_info_t *dbf_grp, int level)
|
||||
{
|
||||
return (level <= dbf_grp->level);
|
||||
}
|
||||
|
||||
#define CLAW_DBF_TEXT_(level,name,text...) \
do { \
	sprintf(debug_buffer, text); \
	debug_text_event(claw_dbf_##name,level, debug_buffer);\
} while (0)
do { \
	if (claw_dbf_passes(claw_dbf_##name, level)) { \
		sprintf(debug_buffer, text); \
		debug_text_event(claw_dbf_##name, level, \
			debug_buffer); \
	} \
} while (0)
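Illustrative aside, not part of the patch: the same guard is applied to the lcs and netiucv macros further down. The point is simply to check the debug facility's current level before paying for the sprintf. A generic sketch, where my_dbf (a debug_info_t pointer) and my_buf are hypothetical:

#define MY_DBF_TEXT_(level, text...)				\
do {								\
	if ((level) <= my_dbf->level) {				\
		sprintf(my_buf, text);				\
		debug_text_event(my_dbf, (level), my_buf);	\
	}							\
} while (0)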
|
||||
|
||||
/*******************************************************
|
||||
* Define Control Blocks *
|
||||
|
@ -278,8 +287,6 @@ struct claw_env {
|
|||
__u16 write_size; /* write buffer size */
|
||||
__u16 dev_id; /* device ident */
|
||||
__u8 packing; /* are we packing? */
|
||||
volatile __u8 queme_switch; /* gate for imed packing */
|
||||
volatile unsigned long pk_delay; /* Delay for adaptive packing */
|
||||
__u8 in_use; /* device active flag */
|
||||
struct net_device *ndev; /* backward ptr to the net dev*/
|
||||
};
|
||||
|
|
|
@ -94,7 +94,7 @@ static int
|
|||
lcs_register_debug_facility(void)
|
||||
{
|
||||
lcs_dbf_setup = debug_register("lcs_setup", 2, 1, 8);
|
||||
lcs_dbf_trace = debug_register("lcs_trace", 2, 2, 8);
|
||||
lcs_dbf_trace = debug_register("lcs_trace", 4, 1, 8);
|
||||
if (lcs_dbf_setup == NULL || lcs_dbf_trace == NULL) {
|
||||
PRINT_ERR("Not enough memory for debug facility.\n");
|
||||
lcs_unregister_debug_facility();
|
||||
|
|
|
@ -16,11 +16,19 @@ do { \
|
|||
debug_event(lcs_dbf_##name,level,(void*)(addr),len); \
|
||||
} while (0)
|
||||
|
||||
/* Allow to sort out low debug levels early to avoid wasted sprints */
|
||||
static inline int lcs_dbf_passes(debug_info_t *dbf_grp, int level)
|
||||
{
|
||||
return (level <= dbf_grp->level);
|
||||
}
|
||||
|
||||
#define LCS_DBF_TEXT_(level,name,text...) \
|
||||
do { \
|
||||
sprintf(debug_buffer, text); \
|
||||
debug_text_event(lcs_dbf_##name,level, debug_buffer);\
|
||||
} while (0)
|
||||
do { \
|
||||
if (lcs_dbf_passes(lcs_dbf_##name, level)) { \
|
||||
sprintf(debug_buffer, text); \
|
||||
debug_text_event(lcs_dbf_##name, level, debug_buffer); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
/**
|
||||
* sysfs related stuff
|
||||
|
|
|
@ -97,12 +97,22 @@ MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver");
|
|||
|
||||
DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf);
|
||||
|
||||
#define IUCV_DBF_TEXT_(name,level,text...) \
|
||||
do { \
|
||||
char* iucv_dbf_txt_buf = get_cpu_var(iucv_dbf_txt_buf); \
|
||||
sprintf(iucv_dbf_txt_buf, text); \
|
||||
debug_text_event(iucv_dbf_##name,level,iucv_dbf_txt_buf); \
|
||||
put_cpu_var(iucv_dbf_txt_buf); \
|
||||
/* Allow to sort out low debug levels early to avoid wasted sprints */
|
||||
static inline int iucv_dbf_passes(debug_info_t *dbf_grp, int level)
|
||||
{
|
||||
return (level <= dbf_grp->level);
|
||||
}
|
||||
|
||||
#define IUCV_DBF_TEXT_(name, level, text...) \
|
||||
do { \
|
||||
if (iucv_dbf_passes(iucv_dbf_##name, level)) { \
|
||||
char* iucv_dbf_txt_buf = \
|
||||
get_cpu_var(iucv_dbf_txt_buf); \
|
||||
sprintf(iucv_dbf_txt_buf, text); \
|
||||
debug_text_event(iucv_dbf_##name, level, \
|
||||
iucv_dbf_txt_buf); \
|
||||
put_cpu_var(iucv_dbf_txt_buf); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define IUCV_DBF_SPRINTF(name,level,text...) \
|
||||
|
@ -137,6 +147,7 @@ PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
|
|||
#define PRINTK_HEADER " iucv: " /* for debugging */
|
||||
|
||||
static struct device_driver netiucv_driver = {
|
||||
.owner = THIS_MODULE,
|
||||
.name = "netiucv",
|
||||
.bus = &iucv_bus,
|
||||
};
|
||||
|
@ -572,9 +583,9 @@ static void netiucv_callback_connres(struct iucv_path *path, u8 ipuser[16])
|
|||
}
|
||||
|
||||
/**
|
||||
* Dummy NOP action for all statemachines
|
||||
* NOP action for statemachines
|
||||
*/
|
||||
static void fsm_action_nop(fsm_instance *fi, int event, void *arg)
|
||||
static void netiucv_action_nop(fsm_instance *fi, int event, void *arg)
|
||||
{
|
||||
}
|
||||
|
||||
|
@ -1110,7 +1121,7 @@ static const fsm_node dev_fsm[] = {
|
|||
|
||||
{ DEV_STATE_RUNNING, DEV_EVENT_STOP, dev_action_stop },
|
||||
{ DEV_STATE_RUNNING, DEV_EVENT_CONDOWN, dev_action_conndown },
|
||||
{ DEV_STATE_RUNNING, DEV_EVENT_CONUP, fsm_action_nop },
|
||||
{ DEV_STATE_RUNNING, DEV_EVENT_CONUP, netiucv_action_nop },
|
||||
};
|
||||
|
||||
static const int DEV_FSM_LEN = sizeof(dev_fsm) / sizeof(fsm_node);
|
||||
|
|
|
@ -19,6 +19,8 @@
|
|||
#define DM9000_PLATF_8BITONLY (0x0001)
|
||||
#define DM9000_PLATF_16BITONLY (0x0002)
|
||||
#define DM9000_PLATF_32BITONLY (0x0004)
|
||||
#define DM9000_PLATF_EXT_PHY (0x0008)
|
||||
#define DM9000_PLATF_NO_EEPROM (0x0010)
|
||||
|
||||
/* platform data for platform device structure's platform_data field */
|
||||
|
||||
|
|
|
@ -604,6 +604,10 @@ struct net_device
|
|||
|
||||
unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */
|
||||
|
||||
/* ingress path synchronizer */
|
||||
spinlock_t ingress_lock;
|
||||
struct Qdisc *qdisc_ingress;
|
||||
|
||||
/*
|
||||
* Cache line mostly used on queue transmit path (qdisc)
|
||||
*/
|
||||
|
@ -617,10 +621,6 @@ struct net_device
|
|||
/* Partially transmitted GSO packet. */
|
||||
struct sk_buff *gso_skb;
|
||||
|
||||
/* ingress path synchronizer */
|
||||
spinlock_t ingress_lock;
|
||||
struct Qdisc *qdisc_ingress;
|
||||
|
||||
/*
|
||||
* One part is mostly used on xmit path (device)
|
||||
*/
|
||||
|
|
|
@ -324,6 +324,7 @@ extern void ax25_dama_on(ax25_cb *);
|
|||
extern void ax25_dama_off(ax25_cb *);
|
||||
|
||||
/* ax25_ds_timer.c */
|
||||
extern void ax25_ds_setup_timer(ax25_dev *);
|
||||
extern void ax25_ds_set_timer(ax25_dev *);
|
||||
extern void ax25_ds_del_timer(ax25_dev *);
|
||||
extern void ax25_ds_timer(ax25_cb *);
|
||||
|
@ -416,6 +417,7 @@ extern void ax25_calculate_rtt(ax25_cb *);
|
|||
extern void ax25_disconnect(ax25_cb *, int);
|
||||
|
||||
/* ax25_timer.c */
|
||||
extern void ax25_setup_timers(ax25_cb *);
|
||||
extern void ax25_start_heartbeat(ax25_cb *);
|
||||
extern void ax25_start_t1timer(ax25_cb *);
|
||||
extern void ax25_start_t2timer(ax25_cb *);
|
||||
|
|
|
@ -103,7 +103,6 @@ extern void ndisc_send_redirect(struct sk_buff *skb,
|
|||
extern int ndisc_mc_map(struct in6_addr *addr, char *buf, struct net_device *dev, int dir);
|
||||
|
||||
|
||||
struct rt6_info * dflt_rt_lookup(void);
|
||||
|
||||
/*
|
||||
* IGMP
|
||||
|
|
|
@ -508,7 +508,10 @@ struct xfrm_skb_cb {
|
|||
} header;
|
||||
|
||||
/* Sequence number for replay protection. */
|
||||
u64 seq;
|
||||
union {
|
||||
u64 output;
|
||||
__be32 input;
|
||||
} seq;
|
||||
};
|
||||
|
||||
#define XFRM_SKB_CB(__skb) ((struct xfrm_skb_cb *)&((__skb)->cb[0]))
|
||||
|
|
|
@ -510,11 +510,7 @@ ax25_cb *ax25_create_cb(void)
|
|||
skb_queue_head_init(&ax25->ack_queue);
|
||||
skb_queue_head_init(&ax25->reseq_queue);
|
||||
|
||||
init_timer(&ax25->timer);
|
||||
init_timer(&ax25->t1timer);
|
||||
init_timer(&ax25->t2timer);
|
||||
init_timer(&ax25->t3timer);
|
||||
init_timer(&ax25->idletimer);
|
||||
ax25_setup_timers(ax25);
|
||||
|
||||
ax25_fillin_cb(ax25, NULL);
|
||||
|
||||
|
@ -1928,12 +1924,10 @@ static int ax25_info_show(struct seq_file *seq, void *v)
|
|||
ax25->paclen);
|
||||
|
||||
if (ax25->sk != NULL) {
|
||||
bh_lock_sock(ax25->sk);
|
||||
seq_printf(seq," %d %d %ld\n",
|
||||
seq_printf(seq, " %d %d %lu\n",
|
||||
atomic_read(&ax25->sk->sk_wmem_alloc),
|
||||
atomic_read(&ax25->sk->sk_rmem_alloc),
|
||||
ax25->sk->sk_socket != NULL ? SOCK_INODE(ax25->sk->sk_socket)->i_ino : 0L);
|
||||
bh_unlock_sock(ax25->sk);
|
||||
sock_i_ino(ax25->sk));
|
||||
} else {
|
||||
seq_puts(seq, " * * *\n");
|
||||
}
|
||||
|
|
|
@ -82,7 +82,7 @@ void ax25_dev_device_up(struct net_device *dev)
|
|||
ax25_dev->values[AX25_VALUES_DS_TIMEOUT]= AX25_DEF_DS_TIMEOUT;
|
||||
|
||||
#if defined(CONFIG_AX25_DAMA_SLAVE) || defined(CONFIG_AX25_DAMA_MASTER)
|
||||
init_timer(&ax25_dev->dama.slave_timer);
|
||||
ax25_ds_setup_timer(ax25_dev);
|
||||
#endif
|
||||
|
||||
spin_lock_bh(&ax25_dev_lock);
|
||||
|
|
|
@ -40,13 +40,10 @@ static void ax25_ds_timeout(unsigned long);
|
|||
* 1/10th of a second.
|
||||
*/
|
||||
|
||||
static void ax25_ds_add_timer(ax25_dev *ax25_dev)
|
||||
void ax25_ds_setup_timer(ax25_dev *ax25_dev)
|
||||
{
|
||||
struct timer_list *t = &ax25_dev->dama.slave_timer;
|
||||
t->data = (unsigned long) ax25_dev;
|
||||
t->function = &ax25_ds_timeout;
|
||||
t->expires = jiffies + HZ;
|
||||
add_timer(t);
|
||||
setup_timer(&ax25_dev->dama.slave_timer, ax25_ds_timeout,
|
||||
(unsigned long)ax25_dev);
|
||||
}
|
||||
|
||||
void ax25_ds_del_timer(ax25_dev *ax25_dev)
|
||||
|
@ -60,10 +57,9 @@ void ax25_ds_set_timer(ax25_dev *ax25_dev)
|
|||
if (ax25_dev == NULL) /* paranoia */
|
||||
return;
|
||||
|
||||
del_timer(&ax25_dev->dama.slave_timer);
|
||||
ax25_dev->dama.slave_timeout =
|
||||
msecs_to_jiffies(ax25_dev->values[AX25_VALUES_DS_TIMEOUT]) / 10;
|
||||
ax25_ds_add_timer(ax25_dev);
|
||||
mod_timer(&ax25_dev->dama.slave_timer, jiffies + HZ);
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
@ -45,7 +45,7 @@ void ax25_rt_device_down(struct net_device *dev)
|
|||
{
|
||||
ax25_route *s, *t, *ax25_rt;
|
||||
|
||||
write_lock(&ax25_route_lock);
|
||||
write_lock_bh(&ax25_route_lock);
|
||||
ax25_rt = ax25_route_list;
|
||||
while (ax25_rt != NULL) {
|
||||
s = ax25_rt;
|
||||
|
@ -68,7 +68,7 @@ void ax25_rt_device_down(struct net_device *dev)
|
|||
}
|
||||
}
|
||||
}
|
||||
write_unlock(&ax25_route_lock);
|
||||
write_unlock_bh(&ax25_route_lock);
|
||||
}
|
||||
|
||||
static int __must_check ax25_rt_add(struct ax25_routes_struct *route)
|
||||
|
@ -82,7 +82,7 @@ static int __must_check ax25_rt_add(struct ax25_routes_struct *route)
|
|||
if (route->digi_count > AX25_MAX_DIGIS)
|
||||
return -EINVAL;
|
||||
|
||||
write_lock(&ax25_route_lock);
|
||||
write_lock_bh(&ax25_route_lock);
|
||||
|
||||
ax25_rt = ax25_route_list;
|
||||
while (ax25_rt != NULL) {
|
||||
|
@ -92,7 +92,7 @@ static int __must_check ax25_rt_add(struct ax25_routes_struct *route)
|
|||
ax25_rt->digipeat = NULL;
|
||||
if (route->digi_count != 0) {
|
||||
if ((ax25_rt->digipeat = kmalloc(sizeof(ax25_digi), GFP_ATOMIC)) == NULL) {
|
||||
write_unlock(&ax25_route_lock);
|
||||
write_unlock_bh(&ax25_route_lock);
|
||||
return -ENOMEM;
|
||||
}
|
||||
ax25_rt->digipeat->lastrepeat = -1;
|
||||
|
@ -102,14 +102,14 @@ static int __must_check ax25_rt_add(struct ax25_routes_struct *route)
|
|||
ax25_rt->digipeat->calls[i] = route->digi_addr[i];
|
||||
}
|
||||
}
|
||||
write_unlock(&ax25_route_lock);
|
||||
write_unlock_bh(&ax25_route_lock);
|
||||
return 0;
|
||||
}
|
||||
ax25_rt = ax25_rt->next;
|
||||
}
|
||||
|
||||
if ((ax25_rt = kmalloc(sizeof(ax25_route), GFP_ATOMIC)) == NULL) {
|
||||
write_unlock(&ax25_route_lock);
|
||||
write_unlock_bh(&ax25_route_lock);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
|
@ -120,7 +120,7 @@ static int __must_check ax25_rt_add(struct ax25_routes_struct *route)
|
|||
ax25_rt->ip_mode = ' ';
|
||||
if (route->digi_count != 0) {
|
||||
if ((ax25_rt->digipeat = kmalloc(sizeof(ax25_digi), GFP_ATOMIC)) == NULL) {
|
||||
write_unlock(&ax25_route_lock);
|
||||
write_unlock_bh(&ax25_route_lock);
|
||||
kfree(ax25_rt);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
@ -133,7 +133,7 @@ static int __must_check ax25_rt_add(struct ax25_routes_struct *route)
|
|||
}
|
||||
ax25_rt->next = ax25_route_list;
|
||||
ax25_route_list = ax25_rt;
|
||||
write_unlock(&ax25_route_lock);
|
||||
write_unlock_bh(&ax25_route_lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -152,7 +152,7 @@ static int ax25_rt_del(struct ax25_routes_struct *route)
|
|||
if ((ax25_dev = ax25_addr_ax25dev(&route->port_addr)) == NULL)
|
||||
return -EINVAL;
|
||||
|
||||
write_lock(&ax25_route_lock);
|
||||
write_lock_bh(&ax25_route_lock);
|
||||
|
||||
ax25_rt = ax25_route_list;
|
||||
while (ax25_rt != NULL) {
|
||||
|
@ -174,7 +174,7 @@ static int ax25_rt_del(struct ax25_routes_struct *route)
|
|||
}
|
||||
}
|
||||
}
|
||||
write_unlock(&ax25_route_lock);
|
||||
write_unlock_bh(&ax25_route_lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -188,7 +188,7 @@ static int ax25_rt_opt(struct ax25_route_opt_struct *rt_option)
|
|||
if ((ax25_dev = ax25_addr_ax25dev(&rt_option->port_addr)) == NULL)
|
||||
return -EINVAL;
|
||||
|
||||
write_lock(&ax25_route_lock);
|
||||
write_lock_bh(&ax25_route_lock);
|
||||
|
||||
ax25_rt = ax25_route_list;
|
||||
while (ax25_rt != NULL) {
|
||||
|
@ -216,7 +216,7 @@ static int ax25_rt_opt(struct ax25_route_opt_struct *rt_option)
|
|||
}
|
||||
|
||||
out:
|
||||
write_unlock(&ax25_route_lock);
|
||||
write_unlock_bh(&ax25_route_lock);
|
||||
return err;
|
||||
}
|
||||
|
||||
|
@ -492,7 +492,7 @@ void __exit ax25_rt_free(void)
|
|||
{
|
||||
ax25_route *s, *ax25_rt = ax25_route_list;
|
||||
|
||||
write_lock(&ax25_route_lock);
|
||||
write_lock_bh(&ax25_route_lock);
|
||||
while (ax25_rt != NULL) {
|
||||
s = ax25_rt;
|
||||
ax25_rt = ax25_rt->next;
|
||||
|
@ -500,5 +500,5 @@ void __exit ax25_rt_free(void)
|
|||
kfree(s->digipeat);
|
||||
kfree(s);
|
||||
}
|
||||
write_unlock(&ax25_route_lock);
|
||||
write_unlock_bh(&ax25_route_lock);
|
||||
}
|
||||
|
|
|
@ -40,63 +40,45 @@ static void ax25_t2timer_expiry(unsigned long);
|
|||
static void ax25_t3timer_expiry(unsigned long);
|
||||
static void ax25_idletimer_expiry(unsigned long);
|
||||
|
||||
void ax25_setup_timers(ax25_cb *ax25)
|
||||
{
|
||||
setup_timer(&ax25->timer, ax25_heartbeat_expiry, (unsigned long)ax25);
|
||||
setup_timer(&ax25->t1timer, ax25_t1timer_expiry, (unsigned long)ax25);
|
||||
setup_timer(&ax25->t2timer, ax25_t2timer_expiry, (unsigned long)ax25);
|
||||
setup_timer(&ax25->t3timer, ax25_t3timer_expiry, (unsigned long)ax25);
|
||||
setup_timer(&ax25->idletimer, ax25_idletimer_expiry,
|
||||
(unsigned long)ax25);
|
||||
}
|
||||
|
||||
void ax25_start_heartbeat(ax25_cb *ax25)
|
||||
{
|
||||
del_timer(&ax25->timer);
|
||||
|
||||
ax25->timer.data = (unsigned long)ax25;
|
||||
ax25->timer.function = &ax25_heartbeat_expiry;
|
||||
ax25->timer.expires = jiffies + 5 * HZ;
|
||||
|
||||
add_timer(&ax25->timer);
|
||||
mod_timer(&ax25->timer, jiffies + 5 * HZ);
|
||||
}
|
||||
|
||||
void ax25_start_t1timer(ax25_cb *ax25)
|
||||
{
|
||||
del_timer(&ax25->t1timer);
|
||||
|
||||
ax25->t1timer.data = (unsigned long)ax25;
|
||||
ax25->t1timer.function = &ax25_t1timer_expiry;
|
||||
ax25->t1timer.expires = jiffies + ax25->t1;
|
||||
|
||||
add_timer(&ax25->t1timer);
|
||||
mod_timer(&ax25->t1timer, jiffies + ax25->t1);
|
||||
}
|
||||
|
||||
void ax25_start_t2timer(ax25_cb *ax25)
|
||||
{
|
||||
del_timer(&ax25->t2timer);
|
||||
|
||||
ax25->t2timer.data = (unsigned long)ax25;
|
||||
ax25->t2timer.function = &ax25_t2timer_expiry;
|
||||
ax25->t2timer.expires = jiffies + ax25->t2;
|
||||
|
||||
add_timer(&ax25->t2timer);
|
||||
mod_timer(&ax25->t2timer, jiffies + ax25->t2);
|
||||
}
|
||||
|
||||
void ax25_start_t3timer(ax25_cb *ax25)
|
||||
{
|
||||
del_timer(&ax25->t3timer);
|
||||
|
||||
if (ax25->t3 > 0) {
|
||||
ax25->t3timer.data = (unsigned long)ax25;
|
||||
ax25->t3timer.function = &ax25_t3timer_expiry;
|
||||
ax25->t3timer.expires = jiffies + ax25->t3;
|
||||
|
||||
add_timer(&ax25->t3timer);
|
||||
}
|
||||
if (ax25->t3 > 0)
|
||||
mod_timer(&ax25->t3timer, jiffies + ax25->t3);
|
||||
else
|
||||
del_timer(&ax25->t3timer);
|
||||
}
|
||||
|
||||
void ax25_start_idletimer(ax25_cb *ax25)
|
||||
{
|
||||
del_timer(&ax25->idletimer);
|
||||
|
||||
if (ax25->idle > 0) {
|
||||
ax25->idletimer.data = (unsigned long)ax25;
|
||||
ax25->idletimer.function = &ax25_idletimer_expiry;
|
||||
ax25->idletimer.expires = jiffies + ax25->idle;
|
||||
|
||||
add_timer(&ax25->idletimer);
|
||||
}
|
||||
if (ax25->idle > 0)
|
||||
mod_timer(&ax25->idletimer, jiffies + ax25->idle);
|
||||
else
|
||||
del_timer(&ax25->idletimer);
|
||||
}
|
||||
|
||||
void ax25_stop_heartbeat(ax25_cb *ax25)
|
||||
|
|
|
@ -1071,8 +1071,6 @@ int dev_close(struct net_device *dev)
|
|||
*/
|
||||
call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
|
||||
|
||||
dev_deactivate(dev);
|
||||
|
||||
clear_bit(__LINK_STATE_START, &dev->state);
|
||||
|
||||
/* Synchronize to scheduled poll. We cannot touch poll list,
|
||||
|
@ -1083,6 +1081,8 @@ int dev_close(struct net_device *dev)
|
|||
*/
|
||||
smp_mb__after_clear_bit(); /* Commit netif_running(). */
|
||||
|
||||
dev_deactivate(dev);
|
||||
|
||||
/*
|
||||
* Call the device specific close. This cannot fail.
|
||||
* Only if device is UP
|
||||
|
|
|
@ -834,18 +834,12 @@ static void neigh_timer_handler(unsigned long arg)
|
|||
}
|
||||
if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
|
||||
struct sk_buff *skb = skb_peek(&neigh->arp_queue);
|
||||
/* keep skb alive even if arp_queue overflows */
|
||||
if (skb)
|
||||
skb_get(skb);
|
||||
write_unlock(&neigh->lock);
|
||||
|
||||
neigh->ops->solicit(neigh, skb);
|
||||
atomic_inc(&neigh->probes);
|
||||
if (skb)
|
||||
kfree_skb(skb);
|
||||
} else {
|
||||
out:
|
||||
write_unlock(&neigh->lock);
|
||||
}
|
||||
out:
|
||||
write_unlock(&neigh->lock);
|
||||
|
||||
if (notify)
|
||||
neigh_update_notify(neigh);
|
||||
|
|
|
@ -504,7 +504,7 @@ int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
|
|||
|
||||
EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo);
|
||||
|
||||
static void set_operstate(struct net_device *dev, unsigned char transition)
|
||||
static int set_operstate(struct net_device *dev, unsigned char transition, bool send_notification)
|
||||
{
|
||||
unsigned char operstate = dev->operstate;
|
||||
|
||||
|
@ -527,8 +527,12 @@ static void set_operstate(struct net_device *dev, unsigned char transition)
|
|||
write_lock_bh(&dev_base_lock);
|
||||
dev->operstate = operstate;
|
||||
write_unlock_bh(&dev_base_lock);
|
||||
netdev_state_change(dev);
|
||||
}
|
||||
|
||||
if (send_notification)
|
||||
netdev_state_change(dev);
|
||||
return 1;
|
||||
} else
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
|
||||
|
@ -822,6 +826,7 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
|
|||
if (tb[IFLA_BROADCAST]) {
|
||||
nla_memcpy(dev->broadcast, tb[IFLA_BROADCAST], dev->addr_len);
|
||||
send_addr_notify = 1;
|
||||
modified = 1;
|
||||
}
|
||||
|
||||
if (ifm->ifi_flags || ifm->ifi_change) {
|
||||
|
@ -834,16 +839,23 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
|
|||
dev_change_flags(dev, flags);
|
||||
}
|
||||
|
||||
if (tb[IFLA_TXQLEN])
|
||||
dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);
|
||||
if (tb[IFLA_TXQLEN]) {
|
||||
if (dev->tx_queue_len != nla_get_u32(tb[IFLA_TXQLEN])) {
|
||||
dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);
|
||||
modified = 1;
|
||||
}
|
||||
}
|
||||
|
||||
if (tb[IFLA_OPERSTATE])
|
||||
set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
|
||||
modified |= set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]), false);
|
||||
|
||||
if (tb[IFLA_LINKMODE]) {
|
||||
write_lock_bh(&dev_base_lock);
|
||||
dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
|
||||
write_unlock_bh(&dev_base_lock);
|
||||
if (dev->link_mode != nla_get_u8(tb[IFLA_LINKMODE])) {
|
||||
write_lock_bh(&dev_base_lock);
|
||||
dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
|
||||
write_unlock_bh(&dev_base_lock);
|
||||
modified = 1;
|
||||
}
|
||||
}
|
||||
|
||||
err = 0;
|
||||
|
@ -857,6 +869,10 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
|
|||
|
||||
if (send_addr_notify)
|
||||
call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
|
||||
|
||||
if (modified)
|
||||
netdev_state_change(dev);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
|
@ -974,7 +990,7 @@ struct net_device *rtnl_create_link(struct net *net, char *ifname,
|
|||
if (tb[IFLA_TXQLEN])
|
||||
dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);
|
||||
if (tb[IFLA_OPERSTATE])
|
||||
set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
|
||||
set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]), true);
|
||||
if (tb[IFLA_LINKMODE])
|
||||
dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
|
||||
|
||||
|
|
|
@ -2106,11 +2106,10 @@ int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
|
|||
/**
|
||||
* skb_pull_rcsum - pull skb and update receive checksum
|
||||
* @skb: buffer to update
|
||||
* @start: start of data before pull
|
||||
* @len: length of data pulled
|
||||
*
|
||||
* This function performs an skb_pull on the packet and updates
|
||||
* update the CHECKSUM_COMPLETE checksum. It should be used on
|
||||
* the CHECKSUM_COMPLETE checksum. It should be used on
|
||||
* receive path processing instead of skb_pull unless you know
|
||||
* that the checksum difference is zero (e.g., a valid IP header)
|
||||
* or you are setting ip_summed to CHECKSUM_NONE.
|
||||
|
|
|
@ -96,7 +96,7 @@ static int ah_output(struct xfrm_state *x, struct sk_buff *skb)
|
|||
|
||||
ah->reserved = 0;
|
||||
ah->spi = x->id.spi;
|
||||
ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq);
|
||||
ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output);
|
||||
|
||||
spin_lock_bh(&x->lock);
|
||||
err = ah_mac_digest(ahp, skb, ah->auth_data);
|
||||
|
|
|
@ -368,7 +368,6 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
|
|||
if (!(neigh->nud_state&NUD_VALID))
|
||||
printk(KERN_DEBUG "trying to ucast probe in NUD_INVALID\n");
|
||||
dst_ha = neigh->ha;
|
||||
read_lock_bh(&neigh->lock);
|
||||
} else if ((probes -= neigh->parms->app_probes) < 0) {
|
||||
#ifdef CONFIG_ARPD
|
||||
neigh_app_ns(neigh);
|
||||
|
@ -378,8 +377,6 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
|
|||
|
||||
arp_send(ARPOP_REQUEST, ETH_P_ARP, target, dev, saddr,
|
||||
dst_ha, dev->dev_addr, NULL);
|
||||
if (dst_ha)
|
||||
read_unlock_bh(&neigh->lock);
|
||||
}
|
||||
|
||||
static int arp_ignore(struct in_device *in_dev, __be32 sip, __be32 tip)
|
||||
|
|
|
@@ -199,7 +199,7 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
}

esph->spi = x->id.spi;
esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq);
esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output);

sg_init_table(sg, nfrags);
skb_to_sgvec(skb, sg,

@@ -210,7 +210,8 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
aead_givcrypt_set_callback(req, 0, esp_output_done, skb);
aead_givcrypt_set_crypt(req, sg, sg, clen, iv);
aead_givcrypt_set_assoc(req, asg, sizeof(*esph));
aead_givcrypt_set_giv(req, esph->enc_data, XFRM_SKB_CB(skb)->seq);
aead_givcrypt_set_giv(req, esph->enc_data,
XFRM_SKB_CB(skb)->seq.output);

ESP_SKB_CB(skb)->tmp = tmp;
err = crypto_aead_givencrypt(req);
@@ -1762,11 +1762,9 @@ static struct leaf *trie_leafindex(struct trie *t, int index)
{
struct leaf *l = trie_firstleaf(t);

while (index-- > 0) {
while (l && index-- > 0)
l = trie_nextleaf(l);
if (!l)
break;
}

return l;
}

@@ -2461,6 +2459,84 @@ static const struct file_operations fib_trie_fops = {
.release = seq_release_net,
};

struct fib_route_iter {
struct seq_net_private p;
struct trie *main_trie;
loff_t pos;
t_key key;
};

static struct leaf *fib_route_get_idx(struct fib_route_iter *iter, loff_t pos)
{
struct leaf *l = NULL;
struct trie *t = iter->main_trie;

/* use cache location of last found key */
if (iter->pos > 0 && pos >= iter->pos && (l = fib_find_node(t, iter->key)))
pos -= iter->pos;
else {
iter->pos = 0;
l = trie_firstleaf(t);
}

while (l && pos-- > 0) {
iter->pos++;
l = trie_nextleaf(l);
}

if (l)
iter->key = pos; /* remember it */
else
iter->pos = 0; /* forget it */

return l;
}

static void *fib_route_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(RCU)
{
struct fib_route_iter *iter = seq->private;
struct fib_table *tb;

rcu_read_lock();
tb = fib_get_table(iter->p.net, RT_TABLE_MAIN);
if (!tb)
return NULL;

iter->main_trie = (struct trie *) tb->tb_data;
if (*pos == 0)
return SEQ_START_TOKEN;
else
return fib_route_get_idx(iter, *pos - 1);
}

static void *fib_route_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct fib_route_iter *iter = seq->private;
struct leaf *l = v;

++*pos;
if (v == SEQ_START_TOKEN) {
iter->pos = 0;
l = trie_firstleaf(iter->main_trie);
} else {
iter->pos++;
l = trie_nextleaf(l);
}

if (l)
iter->key = l->key;
else
iter->pos = 0;
return l;
}

static void fib_route_seq_stop(struct seq_file *seq, void *v)
__releases(RCU)
{
rcu_read_unlock();
}

static unsigned fib_flag_trans(int type, __be32 mask, const struct fib_info *fi)
{
static unsigned type2flags[RTN_MAX + 1] = {

@@ -2484,7 +2560,6 @@ static unsigned fib_flag_trans(int type, __be32 mask, const struct fib_info *fi)
 */
static int fib_route_seq_show(struct seq_file *seq, void *v)
{
const struct fib_trie_iter *iter = seq->private;
struct leaf *l = v;
struct leaf_info *li;
struct hlist_node *node;

@@ -2496,12 +2571,6 @@ static int fib_route_seq_show(struct seq_file *seq, void *v)
return 0;
}

if (iter->trie == iter->trie_local)
return 0;

if (IS_TNODE(l))
return 0;

hlist_for_each_entry_rcu(li, node, &l->list, hlist) {
struct fib_alias *fa;
__be32 mask, prefix;

@@ -2544,16 +2613,16 @@ static int fib_route_seq_show(struct seq_file *seq, void *v)
}

static const struct seq_operations fib_route_seq_ops = {
.start = fib_trie_seq_start,
.next = fib_trie_seq_next,
.stop = fib_trie_seq_stop,
.start = fib_route_seq_start,
.next = fib_route_seq_next,
.stop = fib_route_seq_stop,
.show = fib_route_seq_show,
};

static int fib_route_seq_open(struct inode *inode, struct file *file)
{
return seq_open_net(inode, file, &fib_route_seq_ops,
sizeof(struct fib_trie_iter));
sizeof(struct fib_route_iter));
}

static const struct file_operations fib_route_fops = {
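The new fib_route_* iterator above avoids re-walking the trie from the beginning on every read by remembering the last position and key it returned. A stripped-down sketch of that caching idea over a plain linked list follows; the types and helpers are illustrative stand-ins, not the trie API.

#include <stddef.h>

struct item {
        struct item *next;
        unsigned int key;
};

struct iter_cache {
        size_t pos;             /* position of the cached item, 0 = empty */
        unsigned int key;       /* key of the cached item */
};

/* Illustrative lookup by key; stands in for fib_find_node(). */
static struct item *find_by_key(struct item *head, unsigned int key)
{
        for (; head; head = head->next)
                if (head->key == key)
                        return head;
        return NULL;
}

/* Return the item at 'pos', resuming from the cached key when the
 * requested position is at or past the cached one. */
static struct item *get_idx(struct item *head, struct iter_cache *c, size_t pos)
{
        struct item *it = NULL;

        if (c->pos > 0 && pos >= c->pos && (it = find_by_key(head, c->key)))
                pos -= c->pos;
        else {
                c->pos = 0;
                it = head;
        }

        while (it && pos-- > 0) {
                c->pos++;
                it = it->next;
        }

        if (it)
                c->key = it->key;       /* remember where we stopped */
        else
                c->pos = 0;             /* forget a stale cache */

        return it;
}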
@@ -120,8 +120,6 @@ void inet_listen_wlock(struct inet_hashinfo *hashinfo)
}
}

EXPORT_SYMBOL(inet_listen_wlock);

/*
 * Don't inline this cruft. Here are some nice properties to exploit here. The
 * BSD API does not allow a listening sock to specify the remote port nor the

@@ -494,7 +492,6 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
return ret;
}
}
EXPORT_SYMBOL_GPL(__inet_hash_connect);

/*
 * Bind a port for a connect operation and hash it.
@@ -514,11 +514,6 @@ static int do_ip_setsockopt(struct sock *sk, int level,
val &= ~3;
val |= inet->tos & 3;
}
if (IPTOS_PREC(val) >= IPTOS_PREC_CRITIC_ECP &&
!capable(CAP_NET_ADMIN)) {
err = -EPERM;
break;
}
if (inet->tos != val) {
inet->tos = val;
sk->sk_priority = rt_tos2priority(val);
@@ -283,7 +283,7 @@ static int ah6_output(struct xfrm_state *x, struct sk_buff *skb)

ah->reserved = 0;
ah->spi = x->id.spi;
ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq);
ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output);

spin_lock_bh(&x->lock);
err = ah_mac_digest(ahp, skb, ah->auth_data);
@@ -188,7 +188,7 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
*skb_mac_header(skb) = IPPROTO_ESP;

esph->spi = x->id.spi;
esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq);
esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output);

sg_init_table(sg, nfrags);
skb_to_sgvec(skb, sg,

@@ -199,7 +199,8 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
aead_givcrypt_set_callback(req, 0, esp_output_done, skb);
aead_givcrypt_set_crypt(req, sg, sg, clen, iv);
aead_givcrypt_set_assoc(req, asg, sizeof(*esph));
aead_givcrypt_set_giv(req, esph->enc_data, XFRM_SKB_CB(skb)->seq);
aead_givcrypt_set_giv(req, esph->enc_data,
XFRM_SKB_CB(skb)->seq.output);

ESP_SKB_CB(skb)->tmp = tmp;
err = crypto_aead_givencrypt(req);
@@ -621,7 +621,7 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
 * or if the skb it not generated by a local socket. (This last
 * check should be redundant, but it's free.)
 */
if (!np || np->pmtudisc >= IPV6_PMTUDISC_DO) {
if (!skb->local_df) {
skb->dev = skb->dst->dev;
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_FRAGFAILS);

@@ -1420,6 +1420,10 @@ int ip6_push_pending_frames(struct sock *sk)
tmp_skb->sk = NULL;
}

/* Allow local fragmentation. */
if (np->pmtudisc < IPV6_PMTUDISC_DO)
skb->local_df = 1;

ipv6_addr_copy(final_dst, &fl->fl6_dst);
__skb_pull(skb, skb_network_header_len(skb));
if (opt && opt->opt_flen)
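The two hunks above make the "may we fragment locally?" decision explicit: ip6_push_pending_frames() records the socket's PMTU-discovery preference in skb->local_df, and ip6_fragment() (and the xfrm6 tunnel check below) then key off that bit instead of re-deriving it from the socket. A compact, purely illustrative restatement of the resulting check; the enum and helper name are made up.

#include <stdbool.h>
#include <stddef.h>

enum frag_action { FRAG_NOT_NEEDED, FRAG_LOCALLY, SEND_PKT_TOOBIG };

/* local_df is set at send time when the socket permits local fragmentation
 * (pmtudisc below IPV6_PMTUDISC_DO in the hunks above); the later paths
 * consult only this bit. */
static enum frag_action classify_packet(size_t len, size_t mtu, bool local_df)
{
        if (len <= mtu)
                return FRAG_NOT_NEEDED;
        if (!local_df)
                return SEND_PKT_TOOBIG;  /* would emit ICMPV6_PKT_TOOBIG */
        return FRAG_LOCALLY;
}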
@@ -36,7 +36,7 @@ static int xfrm6_tunnel_check_size(struct sk_buff *skb)
if (mtu < IPV6_MIN_MTU)
mtu = IPV6_MIN_MTU;

if (skb->len > mtu) {
if (!skb->local_df && skb->len > mtu) {
skb->dev = dst->dev;
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
ret = -EMSGSIZE;
@@ -2291,6 +2291,7 @@ static int pfkey_spdadd(struct sock *sk, struct sk_buff *skb, struct sadb_msg *h
return 0;

out:
xp->dead = 1;
xfrm_policy_destroy(xp);
return err;
}
@@ -945,7 +945,7 @@ static int tcp_packet(struct nf_conn *ct,

ct->proto.tcp.state = new_state;
if (old_state != new_state
&& new_state == TCP_CONNTRACK_CLOSE)
&& new_state == TCP_CONNTRACK_FIN_WAIT)
ct->proto.tcp.seen[dir].flags |= IP_CT_TCP_FLAG_CLOSE_INIT;
timeout = ct->proto.tcp.retrans >= nf_ct_tcp_max_retrans
&& tcp_timeouts[new_state] > nf_ct_tcp_timeout_max_retrans
@@ -111,7 +111,7 @@ secmark_tg_check(const char *tablename, const void *entry,
return true;
}

void secmark_tg_destroy(const struct xt_target *target, void *targinfo)
static void secmark_tg_destroy(const struct xt_target *target, void *targinfo)
{
switch (mode) {
case SECMARK_MODE_SEL:
@@ -150,11 +150,11 @@ static struct netlbl_dom_map *netlbl_domhsh_search_def(const char *domain)
entry = netlbl_domhsh_search(domain);
if (entry == NULL) {
entry = rcu_dereference(netlbl_domhsh_def);
if (entry != NULL && entry->valid)
return entry;
if (entry != NULL && !entry->valid)
entry = NULL;
}

return NULL;
return entry;
}

/*
@@ -180,6 +180,7 @@ static void netlbl_unlabel_audit_addr4(struct audit_buffer *audit_buf,
}
}

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
/**
 * netlbl_unlabel_audit_addr6 - Audit an IPv6 address
 * @audit_buf: audit buffer

@@ -213,6 +214,7 @@ static void netlbl_unlabel_audit_addr6(struct audit_buffer *audit_buf,
audit_log_format(audit_buf, " src_prefixlen=%d", mask_len);
}
}
#endif /* IPv6 */

/*
 * Unlabeled Connection Hash Table Functions

@@ -617,8 +619,6 @@ static int netlbl_unlhsh_add(struct net *net,
int ifindex;
struct net_device *dev;
struct netlbl_unlhsh_iface *iface;
struct in_addr *addr4, *mask4;
struct in6_addr *addr6, *mask6;
struct audit_buffer *audit_buf = NULL;
char *secctx = NULL;
u32 secctx_len;

@@ -651,7 +651,9 @@ static int netlbl_unlhsh_add(struct net *net,
audit_buf = netlbl_audit_start_common(AUDIT_MAC_UNLBL_STCADD,
audit_info);
switch (addr_len) {
case sizeof(struct in_addr):
case sizeof(struct in_addr): {
struct in_addr *addr4, *mask4;

addr4 = (struct in_addr *)addr;
mask4 = (struct in_addr *)mask;
ret_val = netlbl_unlhsh_add_addr4(iface, addr4, mask4, secid);

@@ -661,8 +663,11 @@ static int netlbl_unlhsh_add(struct net *net,
addr4->s_addr,
mask4->s_addr);
break;
}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
case sizeof(struct in6_addr):
case sizeof(struct in6_addr): {
struct in6_addr *addr6, *mask6;

addr6 = (struct in6_addr *)addr;
mask6 = (struct in6_addr *)mask;
ret_val = netlbl_unlhsh_add_addr6(iface, addr6, mask6, secid);

@@ -671,6 +676,7 @@ static int netlbl_unlhsh_add(struct net *net,
dev_name,
addr6, mask6);
break;
}
#endif /* IPv6 */
default:
ret_val = -EINVAL;

@@ -1741,10 +1747,6 @@ int netlbl_unlabel_getattr(const struct sk_buff *skb,
u16 family,
struct netlbl_lsm_secattr *secattr)
{
struct iphdr *hdr4;
struct ipv6hdr *hdr6;
struct netlbl_unlhsh_addr4 *addr4;
struct netlbl_unlhsh_addr6 *addr6;
struct netlbl_unlhsh_iface *iface;

rcu_read_lock();

@@ -1752,21 +1754,29 @@ int netlbl_unlabel_getattr(const struct sk_buff *skb,
if (iface == NULL)
goto unlabel_getattr_nolabel;
switch (family) {
case PF_INET:
case PF_INET: {
struct iphdr *hdr4;
struct netlbl_unlhsh_addr4 *addr4;

hdr4 = ip_hdr(skb);
addr4 = netlbl_unlhsh_search_addr4(hdr4->saddr, iface);
if (addr4 == NULL)
goto unlabel_getattr_nolabel;
secattr->attr.secid = addr4->secid;
break;
}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
case PF_INET6:
case PF_INET6: {
struct ipv6hdr *hdr6;
struct netlbl_unlhsh_addr6 *addr6;

hdr6 = ipv6_hdr(skb);
addr6 = netlbl_unlhsh_search_addr6(&hdr6->saddr, iface);
if (addr6 == NULL)
goto unlabel_getattr_nolabel;
secattr->attr.secid = addr6->secid;
break;
}
#endif /* IPv6 */
default:
goto unlabel_getattr_nolabel;
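The netlbl_unlhsh_add() and netlbl_unlabel_getattr() hunks above move the per-family locals into braced case blocks, so the IPv6-only variables simply do not exist when the #ifdef drops that case and no "unused variable" warnings are produced. A tiny generic sketch of the idiom; the function, numeric case values, and config symbol are illustrative.

/* Generic sketch: declaring locals inside a braced case keeps them
 * scoped to the configuration that actually compiles them. */
static int handle_family(int family, const void *addr)
{
        switch (family) {
        case 2: {                       /* e.g. PF_INET */
                const unsigned char *a4 = addr;

                return a4[0];           /* IPv4-only work */
        }
#ifdef CONFIG_EXAMPLE_IPV6              /* illustrative config symbol */
        case 10: {                      /* e.g. PF_INET6 */
                const unsigned char *a6 = addr;

                return a6[0];           /* IPv6-only work */
        }
#endif
        default:
                return -1;
        }
}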
@@ -96,7 +96,6 @@ int netlbl_netlink_init(void)
struct audit_buffer *netlbl_audit_start_common(int type,
struct netlbl_audit *audit_info)
{
struct audit_context *audit_ctx = current->audit_context;
struct audit_buffer *audit_buf;
char *secctx;
u32 secctx_len;

@@ -104,7 +103,7 @@ struct audit_buffer *netlbl_audit_start_common(int type,
if (audit_enabled == 0)
return NULL;

audit_buf = audit_log_start(audit_ctx, GFP_ATOMIC, type);
audit_buf = audit_log_start(current->audit_context, GFP_ATOMIC, type);
if (audit_buf == NULL)
return NULL;
@@ -230,10 +230,8 @@ static void genl_unregister_mc_groups(struct genl_family *family)
{
struct genl_multicast_group *grp, *tmp;

genl_lock();
list_for_each_entry_safe(grp, tmp, &family->mcast_groups, list)
__genl_unregister_mc_group(family, grp);
genl_unlock();
}

/**

@@ -396,10 +394,10 @@ int genl_unregister_family(struct genl_family *family)
{
struct genl_family *rc;

genl_unregister_mc_groups(family);

genl_lock();

genl_unregister_mc_groups(family);

list_for_each_entry(rc, genl_family_chain(family->id), family_list) {
if (family->id != rc->id || strcmp(rc->name, family->name))
continue;
@@ -701,6 +701,9 @@ static ssize_t sock_splice_read(struct file *file, loff_t *ppos,
{
struct socket *sock = file->private_data;

if (unlikely(!sock->ops->splice_read))
return -EINVAL;

return sock->ops->splice_read(sock, ppos, pipe, len, flags);
}
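The added check in sock_splice_read() is the usual guard for an optional method in an ops table: fail with -EINVAL instead of dereferencing a NULL pointer for protocols that never provide splice_read. A small generic sketch of the pattern; the struct and function names below are illustrative.

#include <errno.h>
#include <stddef.h>

/* Illustrative ops table with an optional hook. */
struct example_ops {
        int (*splice_read)(void *ctx, char *buf, size_t len);
};

static int do_splice_read(const struct example_ops *ops, void *ctx,
                          char *buf, size_t len)
{
        /* Optional method: reject cleanly when it is not implemented. */
        if (!ops->splice_read)
                return -EINVAL;

        return ops->splice_read(ctx, buf, len);
}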
@@ -38,7 +38,7 @@ config XFRM_MIGRATE

config XFRM_STATISTICS
bool "Transformation statistics (EXPERIMENTAL)"
depends on XFRM && PROC_FS && EXPERIMENTAL
depends on INET && XFRM && PROC_FS && EXPERIMENTAL
---help---
This statistics is not a SNMP/MIB specification but shows
statistics about transformation error (or almost error) factor
@@ -109,7 +109,7 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
if (encap_type < 0) {
async = 1;
x = xfrm_input_state(skb);
seq = XFRM_SKB_CB(skb)->seq;
seq = XFRM_SKB_CB(skb)->seq.input;
goto resume;
}

@@ -175,7 +175,7 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)

spin_unlock(&x->lock);

XFRM_SKB_CB(skb)->seq = seq;
XFRM_SKB_CB(skb)->seq.input = seq;

nexthdr = x->type->input(x, skb);
@@ -62,7 +62,7 @@ static int xfrm_output_one(struct sk_buff *skb, int err)
}

if (x->type->flags & XFRM_TYPE_REPLAY_PROT) {
XFRM_SKB_CB(skb)->seq = ++x->replay.oseq;
XFRM_SKB_CB(skb)->seq.output = ++x->replay.oseq;
if (unlikely(x->replay.oseq == 0)) {
XFRM_INC_STATS(LINUX_MIB_XFRMOUTSTATESEQERROR);
x->replay.oseq--;
@@ -1105,6 +1105,7 @@ static struct xfrm_policy *xfrm_policy_construct(struct xfrm_userpolicy_info *p,
return xp;
error:
*errp = err;
xp->dead = 1;
xfrm_policy_destroy(xp);
return NULL;
}