Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) If the local_df boolean is set on an SKB we have to allocate a unique ID even if IP_DF is set in the ipv4 headers, from Ansis Atteka.
 2) Some fixups for the new chipset support that went into the sfc driver, from Ben Hutchings.
 3) Because SCTP bypasses a good chunk of, and actually duplicates, the logic of the ipv6 output path, some IPSEC things don't get done properly. Integrate SCTP better into the ipv6 output path so that these problems are fixed and such issues don't get missed in the future either. From Daniel Borkmann.
 4) Fix skge regressions added by the DMA mapping error return checking added in v3.10, from Mikulas Patocka.
 5) Kill some more IRQF_DISABLED references, from Michael Opdenacker.
 6) Fix races and deadlocks in the bridging code, from Hong Zhiguo.
 7) Fix error handling in tun_set_iff(); in particular, don't leak resources. From Jason Wang.
 8) Prevent format-string injection into the xen-netback driver, from Kees Cook.
 9) Fix regression added to netpoll ARP packet handling; in particular, check for the right ETH_P_ARP protocol code. From Sonic Zhang.
10) Try to deal with AMD IOMMU errors when using r8169 chips, from Francois Romieu.
11) Cure freezes due to recent changes in the rt2x00 wireless driver, from Stanislaw Gruszka.
12) Don't do SPI transfers (which can sleep) in interrupt context in the cw1200 driver, from Solomon Peachy.
13) Fix LEDs handling bug in 5720 tg3 chips already handled for 5719. From Nithin Sujir.
14) Make xen_netbk_count_skb_slots() count the actual number of slots that will be used, taking into consideration packing and other issues that the transmit path will run into. From David Vrabel.
15) Use the correct maximum age when calculating the bridge message_age_timer, from Chris Healy.
16) Get rid of memory leaks in the mcs7780 IRDA driver, from Alexey Khoroshilov.
17) Netfilter conntrack extensions were converted to RCU but are not always freed properly using kfree_rcu(). Fix from Michal Kubecek.
18) VF reset recovery not being done correctly in the qlcnic driver, from Manish Chopra.
19) Fix inverted test in the ATM nicstar driver, from Andy Shevchenko.
20) Missing workqueue destroy in cxgb4 error handling, from Wei Yang.
21) Internal switch not initialized properly in the bgmac driver, from Rafał Miłecki.
22) Netlink messages report wrong local and remote addresses in IPv6 tunneling, from Ding Zhi.
23) ICMP redirects should not generate socket errors in DCCP and SCTP. We're still working out how this should be handled for RAW and UDP sockets. From Daniel Borkmann and Duan Jiong.
24) We've had several bugs wherein the network namespace's loopback device gets accessed after it is freed; NULL it out so that we can catch these problems more readily. From Eric W. Biederman.
25) Fix regression in TCP RTO calculations, from Neal Cardwell.
26) Fix too-early free of the xen-netback network device when VIFs still exist. From Paul Durrant.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (87 commits)
  netconsole: fix a deadlock with rtnl and netconsole's mutex
  netpoll: fix NULL pointer dereference in netpoll_cleanup
  skge: fix broken driver
  ip: generate unique IP identificator if local fragmentation is allowed
  ip: use ip_hdr() in __ip_make_skb() to retrieve IP header
  xen-netback: Don't destroy the netdev until the vif is shut down
  net:dccp: do not report ICMP redirects to user space
  cnic: Fix crash in cnic_bnx2x_service_kcq()
  bnx2x, cnic, bnx2i, bnx2fc: Fix bnx2i and bnx2fc regressions.
  vxlan: Avoid creating fdb entry with NULL destination
  tcp: fix RTO calculated from cached RTT
  drivers: net: phy: cicada.c: clears warning Use #include <linux/io.h> instead of <asm/io.h>
  net loopback: Set loopback_dev to NULL when freed
  batman-adv: set the TAG flag for the vid passed to BLA
  netfilter: nfnetlink_queue: use network skb for sequence adjustment
  net: sctp: rfc4443: do not report ICMP redirects to user space
  net: usb: cdc_ether: use usb.h macros whenever possible
  net: usb: cdc_ether: fix checkpatch errors and warnings
  net: usb: cdc_ether: Use wwan interface for Telit modules
  ip6_tunnels: raddr and laddr are inverted in nl msg
  ...
commit b75ff5e84b
127 changed files with 756 additions and 522 deletions
@@ -1362,6 +1362,12 @@ To add ARP targets:

To remove an ARP target:

# echo -192.168.0.100 > /sys/class/net/bond0/bonding/arp_ip_target

To configure the interval between learning packet transmits:

# echo 12 > /sys/class/net/bond0/bonding/lp_interval

NOTE: the lp_interval is the number of seconds between instances where
the bonding driver sends learning packets to each slave's peer switch. The
default interval is 1 second.

Example Configuration
---------------------

We begin with the same example that is shown in section 3.3,

@@ -2865,15 +2865,4 @@ static struct pci_driver he_driver = {
	.id_table = he_pci_tbl,
};

static int __init he_init(void)
{
	return pci_register_driver(&he_driver);
}

static void __exit he_cleanup(void)
{
	pci_unregister_driver(&he_driver);
}

module_init(he_init);
module_exit(he_cleanup);
module_pci_driver(he_driver);

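For reference, module_pci_driver() is the helper this hunk switches to; it generates the same register/unregister pair the patch deletes. A rough, hedged sketch of the equivalent boilerplate it stands in for (kernel-module context; he_driver is the pci_driver defined just above, and the _init/_exit names here are approximations of what the macro generates):

	/* Approximately what module_pci_driver(he_driver) expands to: */
	static int __init he_driver_init(void)
	{
		return pci_register_driver(&he_driver);
	}
	module_init(he_driver_init);

	static void __exit he_driver_exit(void)
	{
		pci_unregister_driver(&he_driver);
	}
	module_exit(he_driver_exit);
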
@@ -778,7 +778,7 @@ static int ns_init_card(int i, struct pci_dev *pcidev)
	return error;
}

	if (mac[i] == NULL || mac_pton(mac[i], card->atmdev->esi)) {
	if (mac[i] == NULL || !mac_pton(mac[i], card->atmdev->esi)) {
		nicstar_read_eprom(card->membase, NICSTAR_EPROM_MAC_ADDR_OFFSET,
				   card->atmdev->esi, 6);
		if (memcmp(card->atmdev->esi, "\x00\x00\x00\x00\x00\x00", 6) ==

@ -269,6 +269,8 @@ static struct bcma_device *bcma_find_core_reverse(struct bcma_bus *bus, u16 core
|
|||
return NULL;
|
||||
}
|
||||
|
||||
#define IS_ERR_VALUE_U32(x) ((x) >= (u32)-MAX_ERRNO)
|
||||
|
||||
static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
|
||||
struct bcma_device_id *match, int core_num,
|
||||
struct bcma_device *core)
|
||||
|
@ -351,11 +353,11 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
|
|||
* the main register space for the core
|
||||
*/
|
||||
tmp = bcma_erom_get_addr_desc(bus, eromptr, SCAN_ADDR_TYPE_SLAVE, 0);
|
||||
if (tmp == 0 || IS_ERR_VALUE(tmp)) {
|
||||
if (tmp == 0 || IS_ERR_VALUE_U32(tmp)) {
|
||||
/* Try again to see if it is a bridge */
|
||||
tmp = bcma_erom_get_addr_desc(bus, eromptr,
|
||||
SCAN_ADDR_TYPE_BRIDGE, 0);
|
||||
if (tmp == 0 || IS_ERR_VALUE(tmp)) {
|
||||
if (tmp == 0 || IS_ERR_VALUE_U32(tmp)) {
|
||||
return -EILSEQ;
|
||||
} else {
|
||||
bcma_info(bus, "Bridge found\n");
|
||||
|
@ -369,7 +371,7 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
|
|||
for (j = 0; ; j++) {
|
||||
tmp = bcma_erom_get_addr_desc(bus, eromptr,
|
||||
SCAN_ADDR_TYPE_SLAVE, i);
|
||||
if (IS_ERR_VALUE(tmp)) {
|
||||
if (IS_ERR_VALUE_U32(tmp)) {
|
||||
/* no more entries for port _i_ */
|
||||
/* pr_debug("erom: slave port %d "
|
||||
* "has %d descriptors\n", i, j); */
|
||||
|
@ -386,7 +388,7 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
|
|||
for (j = 0; ; j++) {
|
||||
tmp = bcma_erom_get_addr_desc(bus, eromptr,
|
||||
SCAN_ADDR_TYPE_MWRAP, i);
|
||||
if (IS_ERR_VALUE(tmp)) {
|
||||
if (IS_ERR_VALUE_U32(tmp)) {
|
||||
/* no more entries for port _i_ */
|
||||
/* pr_debug("erom: master wrapper %d "
|
||||
* "has %d descriptors\n", i, j); */
|
||||
|
@ -404,7 +406,7 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
|
|||
for (j = 0; ; j++) {
|
||||
tmp = bcma_erom_get_addr_desc(bus, eromptr,
|
||||
SCAN_ADDR_TYPE_SWRAP, i + hack);
|
||||
if (IS_ERR_VALUE(tmp)) {
|
||||
if (IS_ERR_VALUE_U32(tmp)) {
|
||||
/* no more entries for port _i_ */
|
||||
/* pr_debug("erom: master wrapper %d "
|
||||
* has %d descriptors\n", i, j); */
|
||||
|
|
|
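The bcma change above works around IS_ERR_VALUE() comparing against an unsigned long: bcma_erom_get_addr_desc() hands back its error codes in a u32, and on a 64-bit kernel the zero-extended value never reaches the top-4095 error range, so errors were silently missed. A minimal user-space sketch of the comparison (simplified macros, illustrative only):

	#include <stdio.h>

	typedef unsigned int u32;

	#define MAX_ERRNO		4095
	#define IS_ERR_VALUE(x)		((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)
	#define IS_ERR_VALUE_U32(x)	((x) >= (u32)-MAX_ERRNO)

	int main(void)
	{
		u32 tmp = (u32)-22;	/* an error code (-EINVAL) stored in a u32 */

		/* On a 64-bit build, (unsigned long)tmp == 0xffffffea, well below
		 * the unsigned long error range, so the old check reports 0: */
		printf("IS_ERR_VALUE:     %d\n", IS_ERR_VALUE(tmp));		/* 0 */
		printf("IS_ERR_VALUE_U32: %d\n", IS_ERR_VALUE_U32(tmp));	/* 1 */
		return 0;
	}
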
@ -2295,8 +2295,8 @@ _hfcpci_softirq(struct device *dev, void *arg)
|
|||
static void
|
||||
hfcpci_softirq(void *arg)
|
||||
{
|
||||
(void) driver_for_each_device(&hfc_driver.driver, NULL, arg,
|
||||
_hfcpci_softirq);
|
||||
WARN_ON_ONCE(driver_for_each_device(&hfc_driver.driver, NULL, arg,
|
||||
_hfcpci_softirq) != 0);
|
||||
|
||||
/* if next event would be in the past ... */
|
||||
if ((s32)(hfc_jiffies + tics - jiffies) <= 0)
|
||||
|
|
|
@ -314,7 +314,7 @@ Amd7930_empty_Dfifo(struct IsdnCardState *cs, int flag)
|
|||
|
||||
t += sprintf(t, "Amd7930: empty_Dfifo cnt: %d |", cs->rcvidx);
|
||||
QuickHex(t, cs->rcvbuf, cs->rcvidx);
|
||||
debugl1(cs, cs->dlog);
|
||||
debugl1(cs, "%s", cs->dlog);
|
||||
}
|
||||
/* moves received data in sk-buffer */
|
||||
memcpy(skb_put(skb, cs->rcvidx), cs->rcvbuf, cs->rcvidx);
|
||||
|
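This hunk and the long run of similar HiSax hunks below all close the same hole: a buffer that already contains frame data (cs->dlog, bcs->blog, tmp, debbuf) was handed to debugl1()/HiSax_putstatus()/l3_debug() as the format string, so a stray '%' in received data would be parsed as a conversion specifier. A user-space analogue of the before/after (illustrative only):

	#include <stdio.h>

	int main(void)
	{
		/* Imagine this arrived from the wire and ended up in cs->dlog: */
		const char dlog[] = "Amd7930: empty_Dfifo cnt: 3 | 02 81 %x %n";

		/* printf(dlog);       old pattern: '%x'/'%n' in the data are
		 *                     interpreted, reading/writing bogus varargs */
		printf("%s\n", dlog);	/* new pattern: the buffer is plain data */
		return 0;
	}
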
@ -406,7 +406,7 @@ Amd7930_fill_Dfifo(struct IsdnCardState *cs)
|
|||
|
||||
t += sprintf(t, "Amd7930: fill_Dfifo cnt: %d |", count);
|
||||
QuickHex(t, deb_ptr, count);
|
||||
debugl1(cs, cs->dlog);
|
||||
debugl1(cs, "%s", cs->dlog);
|
||||
}
|
||||
/* AMD interrupts on */
|
||||
AmdIrqOn(cs);
|
||||
|
|
|
@ -285,7 +285,7 @@ hdlc_empty_fifo(struct BCState *bcs, int count)
|
|||
t += sprintf(t, "hdlc_empty_fifo %c cnt %d",
|
||||
bcs->channel ? 'B' : 'A', count);
|
||||
QuickHex(t, p, count);
|
||||
debugl1(cs, bcs->blog);
|
||||
debugl1(cs, "%s", bcs->blog);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -345,7 +345,7 @@ hdlc_fill_fifo(struct BCState *bcs)
|
|||
t += sprintf(t, "hdlc_fill_fifo %c cnt %d",
|
||||
bcs->channel ? 'B' : 'A', count);
|
||||
QuickHex(t, p, count);
|
||||
debugl1(cs, bcs->blog);
|
||||
debugl1(cs, "%s", bcs->blog);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -1896,7 +1896,7 @@ static void EChannel_proc_rcv(struct hisax_d_if *d_if)
|
|||
ptr--;
|
||||
*ptr++ = '\n';
|
||||
*ptr = 0;
|
||||
HiSax_putstatus(cs, NULL, cs->dlog);
|
||||
HiSax_putstatus(cs, NULL, "%s", cs->dlog);
|
||||
} else
|
||||
HiSax_putstatus(cs, "LogEcho: ",
|
||||
"warning Frame too big (%d)",
|
||||
|
|
|
@ -427,7 +427,7 @@ Memhscx_empty_fifo(struct BCState *bcs, int count)
|
|||
t += sprintf(t, "hscx_empty_fifo %c cnt %d",
|
||||
bcs->hw.hscx.hscx ? 'B' : 'A', count);
|
||||
QuickHex(t, ptr, count);
|
||||
debugl1(cs, bcs->blog);
|
||||
debugl1(cs, "%s", bcs->blog);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -469,7 +469,7 @@ Memhscx_fill_fifo(struct BCState *bcs)
|
|||
t += sprintf(t, "hscx_fill_fifo %c cnt %d",
|
||||
bcs->hw.hscx.hscx ? 'B' : 'A', count);
|
||||
QuickHex(t, ptr, count);
|
||||
debugl1(cs, bcs->blog);
|
||||
debugl1(cs, "%s", bcs->blog);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -535,7 +535,7 @@ check_arcofi(struct IsdnCardState *cs)
|
|||
t = tmp;
|
||||
t += sprintf(tmp, "Arcofi data");
|
||||
QuickHex(t, p, cs->dc.isac.mon_rxp);
|
||||
debugl1(cs, tmp);
|
||||
debugl1(cs, "%s", tmp);
|
||||
if ((cs->dc.isac.mon_rxp == 2) && (cs->dc.isac.mon_rx[0] == 0xa0)) {
|
||||
switch (cs->dc.isac.mon_rx[1]) {
|
||||
case 0x80:
|
||||
|
|
|
@ -344,7 +344,7 @@ static inline void receive_chars(struct IsdnCardState *cs,
|
|||
|
||||
t += sprintf(t, "modem read cnt %d", cs->hw.elsa.rcvcnt);
|
||||
QuickHex(t, cs->hw.elsa.rcvbuf, cs->hw.elsa.rcvcnt);
|
||||
debugl1(cs, tmp);
|
||||
debugl1(cs, "%s", tmp);
|
||||
}
|
||||
cs->hw.elsa.rcvcnt = 0;
|
||||
}
|
||||
|
|
|
@ -901,7 +901,7 @@ receive_emsg(struct IsdnCardState *cs)
|
|||
ptr--;
|
||||
*ptr++ = '\n';
|
||||
*ptr = 0;
|
||||
HiSax_putstatus(cs, NULL, cs->dlog);
|
||||
HiSax_putstatus(cs, NULL, "%s", cs->dlog);
|
||||
} else
|
||||
HiSax_putstatus(cs, "LogEcho: ", "warning Frame too big (%d)", total - 3);
|
||||
}
|
||||
|
|
|
@ -674,7 +674,7 @@ receive_emsg(struct IsdnCardState *cs)
|
|||
ptr--;
|
||||
*ptr++ = '\n';
|
||||
*ptr = 0;
|
||||
HiSax_putstatus(cs, NULL, cs->dlog);
|
||||
HiSax_putstatus(cs, NULL, "%s", cs->dlog);
|
||||
} else
|
||||
HiSax_putstatus(cs, "LogEcho: ", "warning Frame too big (%d)", skb->len);
|
||||
}
|
||||
|
|
|
@ -75,7 +75,7 @@ hscx_empty_fifo(struct BCState *bcs, int count)
|
|||
t += sprintf(t, "hscx_empty_fifo %c cnt %d",
|
||||
bcs->hw.hscx.hscx ? 'B' : 'A', count);
|
||||
QuickHex(t, ptr, count);
|
||||
debugl1(cs, bcs->blog);
|
||||
debugl1(cs, "%s", bcs->blog);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -115,7 +115,7 @@ hscx_fill_fifo(struct BCState *bcs)
|
|||
t += sprintf(t, "hscx_fill_fifo %c cnt %d",
|
||||
bcs->hw.hscx.hscx ? 'B' : 'A', count);
|
||||
QuickHex(t, ptr, count);
|
||||
debugl1(cs, bcs->blog);
|
||||
debugl1(cs, "%s", bcs->blog);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -134,7 +134,7 @@ icc_empty_fifo(struct IsdnCardState *cs, int count)
|
|||
|
||||
t += sprintf(t, "icc_empty_fifo cnt %d", count);
|
||||
QuickHex(t, ptr, count);
|
||||
debugl1(cs, cs->dlog);
|
||||
debugl1(cs, "%s", cs->dlog);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -176,7 +176,7 @@ icc_fill_fifo(struct IsdnCardState *cs)
|
|||
|
||||
t += sprintf(t, "icc_fill_fifo cnt %d", count);
|
||||
QuickHex(t, ptr, count);
|
||||
debugl1(cs, cs->dlog);
|
||||
debugl1(cs, "%s", cs->dlog);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -260,7 +260,7 @@ dch_empty_fifo(struct IsdnCardState *cs, int count)
|
|||
|
||||
t += sprintf(t, "dch_empty_fifo() cnt %d", count);
|
||||
QuickHex(t, ptr, count);
|
||||
debugl1(cs, cs->dlog);
|
||||
debugl1(cs, "%s", cs->dlog);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -307,7 +307,7 @@ dch_fill_fifo(struct IsdnCardState *cs)
|
|||
|
||||
t += sprintf(t, "dch_fill_fifo() cnt %d", count);
|
||||
QuickHex(t, ptr, count);
|
||||
debugl1(cs, cs->dlog);
|
||||
debugl1(cs, "%s", cs->dlog);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -539,7 +539,7 @@ bch_empty_fifo(struct BCState *bcs, int count)
|
|||
|
||||
t += sprintf(t, "bch_empty_fifo() B-%d cnt %d", hscx, count);
|
||||
QuickHex(t, ptr, count);
|
||||
debugl1(cs, bcs->blog);
|
||||
debugl1(cs, "%s", bcs->blog);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -582,7 +582,7 @@ bch_fill_fifo(struct BCState *bcs)
|
|||
|
||||
t += sprintf(t, "chb_fill_fifo() B-%d cnt %d", hscx, count);
|
||||
QuickHex(t, ptr, count);
|
||||
debugl1(cs, bcs->blog);
|
||||
debugl1(cs, "%s", bcs->blog);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -137,7 +137,7 @@ isac_empty_fifo(struct IsdnCardState *cs, int count)
|
|||
|
||||
t += sprintf(t, "isac_empty_fifo cnt %d", count);
|
||||
QuickHex(t, ptr, count);
|
||||
debugl1(cs, cs->dlog);
|
||||
debugl1(cs, "%s", cs->dlog);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -179,7 +179,7 @@ isac_fill_fifo(struct IsdnCardState *cs)
|
|||
|
||||
t += sprintf(t, "isac_fill_fifo cnt %d", count);
|
||||
QuickHex(t, ptr, count);
|
||||
debugl1(cs, cs->dlog);
|
||||
debugl1(cs, "%s", cs->dlog);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -74,7 +74,7 @@ sendmsg(struct IsdnCardState *cs, u_char his, u_char creg, u_char len,
|
|||
t = tmp;
|
||||
t += sprintf(t, "sendmbox cnt %d", len);
|
||||
QuickHex(t, &msg[len-i], (i > 64) ? 64 : i);
|
||||
debugl1(cs, tmp);
|
||||
debugl1(cs, "%s", tmp);
|
||||
i -= 64;
|
||||
}
|
||||
}
|
||||
|
@ -105,7 +105,7 @@ rcv_mbox(struct IsdnCardState *cs, struct isar_reg *ireg, u_char *msg)
|
|||
t = tmp;
|
||||
t += sprintf(t, "rcv_mbox cnt %d", ireg->clsb);
|
||||
QuickHex(t, &msg[ireg->clsb - i], (i > 64) ? 64 : i);
|
||||
debugl1(cs, tmp);
|
||||
debugl1(cs, "%s", tmp);
|
||||
i -= 64;
|
||||
}
|
||||
}
|
||||
|
@ -1248,7 +1248,7 @@ isar_int_main(struct IsdnCardState *cs)
|
|||
tp += sprintf(debbuf, "msg iis(%x) msb(%x)",
|
||||
ireg->iis, ireg->cmsb);
|
||||
QuickHex(tp, (u_char *)ireg->par, ireg->clsb);
|
||||
debugl1(cs, debbuf);
|
||||
debugl1(cs, "%s", debbuf);
|
||||
}
|
||||
break;
|
||||
case ISAR_IIS_INVMSG:
|
||||
|
|
|
@ -81,10 +81,7 @@ modejade(struct BCState *bcs, int mode, int bc)
|
|||
int jade = bcs->hw.hscx.hscx;
|
||||
|
||||
if (cs->debug & L1_DEB_HSCX) {
|
||||
char tmp[40];
|
||||
sprintf(tmp, "jade %c mode %d ichan %d",
|
||||
'A' + jade, mode, bc);
|
||||
debugl1(cs, tmp);
|
||||
debugl1(cs, "jade %c mode %d ichan %d", 'A' + jade, mode, bc);
|
||||
}
|
||||
bcs->mode = mode;
|
||||
bcs->channel = bc;
|
||||
|
@ -257,23 +254,18 @@ void
|
|||
clear_pending_jade_ints(struct IsdnCardState *cs)
|
||||
{
|
||||
int val;
|
||||
char tmp[64];
|
||||
|
||||
cs->BC_Write_Reg(cs, 0, jade_HDLC_IMR, 0x00);
|
||||
cs->BC_Write_Reg(cs, 1, jade_HDLC_IMR, 0x00);
|
||||
|
||||
val = cs->BC_Read_Reg(cs, 1, jade_HDLC_ISR);
|
||||
sprintf(tmp, "jade B ISTA %x", val);
|
||||
debugl1(cs, tmp);
|
||||
debugl1(cs, "jade B ISTA %x", val);
|
||||
val = cs->BC_Read_Reg(cs, 0, jade_HDLC_ISR);
|
||||
sprintf(tmp, "jade A ISTA %x", val);
|
||||
debugl1(cs, tmp);
|
||||
debugl1(cs, "jade A ISTA %x", val);
|
||||
val = cs->BC_Read_Reg(cs, 1, jade_HDLC_STAR);
|
||||
sprintf(tmp, "jade B STAR %x", val);
|
||||
debugl1(cs, tmp);
|
||||
debugl1(cs, "jade B STAR %x", val);
|
||||
val = cs->BC_Read_Reg(cs, 0, jade_HDLC_STAR);
|
||||
sprintf(tmp, "jade A STAR %x", val);
|
||||
debugl1(cs, tmp);
|
||||
debugl1(cs, "jade A STAR %x", val);
|
||||
/* Unmask ints */
|
||||
cs->BC_Write_Reg(cs, 0, jade_HDLC_IMR, 0xF8);
|
||||
cs->BC_Write_Reg(cs, 1, jade_HDLC_IMR, 0xF8);
|
||||
|
|
|
@ -65,7 +65,7 @@ jade_empty_fifo(struct BCState *bcs, int count)
|
|||
t += sprintf(t, "jade_empty_fifo %c cnt %d",
|
||||
bcs->hw.hscx.hscx ? 'B' : 'A', count);
|
||||
QuickHex(t, ptr, count);
|
||||
debugl1(cs, bcs->blog);
|
||||
debugl1(cs, "%s", bcs->blog);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -105,7 +105,7 @@ jade_fill_fifo(struct BCState *bcs)
|
|||
t += sprintf(t, "jade_fill_fifo %c cnt %d",
|
||||
bcs->hw.hscx.hscx ? 'B' : 'A', count);
|
||||
QuickHex(t, ptr, count);
|
||||
debugl1(cs, bcs->blog);
|
||||
debugl1(cs, "%s", bcs->blog);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -63,7 +63,7 @@ l3_1tr6_error(struct l3_process *pc, u_char *msg, struct sk_buff *skb)
|
|||
{
|
||||
dev_kfree_skb(skb);
|
||||
if (pc->st->l3.debug & L3_DEB_WARN)
|
||||
l3_debug(pc->st, msg);
|
||||
l3_debug(pc->st, "%s", msg);
|
||||
l3_1tr6_release_req(pc, 0, NULL);
|
||||
}
|
||||
|
||||
|
@ -161,7 +161,6 @@ l3_1tr6_setup(struct l3_process *pc, u_char pr, void *arg)
|
|||
{
|
||||
u_char *p;
|
||||
int bcfound = 0;
|
||||
char tmp[80];
|
||||
struct sk_buff *skb = arg;
|
||||
|
||||
/* Channel Identification */
|
||||
|
@ -214,10 +213,9 @@ l3_1tr6_setup(struct l3_process *pc, u_char pr, void *arg)
|
|||
/* Signal all services, linklevel takes care of Service-Indicator */
|
||||
if (bcfound) {
|
||||
if ((pc->para.setup.si1 != 7) && (pc->st->l3.debug & L3_DEB_WARN)) {
|
||||
sprintf(tmp, "non-digital call: %s -> %s",
|
||||
l3_debug(pc->st, "non-digital call: %s -> %s",
|
||||
pc->para.setup.phone,
|
||||
pc->para.setup.eazmsn);
|
||||
l3_debug(pc->st, tmp);
|
||||
}
|
||||
newl3state(pc, 6);
|
||||
pc->st->l3.l3l4(pc->st, CC_SETUP | INDICATION, pc);
|
||||
|
@ -301,7 +299,7 @@ l3_1tr6_info(struct l3_process *pc, u_char pr, void *arg)
|
|||
{
|
||||
u_char *p;
|
||||
int i, tmpcharge = 0;
|
||||
char a_charge[8], tmp[32];
|
||||
char a_charge[8];
|
||||
struct sk_buff *skb = arg;
|
||||
|
||||
p = skb->data;
|
||||
|
@ -316,8 +314,8 @@ l3_1tr6_info(struct l3_process *pc, u_char pr, void *arg)
|
|||
pc->st->l3.l3l4(pc->st, CC_CHARGE | INDICATION, pc);
|
||||
}
|
||||
if (pc->st->l3.debug & L3_DEB_CHARGE) {
|
||||
sprintf(tmp, "charging info %d", pc->para.chargeinfo);
|
||||
l3_debug(pc->st, tmp);
|
||||
l3_debug(pc->st, "charging info %d",
|
||||
pc->para.chargeinfo);
|
||||
}
|
||||
} else if (pc->st->l3.debug & L3_DEB_CHARGE)
|
||||
l3_debug(pc->st, "charging info not found");
|
||||
|
@ -399,7 +397,7 @@ l3_1tr6_disc(struct l3_process *pc, u_char pr, void *arg)
|
|||
struct sk_buff *skb = arg;
|
||||
u_char *p;
|
||||
int i, tmpcharge = 0;
|
||||
char a_charge[8], tmp[32];
|
||||
char a_charge[8];
|
||||
|
||||
StopAllL3Timer(pc);
|
||||
p = skb->data;
|
||||
|
@ -414,8 +412,8 @@ l3_1tr6_disc(struct l3_process *pc, u_char pr, void *arg)
|
|||
pc->st->l3.l3l4(pc->st, CC_CHARGE | INDICATION, pc);
|
||||
}
|
||||
if (pc->st->l3.debug & L3_DEB_CHARGE) {
|
||||
sprintf(tmp, "charging info %d", pc->para.chargeinfo);
|
||||
l3_debug(pc->st, tmp);
|
||||
l3_debug(pc->st, "charging info %d",
|
||||
pc->para.chargeinfo);
|
||||
}
|
||||
} else if (pc->st->l3.debug & L3_DEB_CHARGE)
|
||||
l3_debug(pc->st, "charging info not found");
|
||||
|
@ -746,7 +744,6 @@ up1tr6(struct PStack *st, int pr, void *arg)
|
|||
int i, mt, cr;
|
||||
struct l3_process *proc;
|
||||
struct sk_buff *skb = arg;
|
||||
char tmp[80];
|
||||
|
||||
switch (pr) {
|
||||
case (DL_DATA | INDICATION):
|
||||
|
@ -762,26 +759,23 @@ up1tr6(struct PStack *st, int pr, void *arg)
|
|||
}
|
||||
if (skb->len < 4) {
|
||||
if (st->l3.debug & L3_DEB_PROTERR) {
|
||||
sprintf(tmp, "up1tr6 len only %d", skb->len);
|
||||
l3_debug(st, tmp);
|
||||
l3_debug(st, "up1tr6 len only %d", skb->len);
|
||||
}
|
||||
dev_kfree_skb(skb);
|
||||
return;
|
||||
}
|
||||
if ((skb->data[0] & 0xfe) != PROTO_DIS_N0) {
|
||||
if (st->l3.debug & L3_DEB_PROTERR) {
|
||||
sprintf(tmp, "up1tr6%sunexpected discriminator %x message len %d",
|
||||
l3_debug(st, "up1tr6%sunexpected discriminator %x message len %d",
|
||||
(pr == (DL_DATA | INDICATION)) ? " " : "(broadcast) ",
|
||||
skb->data[0], skb->len);
|
||||
l3_debug(st, tmp);
|
||||
}
|
||||
dev_kfree_skb(skb);
|
||||
return;
|
||||
}
|
||||
if (skb->data[1] != 1) {
|
||||
if (st->l3.debug & L3_DEB_PROTERR) {
|
||||
sprintf(tmp, "up1tr6 CR len not 1");
|
||||
l3_debug(st, tmp);
|
||||
l3_debug(st, "up1tr6 CR len not 1");
|
||||
}
|
||||
dev_kfree_skb(skb);
|
||||
return;
|
||||
|
@ -791,9 +785,8 @@ up1tr6(struct PStack *st, int pr, void *arg)
|
|||
if (skb->data[0] == PROTO_DIS_N0) {
|
||||
dev_kfree_skb(skb);
|
||||
if (st->l3.debug & L3_DEB_STATE) {
|
||||
sprintf(tmp, "up1tr6%s N0 mt %x unhandled",
|
||||
l3_debug(st, "up1tr6%s N0 mt %x unhandled",
|
||||
(pr == (DL_DATA | INDICATION)) ? " " : "(broadcast) ", mt);
|
||||
l3_debug(st, tmp);
|
||||
}
|
||||
} else if (skb->data[0] == PROTO_DIS_N1) {
|
||||
if (!(proc = getl3proc(st, cr))) {
|
||||
|
@ -801,8 +794,7 @@ up1tr6(struct PStack *st, int pr, void *arg)
|
|||
if (cr < 128) {
|
||||
if (!(proc = new_l3_process(st, cr))) {
|
||||
if (st->l3.debug & L3_DEB_PROTERR) {
|
||||
sprintf(tmp, "up1tr6 no roc mem");
|
||||
l3_debug(st, tmp);
|
||||
l3_debug(st, "up1tr6 no roc mem");
|
||||
}
|
||||
dev_kfree_skb(skb);
|
||||
return;
|
||||
|
@ -821,8 +813,7 @@ up1tr6(struct PStack *st, int pr, void *arg)
|
|||
} else {
|
||||
if (!(proc = new_l3_process(st, cr))) {
|
||||
if (st->l3.debug & L3_DEB_PROTERR) {
|
||||
sprintf(tmp, "up1tr6 no roc mem");
|
||||
l3_debug(st, tmp);
|
||||
l3_debug(st, "up1tr6 no roc mem");
|
||||
}
|
||||
dev_kfree_skb(skb);
|
||||
return;
|
||||
|
@ -837,18 +828,16 @@ up1tr6(struct PStack *st, int pr, void *arg)
|
|||
if (i == ARRAY_SIZE(datastln1)) {
|
||||
dev_kfree_skb(skb);
|
||||
if (st->l3.debug & L3_DEB_STATE) {
|
||||
sprintf(tmp, "up1tr6%sstate %d mt %x unhandled",
|
||||
l3_debug(st, "up1tr6%sstate %d mt %x unhandled",
|
||||
(pr == (DL_DATA | INDICATION)) ? " " : "(broadcast) ",
|
||||
proc->state, mt);
|
||||
l3_debug(st, tmp);
|
||||
}
|
||||
return;
|
||||
} else {
|
||||
if (st->l3.debug & L3_DEB_STATE) {
|
||||
sprintf(tmp, "up1tr6%sstate %d mt %x",
|
||||
l3_debug(st, "up1tr6%sstate %d mt %x",
|
||||
(pr == (DL_DATA | INDICATION)) ? " " : "(broadcast) ",
|
||||
proc->state, mt);
|
||||
l3_debug(st, tmp);
|
||||
}
|
||||
datastln1[i].rout(proc, pr, skb);
|
||||
}
|
||||
|
@ -861,7 +850,6 @@ down1tr6(struct PStack *st, int pr, void *arg)
|
|||
int i, cr;
|
||||
struct l3_process *proc;
|
||||
struct Channel *chan;
|
||||
char tmp[80];
|
||||
|
||||
if ((DL_ESTABLISH | REQUEST) == pr) {
|
||||
l3_msg(st, pr, NULL);
|
||||
|
@ -888,15 +876,13 @@ down1tr6(struct PStack *st, int pr, void *arg)
|
|||
break;
|
||||
if (i == ARRAY_SIZE(downstl)) {
|
||||
if (st->l3.debug & L3_DEB_STATE) {
|
||||
sprintf(tmp, "down1tr6 state %d prim %d unhandled",
|
||||
l3_debug(st, "down1tr6 state %d prim %d unhandled",
|
||||
proc->state, pr);
|
||||
l3_debug(st, tmp);
|
||||
}
|
||||
} else {
|
||||
if (st->l3.debug & L3_DEB_STATE) {
|
||||
sprintf(tmp, "down1tr6 state %d prim %d",
|
||||
l3_debug(st, "down1tr6 state %d prim %d",
|
||||
proc->state, pr);
|
||||
l3_debug(st, tmp);
|
||||
}
|
||||
downstl[i].rout(proc, pr, arg);
|
||||
}
|
||||
|
|
|
@ -176,7 +176,7 @@ static void printframe(struct IsdnCardState *cs, u_char *buf, int count, char *s
|
|||
else
|
||||
j = i;
|
||||
QuickHex(t, p, j);
|
||||
debugl1(cs, tmp);
|
||||
debugl1(cs, "%s", tmp);
|
||||
p += j;
|
||||
i -= j;
|
||||
t = tmp;
|
||||
|
|
|
@ -1179,7 +1179,7 @@ LogFrame(struct IsdnCardState *cs, u_char *buf, int size)
|
|||
dp--;
|
||||
*dp++ = '\n';
|
||||
*dp = 0;
|
||||
HiSax_putstatus(cs, NULL, cs->dlog);
|
||||
HiSax_putstatus(cs, NULL, "%s", cs->dlog);
|
||||
} else
|
||||
HiSax_putstatus(cs, "LogFrame: ", "warning Frame too big (%d)", size);
|
||||
}
|
||||
|
@ -1246,7 +1246,7 @@ dlogframe(struct IsdnCardState *cs, struct sk_buff *skb, int dir)
|
|||
}
|
||||
if (finish) {
|
||||
*dp = 0;
|
||||
HiSax_putstatus(cs, NULL, cs->dlog);
|
||||
HiSax_putstatus(cs, NULL, "%s", cs->dlog);
|
||||
return;
|
||||
}
|
||||
if ((0xfe & buf[0]) == PROTO_DIS_N0) { /* 1TR6 */
|
||||
|
@ -1509,5 +1509,5 @@ dlogframe(struct IsdnCardState *cs, struct sk_buff *skb, int dir)
|
|||
dp += sprintf(dp, "Unknown protocol %x!", buf[0]);
|
||||
}
|
||||
*dp = 0;
|
||||
HiSax_putstatus(cs, NULL, cs->dlog);
|
||||
HiSax_putstatus(cs, NULL, "%s", cs->dlog);
|
||||
}
|
||||
|
|
|
@ -154,7 +154,7 @@ W6692_empty_fifo(struct IsdnCardState *cs, int count)
|
|||
|
||||
t += sprintf(t, "W6692_empty_fifo cnt %d", count);
|
||||
QuickHex(t, ptr, count);
|
||||
debugl1(cs, cs->dlog);
|
||||
debugl1(cs, "%s", cs->dlog);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -196,7 +196,7 @@ W6692_fill_fifo(struct IsdnCardState *cs)
|
|||
|
||||
t += sprintf(t, "W6692_fill_fifo cnt %d", count);
|
||||
QuickHex(t, ptr, count);
|
||||
debugl1(cs, cs->dlog);
|
||||
debugl1(cs, "%s", cs->dlog);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -226,7 +226,7 @@ W6692B_empty_fifo(struct BCState *bcs, int count)
|
|||
t += sprintf(t, "W6692B_empty_fifo %c cnt %d",
|
||||
bcs->channel + '1', count);
|
||||
QuickHex(t, ptr, count);
|
||||
debugl1(cs, bcs->blog);
|
||||
debugl1(cs, "%s", bcs->blog);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -264,7 +264,7 @@ W6692B_fill_fifo(struct BCState *bcs)
|
|||
t += sprintf(t, "W6692B_fill_fifo %c cnt %d",
|
||||
bcs->channel + '1', count);
|
||||
QuickHex(t, ptr, count);
|
||||
debugl1(cs, bcs->blog);
|
||||
debugl1(cs, "%s", bcs->blog);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -1472,7 +1472,7 @@ void bond_alb_monitor(struct work_struct *work)
|
|||
bond_info->lp_counter++;
|
||||
|
||||
/* send learning packets */
|
||||
if (bond_info->lp_counter >= BOND_ALB_LP_TICKS) {
|
||||
if (bond_info->lp_counter >= BOND_ALB_LP_TICKS(bond)) {
|
||||
/* change of curr_active_slave involves swapping of mac addresses.
|
||||
* in order to avoid this swapping from happening while
|
||||
* sending the learning packets, the curr_slave_lock must be held for
|
||||
|
|
|
@ -36,14 +36,15 @@ struct slave;
|
|||
* Used for division - never set
|
||||
* to zero !!!
|
||||
*/
|
||||
#define BOND_ALB_LP_INTERVAL 1 /* In seconds, periodic send of
|
||||
* learning packets to the switch
|
||||
*/
|
||||
#define BOND_ALB_DEFAULT_LP_INTERVAL 1
|
||||
#define BOND_ALB_LP_INTERVAL(bond) (bond->params.lp_interval) /* In seconds, periodic send of
|
||||
* learning packets to the switch
|
||||
*/
|
||||
|
||||
#define BOND_TLB_REBALANCE_TICKS (BOND_TLB_REBALANCE_INTERVAL \
|
||||
* ALB_TIMER_TICKS_PER_SEC)
|
||||
|
||||
#define BOND_ALB_LP_TICKS (BOND_ALB_LP_INTERVAL \
|
||||
#define BOND_ALB_LP_TICKS(bond) (BOND_ALB_LP_INTERVAL(bond) \
|
||||
* ALB_TIMER_TICKS_PER_SEC)
|
||||
|
||||
#define TLB_HASH_TABLE_SIZE 256 /* The size of the clients hash table.
|
||||
|
|
|
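The net effect of the two bond_alb hunks above: the learning-packet cadence is now derived from the configurable lp_interval instead of the hard-coded 1-second BOND_ALB_LP_INTERVAL. A small sketch of the arithmetic (assuming ALB_TIMER_TICKS_PER_SEC is 10, its value in bond_alb.h):

	/* bond_alb_monitor() runs ALB_TIMER_TICKS_PER_SEC times per second and
	 * sends learning packets once lp_counter reaches BOND_ALB_LP_TICKS(bond): */
	#define ALB_TIMER_TICKS_PER_SEC		10
	#define BOND_ALB_LP_INTERVAL(bond)	((bond)->params.lp_interval)
	#define BOND_ALB_LP_TICKS(bond)		(BOND_ALB_LP_INTERVAL(bond) * \
						 ALB_TIMER_TICKS_PER_SEC)

	/* lp_interval == 1  ->  send every 10 ticks (once a second, old behaviour)
	 * lp_interval == 5  ->  send every 50 ticks (once every 5 seconds)        */
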
@ -4416,6 +4416,7 @@ static int bond_check_params(struct bond_params *params)
|
|||
params->all_slaves_active = all_slaves_active;
|
||||
params->resend_igmp = resend_igmp;
|
||||
params->min_links = min_links;
|
||||
params->lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL;
|
||||
|
||||
if (primary) {
|
||||
strncpy(params->primary, primary, IFNAMSIZ);
|
||||
|
|
|
@ -1699,6 +1699,44 @@ static ssize_t bonding_store_resend_igmp(struct device *d,
|
|||
static DEVICE_ATTR(resend_igmp, S_IRUGO | S_IWUSR,
|
||||
bonding_show_resend_igmp, bonding_store_resend_igmp);
|
||||
|
||||
|
||||
static ssize_t bonding_show_lp_interval(struct device *d,
|
||||
struct device_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
struct bonding *bond = to_bond(d);
|
||||
return sprintf(buf, "%d\n", bond->params.lp_interval);
|
||||
}
|
||||
|
||||
static ssize_t bonding_store_lp_interval(struct device *d,
|
||||
struct device_attribute *attr,
|
||||
const char *buf, size_t count)
|
||||
{
|
||||
struct bonding *bond = to_bond(d);
|
||||
int new_value, ret = count;
|
||||
|
||||
if (sscanf(buf, "%d", &new_value) != 1) {
|
||||
pr_err("%s: no lp interval value specified.\n",
|
||||
bond->dev->name);
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (new_value <= 0) {
|
||||
pr_err ("%s: lp_interval must be between 1 and %d\n",
|
||||
bond->dev->name, INT_MAX);
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
bond->params.lp_interval = new_value;
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static DEVICE_ATTR(lp_interval, S_IRUGO | S_IWUSR,
|
||||
bonding_show_lp_interval, bonding_store_lp_interval);
|
||||
|
||||
static struct attribute *per_bond_attrs[] = {
|
||||
&dev_attr_slaves.attr,
|
||||
&dev_attr_mode.attr,
|
||||
|
@ -1729,6 +1767,7 @@ static struct attribute *per_bond_attrs[] = {
|
|||
&dev_attr_all_slaves_active.attr,
|
||||
&dev_attr_resend_igmp.attr,
|
||||
&dev_attr_min_links.attr,
|
||||
&dev_attr_lp_interval.attr,
|
||||
NULL,
|
||||
};
|
||||
|
||||
|
|
|
@ -176,6 +176,7 @@ struct bond_params {
|
|||
int tx_queues;
|
||||
int all_slaves_active;
|
||||
int resend_igmp;
|
||||
int lp_interval;
|
||||
};
|
||||
|
||||
struct bond_parm_tbl {
|
||||
|
|
|
@ -530,7 +530,7 @@ static int bfin_mac_ethtool_setwol(struct net_device *dev,
|
|||
if (lp->wol && !lp->irq_wake_requested) {
|
||||
/* register wake irq handler */
|
||||
rc = request_irq(IRQ_MAC_WAKEDET, bfin_mac_wake_interrupt,
|
||||
IRQF_DISABLED, "EMAC_WAKE", dev);
|
||||
0, "EMAC_WAKE", dev);
|
||||
if (rc)
|
||||
return rc;
|
||||
lp->irq_wake_requested = true;
|
||||
|
@ -1686,7 +1686,7 @@ static int bfin_mac_probe(struct platform_device *pdev)
|
|||
/* now, enable interrupts */
|
||||
/* register irq handler */
|
||||
rc = request_irq(IRQ_MAC_RX, bfin_mac_interrupt,
|
||||
IRQF_DISABLED, "EMAC_RX", ndev);
|
||||
0, "EMAC_RX", ndev);
|
||||
if (rc) {
|
||||
dev_err(&pdev->dev, "Cannot request Blackfin MAC RX IRQ!\n");
|
||||
rc = -EBUSY;
|
||||
|
|
|
@ -358,7 +358,7 @@ static int __init lance_probe( struct net_device *dev)
|
|||
|
||||
REGA(CSR0) = CSR0_STOP;
|
||||
|
||||
if (request_irq(LANCE_IRQ, lance_interrupt, IRQF_DISABLED, "SUN3 Lance", dev) < 0) {
|
||||
if (request_irq(LANCE_IRQ, lance_interrupt, 0, "SUN3 Lance", dev) < 0) {
|
||||
#ifdef CONFIG_SUN3
|
||||
iounmap((void __iomem *)ioaddr);
|
||||
#endif
|
||||
|
|
|
@ -1188,7 +1188,7 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|||
struct alx_priv *alx;
|
||||
struct alx_hw *hw;
|
||||
bool phy_configured;
|
||||
int bars, pm_cap, err;
|
||||
int bars, err;
|
||||
|
||||
err = pci_enable_device_mem(pdev);
|
||||
if (err)
|
||||
|
@ -1225,18 +1225,13 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|||
pci_enable_pcie_error_reporting(pdev);
|
||||
pci_set_master(pdev);
|
||||
|
||||
pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
|
||||
if (pm_cap == 0) {
|
||||
if (!pdev->pm_cap) {
|
||||
dev_err(&pdev->dev,
|
||||
"Can't find power management capability, aborting\n");
|
||||
err = -EIO;
|
||||
goto out_pci_release;
|
||||
}
|
||||
|
||||
err = pci_set_power_state(pdev, PCI_D0);
|
||||
if (err)
|
||||
goto out_pci_release;
|
||||
|
||||
netdev = alloc_etherdev(sizeof(*alx));
|
||||
if (!netdev) {
|
||||
err = -ENOMEM;
|
||||
|
|
|
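The alx hunk above (and the bnx2x/tg3 hunks further down) drop per-driver copies of the PM capability offset because the PCI core already discovers and caches it during enumeration. A short sketch of the equivalence (kernel context assumed):

	/* The removed lookup ...                                              */
	int pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	/* ... yields the same offset the core stored at enumeration time
	 * (pci_pm_init()), so drivers can simply test and use:               */
	if (!pdev->pm_cap)
		dev_err(&pdev->dev, "Can't find power management capability\n");
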
@ -157,6 +157,7 @@ static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
|
|||
if (++ring->end >= BGMAC_TX_RING_SLOTS)
|
||||
ring->end = 0;
|
||||
bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_INDEX,
|
||||
ring->index_base +
|
||||
ring->end * sizeof(struct bgmac_dma_desc));
|
||||
|
||||
/* Always keep one slot free to allow detecting bugged calls. */
|
||||
|
@ -181,6 +182,8 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
|
|||
/* The last slot that hardware didn't consume yet */
|
||||
empty_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
|
||||
empty_slot &= BGMAC_DMA_TX_STATDPTR;
|
||||
empty_slot -= ring->index_base;
|
||||
empty_slot &= BGMAC_DMA_TX_STATDPTR;
|
||||
empty_slot /= sizeof(struct bgmac_dma_desc);
|
||||
|
||||
while (ring->start != empty_slot) {
|
||||
|
@ -274,6 +277,8 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
|
|||
|
||||
end_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_STATUS);
|
||||
end_slot &= BGMAC_DMA_RX_STATDPTR;
|
||||
end_slot -= ring->index_base;
|
||||
end_slot &= BGMAC_DMA_RX_STATDPTR;
|
||||
end_slot /= sizeof(struct bgmac_dma_desc);
|
||||
|
||||
ring->end = end_slot;
|
||||
|
@ -418,9 +423,6 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)
|
|||
ring = &bgmac->tx_ring[i];
|
||||
ring->num_slots = BGMAC_TX_RING_SLOTS;
|
||||
ring->mmio_base = ring_base[i];
|
||||
if (bgmac_dma_unaligned(bgmac, ring, BGMAC_DMA_RING_TX))
|
||||
bgmac_warn(bgmac, "TX on ring 0x%X supports unaligned addressing but this feature is not implemented\n",
|
||||
ring->mmio_base);
|
||||
|
||||
/* Alloc ring of descriptors */
|
||||
size = ring->num_slots * sizeof(struct bgmac_dma_desc);
|
||||
|
@ -435,6 +437,13 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)
|
|||
if (ring->dma_base & 0xC0000000)
|
||||
bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
|
||||
|
||||
ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
|
||||
BGMAC_DMA_RING_TX);
|
||||
if (ring->unaligned)
|
||||
ring->index_base = lower_32_bits(ring->dma_base);
|
||||
else
|
||||
ring->index_base = 0;
|
||||
|
||||
/* No need to alloc TX slots yet */
|
||||
}
|
||||
|
||||
|
@ -444,9 +453,6 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)
|
|||
ring = &bgmac->rx_ring[i];
|
||||
ring->num_slots = BGMAC_RX_RING_SLOTS;
|
||||
ring->mmio_base = ring_base[i];
|
||||
if (bgmac_dma_unaligned(bgmac, ring, BGMAC_DMA_RING_RX))
|
||||
bgmac_warn(bgmac, "RX on ring 0x%X supports unaligned addressing but this feature is not implemented\n",
|
||||
ring->mmio_base);
|
||||
|
||||
/* Alloc ring of descriptors */
|
||||
size = ring->num_slots * sizeof(struct bgmac_dma_desc);
|
||||
|
@ -462,6 +468,13 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)
|
|||
if (ring->dma_base & 0xC0000000)
|
||||
bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
|
||||
|
||||
ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
|
||||
BGMAC_DMA_RING_RX);
|
||||
if (ring->unaligned)
|
||||
ring->index_base = lower_32_bits(ring->dma_base);
|
||||
else
|
||||
ring->index_base = 0;
|
||||
|
||||
/* Alloc RX slots */
|
||||
for (j = 0; j < ring->num_slots; j++) {
|
||||
err = bgmac_dma_rx_skb_for_slot(bgmac, &ring->slots[j]);
|
||||
|
@ -489,12 +502,14 @@ static void bgmac_dma_init(struct bgmac *bgmac)
|
|||
for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
|
||||
ring = &bgmac->tx_ring[i];
|
||||
|
||||
/* We don't implement unaligned addressing, so enable first */
|
||||
bgmac_dma_tx_enable(bgmac, ring);
|
||||
if (!ring->unaligned)
|
||||
bgmac_dma_tx_enable(bgmac, ring);
|
||||
bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
|
||||
lower_32_bits(ring->dma_base));
|
||||
bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGHI,
|
||||
upper_32_bits(ring->dma_base));
|
||||
if (ring->unaligned)
|
||||
bgmac_dma_tx_enable(bgmac, ring);
|
||||
|
||||
ring->start = 0;
|
||||
ring->end = 0; /* Points the slot that should *not* be read */
|
||||
|
@ -505,12 +520,14 @@ static void bgmac_dma_init(struct bgmac *bgmac)
|
|||
|
||||
ring = &bgmac->rx_ring[i];
|
||||
|
||||
/* We don't implement unaligned addressing, so enable first */
|
||||
bgmac_dma_rx_enable(bgmac, ring);
|
||||
if (!ring->unaligned)
|
||||
bgmac_dma_rx_enable(bgmac, ring);
|
||||
bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
|
||||
lower_32_bits(ring->dma_base));
|
||||
bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGHI,
|
||||
upper_32_bits(ring->dma_base));
|
||||
if (ring->unaligned)
|
||||
bgmac_dma_rx_enable(bgmac, ring);
|
||||
|
||||
for (j = 0, dma_desc = ring->cpu_base; j < ring->num_slots;
|
||||
j++, dma_desc++) {
|
||||
|
@ -531,6 +548,7 @@ static void bgmac_dma_init(struct bgmac *bgmac)
|
|||
}
|
||||
|
||||
bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_INDEX,
|
||||
ring->index_base +
|
||||
ring->num_slots * sizeof(struct bgmac_dma_desc));
|
||||
|
||||
ring->start = 0;
|
||||
|
@ -908,10 +926,10 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
|
|||
struct bcma_drv_cc *cc = &bgmac->core->bus->drv_cc;
|
||||
u8 et_swtype = 0;
|
||||
u8 sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHY |
|
||||
BGMAC_CHIPCTL_1_IF_TYPE_RMII;
|
||||
char buf[2];
|
||||
BGMAC_CHIPCTL_1_IF_TYPE_MII;
|
||||
char buf[4];
|
||||
|
||||
if (bcm47xx_nvram_getenv("et_swtype", buf, 1) > 0) {
|
||||
if (bcm47xx_nvram_getenv("et_swtype", buf, sizeof(buf)) > 0) {
|
||||
if (kstrtou8(buf, 0, &et_swtype))
|
||||
bgmac_err(bgmac, "Failed to parse et_swtype (%s)\n",
|
||||
buf);
|
||||
|
|
|
@ -333,7 +333,7 @@
|
|||
|
||||
#define BGMAC_CHIPCTL_1_IF_TYPE_MASK 0x00000030
|
||||
#define BGMAC_CHIPCTL_1_IF_TYPE_RMII 0x00000000
|
||||
#define BGMAC_CHIPCTL_1_IF_TYPE_MI 0x00000010
|
||||
#define BGMAC_CHIPCTL_1_IF_TYPE_MII 0x00000010
|
||||
#define BGMAC_CHIPCTL_1_IF_TYPE_RGMII 0x00000020
|
||||
#define BGMAC_CHIPCTL_1_SW_TYPE_MASK 0x000000C0
|
||||
#define BGMAC_CHIPCTL_1_SW_TYPE_EPHY 0x00000000
|
||||
|
@ -384,6 +384,8 @@ struct bgmac_dma_ring {
|
|||
u16 mmio_base;
|
||||
struct bgmac_dma_desc *cpu_base;
|
||||
dma_addr_t dma_base;
|
||||
u32 index_base; /* Used for unaligned rings only, otherwise 0 */
|
||||
bool unaligned;
|
||||
|
||||
struct bgmac_slot_info slots[BGMAC_RX_RING_SLOTS];
|
||||
};
|
||||
|
|
|
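The bgmac hunks above are all one fix: on cores that report unaligned DMA rings, the ring index/status registers are relative to the ring's DMA base address rather than to zero, so the driver now records lower_32_bits(dma_base) in index_base and applies it when converting between slot numbers and register values. A sketch of that translation with hypothetical helper names (the driver open-codes this at each register access):

	/* slot -> value written to BGMAC_DMA_TX_INDEX / BGMAC_DMA_RX_INDEX */
	static u32 bgmac_slot_to_index(struct bgmac_dma_ring *ring, u32 slot)
	{
		return ring->index_base + slot * sizeof(struct bgmac_dma_desc);
	}

	/* BGMAC_DMA_TX_STATUS value -> slot number ("empty_slot" in the patch) */
	static u32 bgmac_status_to_slot(struct bgmac_dma_ring *ring, u32 status)
	{
		u32 off = status & BGMAC_DMA_TX_STATDPTR;

		off -= ring->index_base;
		off &= BGMAC_DMA_TX_STATDPTR;
		return off / sizeof(struct bgmac_dma_desc);
	}
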
@ -246,8 +246,37 @@ enum {
|
|||
BNX2X_MAX_CNIC_ETH_CL_ID_IDX,
|
||||
};
|
||||
|
||||
#define BNX2X_CNIC_START_ETH_CID(bp) (BNX2X_NUM_NON_CNIC_QUEUES(bp) *\
|
||||
/* use a value high enough to be above all the PFs, which has least significant
|
||||
* nibble as 8, so when cnic needs to come up with a CID for UIO to use to
|
||||
* calculate doorbell address according to old doorbell configuration scheme
|
||||
* (db_msg_sz 1 << 7 * cid + 0x40 DPM offset) it can come up with a valid number
|
||||
* We must avoid coming up with cid 8 for iscsi since according to this method
|
||||
* the designated UIO cid will come out 0 and it has a special handling for that
|
||||
* case which doesn't suit us. Therefore we will ceiling to the closest cid which
* has least significant nibble 8, and if it is 8 we will move forward to 0x18.
|
||||
*/
|
||||
|
||||
#define BNX2X_1st_NON_L2_ETH_CID(bp) (BNX2X_NUM_NON_CNIC_QUEUES(bp) * \
|
||||
(bp)->max_cos)
|
||||
/* amount of cids traversed by UIO's DPM addition to doorbell */
|
||||
#define UIO_DPM 8
|
||||
/* roundup to DPM offset */
|
||||
#define UIO_ROUNDUP(bp) (roundup(BNX2X_1st_NON_L2_ETH_CID(bp), \
|
||||
UIO_DPM))
|
||||
/* offset to nearest value which has lsb nibble matching DPM */
|
||||
#define UIO_CID_OFFSET(bp) ((UIO_ROUNDUP(bp) + UIO_DPM) % \
|
||||
(UIO_DPM * 2))
|
||||
/* add offset to rounded-up cid to get a value which could be used with UIO */
|
||||
#define UIO_DPM_ALIGN(bp) (UIO_ROUNDUP(bp) + UIO_CID_OFFSET(bp))
|
||||
/* but wait - avoid UIO special case for cid 0 */
|
||||
#define UIO_DPM_CID0_OFFSET(bp) ((UIO_DPM * 2) * \
|
||||
(UIO_DPM_ALIGN(bp) == UIO_DPM))
|
||||
/* Properly DPM aligned CID adjusted to cid 0 special case */
|
||||
#define BNX2X_CNIC_START_ETH_CID(bp) (UIO_DPM_ALIGN(bp) + \
|
||||
(UIO_DPM_CID0_OFFSET(bp)))
|
||||
/* how many cids were wasted - need this value for cid allocation */
|
||||
#define UIO_CID_PAD(bp) (BNX2X_CNIC_START_ETH_CID(bp) - \
|
||||
BNX2X_1st_NON_L2_ETH_CID(bp))
|
||||
/* iSCSI L2 */
|
||||
#define BNX2X_ISCSI_ETH_CID(bp) (BNX2X_CNIC_START_ETH_CID(bp))
|
||||
/* FCoE L2 */
|
||||
|
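A worked example of the rounding these bnx2x macros perform, as a standalone user-space sketch (the example inputs stand in for BNX2X_NUM_NON_CNIC_QUEUES(bp) * max_cos; function name here is hypothetical):

	#include <stdio.h>

	#define UIO_DPM		8
	#define roundup(x, y)	((((x) + (y) - 1) / (y)) * (y))

	static unsigned int cnic_start_cid(unsigned int first_non_l2_cid)
	{
		unsigned int rounded = roundup(first_non_l2_cid, UIO_DPM);
		unsigned int offset  = (rounded + UIO_DPM) % (UIO_DPM * 2);
		unsigned int aligned = rounded + offset;

		/* avoid the UIO special case for cid 8 (doorbell cid 0) */
		return aligned + (UIO_DPM * 2) * (aligned == UIO_DPM);
	}

	int main(void)
	{
		printf("0x%x\n", cnic_start_cid(6));	/* 0x18, as the comment says */
		printf("0x%x\n", cnic_start_cid(12));	/* 0x18 */
		printf("0x%x\n", cnic_start_cid(26));	/* 0x28 */
		return 0;
	}
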
@ -1542,7 +1571,6 @@ struct bnx2x {
|
|||
*/
|
||||
bool fcoe_init;
|
||||
|
||||
int pm_cap;
|
||||
int mrrs;
|
||||
|
||||
struct delayed_work sp_task;
|
||||
|
@ -1681,10 +1709,11 @@ struct bnx2x {
|
|||
* Maximum CID count that might be required by the bnx2x:
|
||||
* Max RSS * Max_Tx_Multi_Cos + FCoE + iSCSI
|
||||
*/
|
||||
|
||||
#define BNX2X_L2_CID_COUNT(bp) (BNX2X_NUM_ETH_QUEUES(bp) * BNX2X_MULTI_TX_COS \
|
||||
+ 2 * CNIC_SUPPORT(bp))
|
||||
+ CNIC_SUPPORT(bp) * (2 + UIO_CID_PAD(bp)))
|
||||
#define BNX2X_L2_MAX_CID(bp) (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS \
|
||||
+ 2 * CNIC_SUPPORT(bp))
|
||||
+ CNIC_SUPPORT(bp) * (2 + UIO_CID_PAD(bp)))
|
||||
#define L2_ILT_LINES(bp) (DIV_ROUND_UP(BNX2X_L2_CID_COUNT(bp),\
|
||||
ILT_PAGE_CIDS))
|
||||
|
||||
|
|
|
@ -3008,16 +3008,16 @@ int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
|
|||
u16 pmcsr;
|
||||
|
||||
/* If there is no power capability, silently succeed */
|
||||
if (!bp->pm_cap) {
|
||||
if (!bp->pdev->pm_cap) {
|
||||
BNX2X_DEV_INFO("No power capability. Breaking.\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
|
||||
pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
|
||||
|
||||
switch (state) {
|
||||
case PCI_D0:
|
||||
pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
|
||||
pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
|
||||
((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
|
||||
PCI_PM_CTRL_PME_STATUS));
|
||||
|
||||
|
@ -3041,7 +3041,7 @@ int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
|
|||
if (bp->wol)
|
||||
pmcsr |= PCI_PM_CTRL_PME_ENABLE;
|
||||
|
||||
pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
|
||||
pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
|
||||
pmcsr);
|
||||
|
||||
/* No more memory access after this point until
|
||||
|
|
|
@ -1387,9 +1387,9 @@ static bool bnx2x_is_nvm_accessible(struct bnx2x *bp)
|
|||
u16 pm = 0;
|
||||
struct net_device *dev = pci_get_drvdata(bp->pdev);
|
||||
|
||||
if (bp->pm_cap)
|
||||
if (bp->pdev->pm_cap)
|
||||
rc = pci_read_config_word(bp->pdev,
|
||||
bp->pm_cap + PCI_PM_CTRL, &pm);
|
||||
bp->pdev->pm_cap + PCI_PM_CTRL, &pm);
|
||||
|
||||
if ((rc && !netif_running(dev)) ||
|
||||
(!rc && ((pm & PCI_PM_CTRL_STATE_MASK) != (__force u16)PCI_D0)))
|
||||
|
|
|
@ -8652,6 +8652,7 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
|
|||
else if (bp->wol) {
|
||||
u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
|
||||
u8 *mac_addr = bp->dev->dev_addr;
|
||||
struct pci_dev *pdev = bp->pdev;
|
||||
u32 val;
|
||||
u16 pmc;
|
||||
|
||||
|
@ -8668,9 +8669,9 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
|
|||
EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
|
||||
|
||||
/* Enable the PME and clear the status */
|
||||
pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmc);
|
||||
pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &pmc);
|
||||
pmc |= PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS;
|
||||
pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, pmc);
|
||||
pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, pmc);
|
||||
|
||||
reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
|
||||
|
||||
|
@ -10399,7 +10400,7 @@ static void bnx2x_get_common_hwinfo(struct bnx2x *bp)
|
|||
break;
|
||||
}
|
||||
|
||||
pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
|
||||
pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_PMC, &pmc);
|
||||
bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
|
||||
|
||||
BNX2X_DEV_INFO("%sWoL capable\n",
|
||||
|
@ -12141,8 +12142,7 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
|
|||
}
|
||||
|
||||
if (IS_PF(bp)) {
|
||||
bp->pm_cap = pdev->pm_cap;
|
||||
if (bp->pm_cap == 0) {
|
||||
if (!pdev->pm_cap) {
|
||||
dev_err(&bp->pdev->dev,
|
||||
"Cannot find power management capability, aborting\n");
|
||||
rc = -EIO;
|
||||
|
@ -13632,6 +13632,10 @@ void bnx2x_setup_cnic_info(struct bnx2x *bp)
|
|||
cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
|
||||
cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);
|
||||
|
||||
DP(NETIF_MSG_IFUP, "BNX2X_1st_NON_L2_ETH_CID(bp) %x, cp->starting_cid %x, cp->fcoe_init_cid %x, cp->iscsi_l2_cid %x\n",
|
||||
BNX2X_1st_NON_L2_ETH_CID(bp), cp->starting_cid, cp->fcoe_init_cid,
|
||||
cp->iscsi_l2_cid);
|
||||
|
||||
if (NO_ISCSI_OOO(bp))
|
||||
cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
|
||||
}
|
||||
|
|
|
@ -3135,6 +3135,7 @@ static void cnic_service_bnx2x_bh(unsigned long data)
|
|||
{
|
||||
struct cnic_dev *dev = (struct cnic_dev *) data;
|
||||
struct cnic_local *cp = dev->cnic_priv;
|
||||
struct bnx2x *bp = netdev_priv(dev->netdev);
|
||||
u32 status_idx, new_status_idx;
|
||||
|
||||
if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
|
||||
|
@ -3146,7 +3147,7 @@ static void cnic_service_bnx2x_bh(unsigned long data)
|
|||
CNIC_WR16(dev, cp->kcq1.io_addr,
|
||||
cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
|
||||
|
||||
if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE) {
|
||||
if (!CNIC_SUPPORTS_FCOE(bp)) {
|
||||
cp->arm_int(dev, status_idx);
|
||||
break;
|
||||
}
|
||||
|
@ -5217,7 +5218,8 @@ static void cnic_init_rings(struct cnic_dev *dev)
|
|||
"iSCSI CLIENT_SETUP did not complete\n");
|
||||
cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
|
||||
cnic_ring_ctl(dev, cid, cli, 1);
|
||||
*cid_ptr = cid;
|
||||
*cid_ptr = cid >> 4;
|
||||
*(cid_ptr + 1) = cid * bp->db_size;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -3034,6 +3034,7 @@ static bool tg3_phy_led_bug(struct tg3 *tp)
|
|||
{
|
||||
switch (tg3_asic_rev(tp)) {
|
||||
case ASIC_REV_5719:
|
||||
case ASIC_REV_5720:
|
||||
if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
|
||||
!tp->pci_fn)
|
||||
return true;
|
||||
|
@ -16192,12 +16193,12 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
|
|||
* So explicitly force the chip into D0 here.
|
||||
*/
|
||||
pci_read_config_dword(tp->pdev,
|
||||
tp->pm_cap + PCI_PM_CTRL,
|
||||
tp->pdev->pm_cap + PCI_PM_CTRL,
|
||||
&pm_reg);
|
||||
pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
|
||||
pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
|
||||
pci_write_config_dword(tp->pdev,
|
||||
tp->pm_cap + PCI_PM_CTRL,
|
||||
tp->pdev->pm_cap + PCI_PM_CTRL,
|
||||
pm_reg);
|
||||
|
||||
/* Also, force SERR#/PERR# in PCI command. */
|
||||
|
@ -17346,7 +17347,6 @@ static int tg3_init_one(struct pci_dev *pdev,
|
|||
tp = netdev_priv(dev);
|
||||
tp->pdev = pdev;
|
||||
tp->dev = dev;
|
||||
tp->pm_cap = pdev->pm_cap;
|
||||
tp->rx_mode = TG3_DEF_RX_MODE;
|
||||
tp->tx_mode = TG3_DEF_TX_MODE;
|
||||
tp->irq_sync = 1;
|
||||
|
|
|
@ -3234,7 +3234,6 @@ struct tg3 {
|
|||
u8 pci_lat_timer;
|
||||
|
||||
int pci_fn;
|
||||
int pm_cap;
|
||||
int msi_cap;
|
||||
int pcix_cap;
|
||||
int pcie_readrq;
|
||||
|
|
|
@ -6149,8 +6149,10 @@ static int __init cxgb4_init_module(void)
|
|||
pr_warn("could not create debugfs entry, continuing\n");
|
||||
|
||||
ret = pci_register_driver(&cxgb4_driver);
|
||||
if (ret < 0)
|
||||
if (ret < 0) {
|
||||
debugfs_remove(cxgb4_debugfs_root);
|
||||
destroy_workqueue(workq);
|
||||
}
|
||||
|
||||
register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
|
||||
|
||||
|
|
|
@ -1321,7 +1321,7 @@ de4x5_open(struct net_device *dev)
|
|||
if (request_irq(dev->irq, de4x5_interrupt, IRQF_SHARED,
|
||||
lp->adapter_name, dev)) {
|
||||
printk("de4x5_open(): Requested IRQ%d is busy - attemping FAST/SHARE...", dev->irq);
|
||||
if (request_irq(dev->irq, de4x5_interrupt, IRQF_DISABLED | IRQF_SHARED,
|
||||
if (request_irq(dev->irq, de4x5_interrupt, IRQF_SHARED,
|
||||
lp->adapter_name, dev)) {
|
||||
printk("\n Cannot get IRQ- reconfigure your hardware.\n");
|
||||
disable_ast(dev);
|
||||
|
|
|
@ -2802,7 +2802,7 @@ static int be_vfs_if_create(struct be_adapter *adapter)
|
|||
struct be_resources res = {0};
|
||||
struct be_vf_cfg *vf_cfg;
|
||||
u32 cap_flags, en_flags, vf;
|
||||
int status;
|
||||
int status = 0;
|
||||
|
||||
cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
|
||||
BE_IF_FLAGS_MULTICAST;
|
||||
|
|
|
@ -2199,7 +2199,7 @@ fec_probe(struct platform_device *pdev)
|
|||
goto failed_irq;
|
||||
}
|
||||
ret = devm_request_irq(&pdev->dev, irq, fec_enet_interrupt,
|
||||
IRQF_DISABLED, pdev->name, ndev);
|
||||
0, pdev->name, ndev);
|
||||
if (ret)
|
||||
goto failed_irq;
|
||||
}
|
||||
|
|
|
@ -1097,7 +1097,7 @@ static int hp100_open(struct net_device *dev)
|
|||
/* New: if bus is PCI or EISA, interrupts might be shared interrupts */
|
||||
if (request_irq(dev->irq, hp100_interrupt,
|
||||
lp->bus == HP100_BUS_PCI || lp->bus ==
|
||||
HP100_BUS_EISA ? IRQF_SHARED : IRQF_DISABLED,
|
||||
HP100_BUS_EISA ? IRQF_SHARED : 0,
|
||||
"hp100", dev)) {
|
||||
printk("hp100: %s: unable to get IRQ %d\n", dev->name, dev->irq);
|
||||
return -EAGAIN;
|
||||
|
|
|
@ -102,6 +102,19 @@ static int ehea_probe_adapter(struct platform_device *dev);
|
|||
|
||||
static int ehea_remove(struct platform_device *dev);
|
||||
|
||||
static struct of_device_id ehea_module_device_table[] = {
|
||||
{
|
||||
.name = "lhea",
|
||||
.compatible = "IBM,lhea",
|
||||
},
|
||||
{
|
||||
.type = "network",
|
||||
.compatible = "IBM,lhea-ethernet",
|
||||
},
|
||||
{},
|
||||
};
|
||||
MODULE_DEVICE_TABLE(of, ehea_module_device_table);
|
||||
|
||||
static struct of_device_id ehea_device_table[] = {
|
||||
{
|
||||
.name = "lhea",
|
||||
|
@ -109,7 +122,6 @@ static struct of_device_id ehea_device_table[] = {
|
|||
},
|
||||
{},
|
||||
};
|
||||
MODULE_DEVICE_TABLE(of, ehea_device_table);
|
||||
|
||||
static struct platform_driver ehea_driver = {
|
||||
.driver = {
|
||||
|
@ -1285,7 +1297,7 @@ static int ehea_reg_interrupts(struct net_device *dev)
|
|||
|
||||
ret = ibmebus_request_irq(port->qp_eq->attr.ist1,
|
||||
ehea_qp_aff_irq_handler,
|
||||
IRQF_DISABLED, port->int_aff_name, port);
|
||||
0, port->int_aff_name, port);
|
||||
if (ret) {
|
||||
netdev_err(dev, "failed registering irq for qp_aff_irq_handler:ist=%X\n",
|
||||
port->qp_eq->attr.ist1);
|
||||
|
@ -1303,8 +1315,7 @@ static int ehea_reg_interrupts(struct net_device *dev)
|
|||
"%s-queue%d", dev->name, i);
|
||||
ret = ibmebus_request_irq(pr->eq->attr.ist1,
|
||||
ehea_recv_irq_handler,
|
||||
IRQF_DISABLED, pr->int_send_name,
|
||||
pr);
|
||||
0, pr->int_send_name, pr);
|
||||
if (ret) {
|
||||
netdev_err(dev, "failed registering irq for ehea_queue port_res_nr:%d, ist=%X\n",
|
||||
i, pr->eq->attr.ist1);
|
||||
|
@ -3320,7 +3331,7 @@ static int ehea_probe_adapter(struct platform_device *dev)
|
|||
}
|
||||
|
||||
ret = ibmebus_request_irq(adapter->neq->attr.ist1,
|
||||
ehea_interrupt_neq, IRQF_DISABLED,
|
||||
ehea_interrupt_neq, 0,
|
||||
"ehea_neq", adapter);
|
||||
if (ret) {
|
||||
dev_err(&dev->dev, "requesting NEQ IRQ failed\n");
|
||||
|
|
|
@ -922,6 +922,14 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
|
|||
else
|
||||
mask &= ~(1 << 30);
|
||||
}
|
||||
if (mac->type == e1000_pch2lan) {
|
||||
/* SHRAH[0,1,2] different than previous */
|
||||
if (i == 7)
|
||||
mask &= 0xFFF4FFFF;
|
||||
/* SHRAH[3] different than SHRAH[0,1,2] */
|
||||
if (i == 10)
|
||||
mask |= (1 << 30);
|
||||
}
|
||||
|
||||
REG_PATTERN_TEST_ARRAY(E1000_RA, ((i << 1) + 1), mask,
|
||||
0xFFFFFFFF);
|
||||
|
|
|
@ -1371,7 +1371,10 @@ static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
|
|||
return;
|
||||
}
|
||||
|
||||
if (index < hw->mac.rar_entry_count) {
|
||||
/* RAR[1-6] are owned by manageability. Skip those and program the
|
||||
* next address into the SHRA register array.
|
||||
*/
|
||||
if (index < (u32)(hw->mac.rar_entry_count - 6)) {
|
||||
s32 ret_val;
|
||||
|
||||
ret_val = e1000_acquire_swflag_ich8lan(hw);
|
||||
|
@ -1962,8 +1965,8 @@ void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
|
|||
if (ret_val)
|
||||
goto release;
|
||||
|
||||
/* Copy both RAL/H (rar_entry_count) and SHRAL/H (+4) to PHY */
|
||||
for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
|
||||
/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
|
||||
for (i = 0; i < (hw->mac.rar_entry_count); i++) {
|
||||
mac_reg = er32(RAL(i));
|
||||
hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
|
||||
(u16)(mac_reg & 0xFFFF));
|
||||
|
@ -2007,10 +2010,10 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
|
|||
return ret_val;
|
||||
|
||||
if (enable) {
|
||||
/* Write Rx addresses (rar_entry_count for RAL/H, +4 for
|
||||
/* Write Rx addresses (rar_entry_count for RAL/H, and
|
||||
* SHRAL/H) and initial CRC values to the MAC
|
||||
*/
|
||||
for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
|
||||
for (i = 0; i < hw->mac.rar_entry_count; i++) {
|
||||
u8 mac_addr[ETH_ALEN] = { 0 };
|
||||
u32 addr_high, addr_low;
|
||||
|
||||
|
|
|
@ -98,7 +98,7 @@
|
|||
#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL
|
||||
|
||||
#define E1000_ICH_RAR_ENTRIES 7
|
||||
#define E1000_PCH2_RAR_ENTRIES 5 /* RAR[0], SHRA[0-3] */
|
||||
#define E1000_PCH2_RAR_ENTRIES 11 /* RAR[0-6], SHRA[0-3] */
|
||||
#define E1000_PCH_LPT_RAR_ENTRIES 12 /* RAR[0], SHRA[0-10] */
|
||||
|
||||
#define PHY_PAGE_SHIFT 5
|
||||
|
|
|
@ -4868,7 +4868,7 @@ static void e1000_watchdog_task(struct work_struct *work)
|
|||
*/
|
||||
if ((hw->phy.type == e1000_phy_igp_3 ||
|
||||
hw->phy.type == e1000_phy_bm) &&
|
||||
(hw->mac.autoneg == true) &&
|
||||
hw->mac.autoneg &&
|
||||
(adapter->link_speed == SPEED_10 ||
|
||||
adapter->link_speed == SPEED_100) &&
|
||||
(adapter->link_duplex == HALF_DUPLEX)) {
|
||||
|
|
|
@ -719,6 +719,10 @@ static s32 igb_get_phy_id_82575(struct e1000_hw *hw)
|
|||
u32 ctrl_ext;
|
||||
u32 mdic;
|
||||
|
||||
/* Extra read required for some PHY's on i354 */
|
||||
if (hw->mac.type == e1000_i354)
|
||||
igb_get_phy_id(hw);
|
||||
|
||||
/* For SGMII PHYs, we try the list of possible addresses until
|
||||
* we find one that works. For non-SGMII PHYs
|
||||
* (e.g. integrated copper PHYs), an address of 1 should
|
||||
|
|
|
@ -712,6 +712,7 @@ static s32 igb_set_fc_watermarks(struct e1000_hw *hw)
|
|||
static s32 igb_set_default_fc(struct e1000_hw *hw)
|
||||
{
|
||||
s32 ret_val = 0;
|
||||
u16 lan_offset;
|
||||
u16 nvm_data;
|
||||
|
||||
/* Read and store word 0x0F of the EEPROM. This word contains bits
|
||||
|
@ -722,7 +723,14 @@ static s32 igb_set_default_fc(struct e1000_hw *hw)
|
|||
* control setting, then the variable hw->fc will
|
||||
* be initialized based on a value in the EEPROM.
|
||||
*/
|
||||
ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data);
|
||||
if (hw->mac.type == e1000_i350) {
|
||||
lan_offset = NVM_82580_LAN_FUNC_OFFSET(hw->bus.func);
|
||||
ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG
|
||||
+ lan_offset, 1, &nvm_data);
|
||||
} else {
|
||||
ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG,
|
||||
1, &nvm_data);
|
||||
}
|
||||
|
||||
if (ret_val) {
|
||||
hw_dbg("NVM Read Error\n");
|
||||
|
|
|
@ -160,6 +160,13 @@ static int ixgbe_get_settings(struct net_device *netdev,
|
|||
bool autoneg = false;
|
||||
bool link_up;
|
||||
|
||||
/* SFP type is needed for get_link_capabilities */
|
||||
if (hw->phy.media_type & (ixgbe_media_type_fiber |
|
||||
ixgbe_media_type_fiber_qsfp)) {
|
||||
if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
|
||||
hw->phy.ops.identify_sfp(hw);
|
||||
}
|
||||
|
||||
hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg);
|
||||
|
||||
/* set the supported link speeds */
|
||||
|
@ -186,6 +193,11 @@ static int ixgbe_get_settings(struct net_device *netdev,
|
|||
ecmd->advertising |= ADVERTISED_1000baseT_Full;
|
||||
if (supported_link & IXGBE_LINK_SPEED_100_FULL)
|
||||
ecmd->advertising |= ADVERTISED_100baseT_Full;
|
||||
|
||||
if (hw->phy.multispeed_fiber && !autoneg) {
|
||||
if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
|
||||
ecmd->advertising = ADVERTISED_10000baseT_Full;
|
||||
}
|
||||
}
|
||||
|
||||
if (autoneg) {
|
||||
|
@ -314,6 +326,14 @@ static int ixgbe_set_settings(struct net_device *netdev,
|
|||
if (ecmd->advertising & ~ecmd->supported)
|
||||
return -EINVAL;
|
||||
|
||||
/* only allow one speed at a time if no autoneg */
|
||||
if (!ecmd->autoneg && hw->phy.multispeed_fiber) {
|
||||
if (ecmd->advertising ==
|
||||
(ADVERTISED_10000baseT_Full |
|
||||
ADVERTISED_1000baseT_Full))
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
old = hw->phy.autoneg_advertised;
|
||||
advertised = 0;
|
||||
if (ecmd->advertising & ADVERTISED_10000baseT_Full)
|
||||
|
@ -1805,6 +1825,10 @@ static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
|
|||
unsigned int size = 1024;
|
||||
netdev_tx_t tx_ret_val;
|
||||
struct sk_buff *skb;
|
||||
u32 flags_orig = adapter->flags;
|
||||
|
||||
/* DCB can modify the frames on Tx */
|
||||
adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
|
||||
|
||||
/* allocate test skb */
|
||||
skb = alloc_skb(size, GFP_KERNEL);
|
||||
|
@ -1857,6 +1881,7 @@ static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
|
|||
|
||||
/* free the original skb */
|
||||
kfree_skb(skb);
|
||||
adapter->flags = flags_orig;
|
||||
|
||||
return ret_val;
|
||||
}
|
||||
|
|
|
@ -3571,7 +3571,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
|
|||
{
|
||||
struct ixgbe_hw *hw = &adapter->hw;
|
||||
int i;
|
||||
u32 rxctrl;
|
||||
u32 rxctrl, rfctl;
|
||||
|
||||
/* disable receives while setting up the descriptors */
|
||||
rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
|
||||
|
@ -3580,6 +3580,13 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
|
|||
ixgbe_setup_psrtype(adapter);
|
||||
ixgbe_setup_rdrxctl(adapter);
|
||||
|
||||
/* RSC Setup */
|
||||
rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
|
||||
rfctl &= ~IXGBE_RFCTL_RSC_DIS;
|
||||
if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
|
||||
rfctl |= IXGBE_RFCTL_RSC_DIS;
|
||||
IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
|
||||
|
||||
/* Program registers for the distribution of queues */
|
||||
ixgbe_setup_mrqc(adapter);
|
||||
|
||||
|
@ -5993,8 +6000,16 @@ static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
|
|||
adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
|
||||
|
||||
speed = hw->phy.autoneg_advertised;
|
||||
if ((!speed) && (hw->mac.ops.get_link_capabilities))
|
||||
if ((!speed) && (hw->mac.ops.get_link_capabilities)) {
|
||||
hw->mac.ops.get_link_capabilities(hw, &speed, &autoneg);
|
||||
|
||||
/* setup the highest link when no autoneg */
|
||||
if (!autoneg) {
|
||||
if (speed & IXGBE_LINK_SPEED_10GB_FULL)
|
||||
speed = IXGBE_LINK_SPEED_10GB_FULL;
|
||||
}
|
||||
}
|
||||
|
||||
if (hw->mac.ops.setup_link)
|
||||
hw->mac.ops.setup_link(hw, speed, true);
|
||||
|
||||
|
|
|
@ -1861,6 +1861,7 @@ enum {
|
|||
#define IXGBE_RFCTL_ISCSI_DIS 0x00000001
|
||||
#define IXGBE_RFCTL_ISCSI_DWC_MASK 0x0000003E
|
||||
#define IXGBE_RFCTL_ISCSI_DWC_SHIFT 1
|
||||
#define IXGBE_RFCTL_RSC_DIS 0x00000020
|
||||
#define IXGBE_RFCTL_NFSW_DIS 0x00000040
|
||||
#define IXGBE_RFCTL_NFSR_DIS 0x00000080
|
||||
#define IXGBE_RFCTL_NFS_VER_MASK 0x00000300
|
||||
|
|
|
@ -282,8 +282,7 @@ ltq_etop_hw_init(struct net_device *dev)
|
|||
|
||||
if (IS_TX(i)) {
|
||||
ltq_dma_alloc_tx(&ch->dma);
|
||||
request_irq(irq, ltq_etop_dma_irq, IRQF_DISABLED,
|
||||
"etop_tx", priv);
|
||||
request_irq(irq, ltq_etop_dma_irq, 0, "etop_tx", priv);
|
||||
} else if (IS_RX(i)) {
|
||||
ltq_dma_alloc_rx(&ch->dma);
|
||||
for (ch->dma.desc = 0; ch->dma.desc < LTQ_DESC_NUM;
|
||||
|
@ -291,8 +290,7 @@ ltq_etop_hw_init(struct net_device *dev)
|
|||
if (ltq_etop_alloc_skb(ch))
|
||||
return -ENOMEM;
|
||||
ch->dma.desc = 0;
|
||||
request_irq(irq, ltq_etop_dma_irq, IRQF_DISABLED,
|
||||
"etop_rx", priv);
|
||||
request_irq(irq, ltq_etop_dma_irq, 0, "etop_rx", priv);
|
||||
}
|
||||
ch->dma.irq = irq;
|
||||
}
|
||||
|
|
|
@ -1123,8 +1123,7 @@ static int pxa168_eth_open(struct net_device *dev)
|
|||
struct pxa168_eth_private *pep = netdev_priv(dev);
|
||||
int err;
|
||||
|
||||
err = request_irq(dev->irq, pxa168_eth_int_handler,
|
||||
IRQF_DISABLED, dev->name, dev);
|
||||
err = request_irq(dev->irq, pxa168_eth_int_handler, 0, dev->name, dev);
|
||||
if (err) {
|
||||
dev_err(&dev->dev, "can't assign irq\n");
|
||||
return -EAGAIN;
|
||||
|
|
|
@ -3092,6 +3092,9 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
|
|||
if (!nskb)
|
||||
goto resubmit;
|
||||
|
||||
skb = e->skb;
|
||||
prefetch(skb->data);
|
||||
|
||||
if (skge_rx_setup(skge, e, nskb, skge->rx_buf_size) < 0) {
|
||||
dev_kfree_skb(nskb);
|
||||
goto resubmit;
|
||||
|
@ -3101,8 +3104,6 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
|
|||
dma_unmap_addr(e, mapaddr),
|
||||
dma_unmap_len(e, maplen),
|
||||
PCI_DMA_FROMDEVICE);
|
||||
skb = e->skb;
|
||||
prefetch(skb->data);
|
||||
}
|
||||
|
||||
skb_put(skb, len);
|
||||
|
|
|
@ -53,9 +53,11 @@ static int mlx4_en_moderation_update(struct mlx4_en_priv *priv)
|
|||
for (i = 0; i < priv->tx_ring_num; i++) {
|
||||
priv->tx_cq[i].moder_cnt = priv->tx_frames;
|
||||
priv->tx_cq[i].moder_time = priv->tx_usecs;
|
||||
err = mlx4_en_set_cq_moder(priv, &priv->tx_cq[i]);
|
||||
if (err)
|
||||
return err;
|
||||
if (priv->port_up) {
|
||||
err = mlx4_en_set_cq_moder(priv, &priv->tx_cq[i]);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
}
|
||||
|
||||
if (priv->adaptive_rx_coal)
|
||||
|
@ -65,9 +67,11 @@ static int mlx4_en_moderation_update(struct mlx4_en_priv *priv)
|
|||
priv->rx_cq[i].moder_cnt = priv->rx_frames;
|
||||
priv->rx_cq[i].moder_time = priv->rx_usecs;
|
||||
priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
|
||||
err = mlx4_en_set_cq_moder(priv, &priv->rx_cq[i]);
|
||||
if (err)
|
||||
return err;
|
||||
if (priv->port_up) {
|
||||
err = mlx4_en_set_cq_moder(priv, &priv->rx_cq[i]);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
}
|
||||
|
||||
return err;
|
||||
|
|
|
@@ -915,7 +915,7 @@ static int ks_net_open(struct net_device *netdev)
 	struct ks_net *ks = netdev_priv(netdev);
 	int err;
 
-#define	KS_INT_FLAGS	(IRQF_DISABLED|IRQF_TRIGGER_LOW)
+#define	KS_INT_FLAGS	IRQF_TRIGGER_LOW
 	/* lock the card, even if we may not actually do anything
 	 * else at the moment.
 	 */
|
||||
|
|
|
@@ -83,8 +83,7 @@ static int jazzsonic_open(struct net_device* dev)
 {
 	int retval;
 
-	retval = request_irq(dev->irq, sonic_interrupt, IRQF_DISABLED,
-				"sonic", dev);
+	retval = request_irq(dev->irq, sonic_interrupt, 0, "sonic", dev);
 	if (retval) {
 		printk(KERN_ERR "%s: unable to get IRQ %d.\n",
 				dev->name, dev->irq);
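
Aside on the pattern in this and the surrounding hunks: IRQF_DISABLED has long been a no-op (handlers already run with local interrupts disabled), so dropping the flag does not change behaviour. A minimal sketch of the resulting request_irq() idiom, with hypothetical demo_* names:

#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/printk.h>

static irqreturn_t demo_interrupt(int irq, void *dev_id)
{
	/* acknowledge the device here; runs with local IRQs off anyway */
	return IRQ_HANDLED;
}

static int demo_open(struct net_device *dev)
{
	/* flags = 0: no IRQF_DISABLED, no sharing, default trigger */
	int retval = request_irq(dev->irq, demo_interrupt, 0, "demo", dev);

	if (retval)
		printk(KERN_ERR "%s: unable to get IRQ %d.\n",
		       dev->name, dev->irq);
	return retval;
}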
|
||||
|
|
|
@ -95,8 +95,7 @@ static int xtsonic_open(struct net_device *dev)
|
|||
{
|
||||
int retval;
|
||||
|
||||
retval = request_irq(dev->irq, sonic_interrupt, IRQF_DISABLED,
|
||||
"sonic", dev);
|
||||
retval = request_irq(dev->irq, sonic_interrupt, 0, "sonic", dev);
|
||||
if (retval) {
|
||||
printk(KERN_ERR "%s: unable to get IRQ %d.\n",
|
||||
dev->name, dev->irq);
|
||||
|
|
|
@ -1219,7 +1219,7 @@ static int pasemi_mac_open(struct net_device *dev)
|
|||
snprintf(mac->tx_irq_name, sizeof(mac->tx_irq_name), "%s tx",
|
||||
dev->name);
|
||||
|
||||
ret = request_irq(mac->tx->chan.irq, pasemi_mac_tx_intr, IRQF_DISABLED,
|
||||
ret = request_irq(mac->tx->chan.irq, pasemi_mac_tx_intr, 0,
|
||||
mac->tx_irq_name, mac->tx);
|
||||
if (ret) {
|
||||
dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n",
|
||||
|
@ -1230,7 +1230,7 @@ static int pasemi_mac_open(struct net_device *dev)
|
|||
snprintf(mac->rx_irq_name, sizeof(mac->rx_irq_name), "%s rx",
|
||||
dev->name);
|
||||
|
||||
ret = request_irq(mac->rx->chan.irq, pasemi_mac_rx_intr, IRQF_DISABLED,
|
||||
ret = request_irq(mac->rx->chan.irq, pasemi_mac_rx_intr, 0,
|
||||
mac->rx_irq_name, mac->rx);
|
||||
if (ret) {
|
||||
dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n",
|
||||
|
|
|
@ -1561,6 +1561,7 @@ static int qlcnic_sriov_vf_reinit_driver(struct qlcnic_adapter *adapter)
|
|||
{
|
||||
int err;
|
||||
|
||||
adapter->need_fw_reset = 0;
|
||||
qlcnic_83xx_reinit_mbx_work(adapter->ahw->mailbox);
|
||||
qlcnic_83xx_enable_mbx_interrupt(adapter);
|
||||
|
||||
|
|
|
@ -4231,6 +4231,7 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
|
|||
case RTL_GIGA_MAC_VER_23:
|
||||
case RTL_GIGA_MAC_VER_24:
|
||||
case RTL_GIGA_MAC_VER_34:
|
||||
case RTL_GIGA_MAC_VER_35:
|
||||
RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
|
||||
break;
|
||||
case RTL_GIGA_MAC_VER_40:
|
||||
|
|
|
@@ -7,7 +7,7 @@ config SFC
	select I2C_ALGOBIT
	select PTP_1588_CLOCK
	---help---
-	  This driver supports 10-gigabit Ethernet cards based on
+	  This driver supports 10/40-gigabit Ethernet cards based on
	  the Solarflare SFC4000, SFC9000-family and SFC9100-family
	  controllers.
|
||||
|
||||
|
|
|
@ -94,7 +94,7 @@ static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx)
|
|||
return resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]);
|
||||
}
|
||||
|
||||
static int efx_ef10_init_capabilities(struct efx_nic *efx)
|
||||
static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
|
||||
{
|
||||
MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_OUT_LEN);
|
||||
struct efx_ef10_nic_data *nic_data = efx->nic_data;
|
||||
|
@ -107,16 +107,27 @@ static int efx_ef10_init_capabilities(struct efx_nic *efx)
|
|||
outbuf, sizeof(outbuf), &outlen);
|
||||
if (rc)
|
||||
return rc;
|
||||
if (outlen < sizeof(outbuf)) {
|
||||
netif_err(efx, drv, efx->net_dev,
|
||||
"unable to read datapath firmware capabilities\n");
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
if (outlen >= sizeof(outbuf)) {
|
||||
nic_data->datapath_caps =
|
||||
MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1);
|
||||
if (!(nic_data->datapath_caps &
|
||||
(1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN))) {
|
||||
netif_err(efx, drv, efx->net_dev,
|
||||
"Capabilities don't indicate TSO support.\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
nic_data->datapath_caps =
|
||||
MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1);
|
||||
|
||||
if (!(nic_data->datapath_caps &
|
||||
(1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN))) {
|
||||
netif_err(efx, drv, efx->net_dev,
|
||||
"current firmware does not support TSO\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
if (!(nic_data->datapath_caps &
|
||||
(1 << MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN))) {
|
||||
netif_err(efx, probe, efx->net_dev,
|
||||
"current firmware does not support an RX prefix\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -217,21 +228,13 @@ static int efx_ef10_probe(struct efx_nic *efx)
|
|||
if (rc)
|
||||
goto fail3;
|
||||
|
||||
rc = efx_ef10_init_capabilities(efx);
|
||||
rc = efx_ef10_init_datapath_caps(efx);
|
||||
if (rc < 0)
|
||||
goto fail3;
|
||||
|
||||
efx->rx_packet_len_offset =
|
||||
ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE;
|
||||
|
||||
if (!(nic_data->datapath_caps &
|
||||
(1 << MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN))) {
|
||||
netif_err(efx, probe, efx->net_dev,
|
||||
"current firmware does not support an RX prefix\n");
|
||||
rc = -ENODEV;
|
||||
goto fail3;
|
||||
}
|
||||
|
||||
rc = efx_mcdi_port_get_number(efx);
|
||||
if (rc < 0)
|
||||
goto fail3;
|
||||
|
@ -260,8 +263,6 @@ static int efx_ef10_probe(struct efx_nic *efx)
|
|||
if (rc)
|
||||
goto fail3;
|
||||
|
||||
efx_ptp_probe(efx);
|
||||
|
||||
return 0;
|
||||
|
||||
fail3:
|
||||
|
@ -342,6 +343,13 @@ static int efx_ef10_init_nic(struct efx_nic *efx)
|
|||
struct efx_ef10_nic_data *nic_data = efx->nic_data;
|
||||
int rc;
|
||||
|
||||
if (nic_data->must_check_datapath_caps) {
|
||||
rc = efx_ef10_init_datapath_caps(efx);
|
||||
if (rc)
|
||||
return rc;
|
||||
nic_data->must_check_datapath_caps = false;
|
||||
}
|
||||
|
||||
if (nic_data->must_realloc_vis) {
|
||||
/* We cannot let the number of VIs change now */
|
||||
rc = efx_ef10_alloc_vis(efx, nic_data->n_allocated_vis,
|
||||
|
@ -710,6 +718,14 @@ static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx)
|
|||
nic_data->must_restore_filters = true;
|
||||
nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
|
||||
|
||||
/* The datapath firmware might have been changed */
|
||||
nic_data->must_check_datapath_caps = true;
|
||||
|
||||
/* MAC statistics have been cleared on the NIC; clear the local
|
||||
* statistic that we update with efx_update_diff_stat().
|
||||
*/
|
||||
nic_data->stats[EF10_STAT_rx_bad_bytes] = 0;
|
||||
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
|
|
|
@ -556,6 +556,7 @@ static int efx_mcdi_phy_set_settings(struct efx_nic *efx, struct ethtool_cmd *ec
|
|||
case 100: caps = 1 << MC_CMD_PHY_CAP_100FDX_LBN; break;
|
||||
case 1000: caps = 1 << MC_CMD_PHY_CAP_1000FDX_LBN; break;
|
||||
case 10000: caps = 1 << MC_CMD_PHY_CAP_10000FDX_LBN; break;
|
||||
case 40000: caps = 1 << MC_CMD_PHY_CAP_40000FDX_LBN; break;
|
||||
default: return -EINVAL;
|
||||
}
|
||||
} else {
|
||||
|
@ -841,6 +842,7 @@ static unsigned int efx_mcdi_event_link_speed[] = {
|
|||
[MCDI_EVENT_LINKCHANGE_SPEED_100M] = 100,
|
||||
[MCDI_EVENT_LINKCHANGE_SPEED_1G] = 1000,
|
||||
[MCDI_EVENT_LINKCHANGE_SPEED_10G] = 10000,
|
||||
[MCDI_EVENT_LINKCHANGE_SPEED_40G] = 40000,
|
||||
};
|
||||
|
||||
void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev)
|
||||
|
|
|
@ -400,6 +400,8 @@ enum {
|
|||
* @rx_rss_context: Firmware handle for our RSS context
|
||||
* @stats: Hardware statistics
|
||||
* @workaround_35388: Flag: firmware supports workaround for bug 35388
|
||||
* @must_check_datapath_caps: Flag: @datapath_caps needs to be revalidated
|
||||
* after MC reboot
|
||||
* @datapath_caps: Capabilities of datapath firmware (FLAGS1 field of
|
||||
* %MC_CMD_GET_CAPABILITIES response)
|
||||
*/
|
||||
|
@ -413,6 +415,7 @@ struct efx_ef10_nic_data {
|
|||
u32 rx_rss_context;
|
||||
u64 stats[EF10_STAT_COUNT];
|
||||
bool workaround_35388;
|
||||
bool must_check_datapath_caps;
|
||||
u32 datapath_caps;
|
||||
};
|
||||
|
||||
|
|
|
@@ -271,7 +271,7 @@ static inline void mcf_outsw(void *a, unsigned char *p, int l)
 #define SMC_insw(a, r, p, l)	mcf_insw(a + r, p, l)
 #define SMC_outsw(a, r, p, l)	mcf_outsw(a + r, p, l)
 
-#define SMC_IRQ_FLAGS		(IRQF_DISABLED)
+#define SMC_IRQ_FLAGS		0
 
 #else
|
||||
|
||||
|
|
|
@ -1356,8 +1356,7 @@ static int smsc9420_open(struct net_device *dev)
|
|||
smsc9420_reg_write(pd, INT_STAT, 0xFFFFFFFF);
|
||||
smsc9420_pci_flush_write(pd);
|
||||
|
||||
result = request_irq(irq, smsc9420_isr, IRQF_SHARED | IRQF_DISABLED,
|
||||
DRV_NAME, pd);
|
||||
result = request_irq(irq, smsc9420_isr, IRQF_SHARED, DRV_NAME, pd);
|
||||
if (result) {
|
||||
smsc_warn(IFUP, "Unable to use IRQ = %d", irq);
|
||||
result = -ENODEV;
|
||||
|
|
|
@ -1726,7 +1726,7 @@ static int ps3_gelic_driver_probe(struct ps3_system_bus_device *dev)
|
|||
goto fail_alloc_irq;
|
||||
}
|
||||
result = request_irq(card->irq, gelic_card_interrupt,
|
||||
IRQF_DISABLED, netdev->name, card);
|
||||
0, netdev->name, card);
|
||||
|
||||
if (result) {
|
||||
dev_info(ctodev(card), "%s:request_irq failed (%d)\n",
|
||||
|
|
|
@ -191,8 +191,8 @@ static inline int mcs_setup_transceiver_vishay(struct mcs_cb *mcs)
|
|||
goto error;
|
||||
|
||||
ret = 0;
|
||||
error:
|
||||
return ret;
|
||||
error:
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Setup a communication between mcs7780 and agilent chip. */
|
||||
|
@ -501,8 +501,11 @@ static inline int mcs_setup_urbs(struct mcs_cb *mcs)
|
|||
return 0;
|
||||
|
||||
mcs->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
|
||||
if (!mcs->rx_urb)
|
||||
if (!mcs->rx_urb) {
|
||||
usb_free_urb(mcs->tx_urb);
|
||||
mcs->tx_urb = NULL;
|
||||
return 0;
|
||||
}
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
@ -643,9 +646,9 @@ static int mcs_speed_change(struct mcs_cb *mcs)
|
|||
ret = mcs_set_reg(mcs, MCS_MODE_REG, rval);
|
||||
|
||||
mcs->speed = mcs->new_speed;
|
||||
error:
|
||||
mcs->new_speed = 0;
|
||||
return ret;
|
||||
error:
|
||||
mcs->new_speed = 0;
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Ioctl calls not supported at this time. Can be an area of future work. */
|
||||
|
@ -738,17 +741,20 @@ static int mcs_net_open(struct net_device *netdev)
|
|||
|
||||
ret = mcs_receive_start(mcs);
|
||||
if (ret)
|
||||
goto error3;
|
||||
goto error4;
|
||||
|
||||
netif_start_queue(netdev);
|
||||
return 0;
|
||||
|
||||
error3:
|
||||
irlap_close(mcs->irlap);
|
||||
error2:
|
||||
kfree_skb(mcs->rx_buff.skb);
|
||||
error1:
|
||||
return ret;
|
||||
error4:
|
||||
usb_free_urb(mcs->rx_urb);
|
||||
usb_free_urb(mcs->tx_urb);
|
||||
error3:
|
||||
irlap_close(mcs->irlap);
|
||||
error2:
|
||||
kfree_skb(mcs->rx_buff.skb);
|
||||
error1:
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Receive callback function. */
|
||||
|
@ -946,11 +952,11 @@ static int mcs_probe(struct usb_interface *intf,
|
|||
usb_set_intfdata(intf, mcs);
|
||||
return 0;
|
||||
|
||||
error2:
|
||||
free_netdev(ndev);
|
||||
error2:
|
||||
free_netdev(ndev);
|
||||
|
||||
error1:
|
||||
return ret;
|
||||
error1:
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* The current device is removed, the USB layer tells us to shut down. */
|
||||
|
|
|
@@ -146,6 +146,7 @@ static int loopback_dev_init(struct net_device *dev)
 
 static void loopback_dev_free(struct net_device *dev)
 {
+	dev_net(dev)->loopback_dev = NULL;
 	free_percpu(dev->lstats);
 	free_netdev(dev);
 }
|
||||
|
|
|
@ -684,15 +684,12 @@ static int netconsole_netdev_event(struct notifier_block *this,
|
|||
case NETDEV_RELEASE:
|
||||
case NETDEV_JOIN:
|
||||
case NETDEV_UNREGISTER:
|
||||
/*
|
||||
* rtnl_lock already held
|
||||
/* rtnl_lock already held
|
||||
* we might sleep in __netpoll_cleanup()
|
||||
*/
|
||||
spin_unlock_irqrestore(&target_list_lock, flags);
|
||||
|
||||
mutex_lock(&nt->mutex);
|
||||
__netpoll_cleanup(&nt->np);
|
||||
mutex_unlock(&nt->mutex);
|
||||
|
||||
spin_lock_irqsave(&target_list_lock, flags);
|
||||
dev_put(nt->np.dev);
|
||||
|
|
|
@ -30,9 +30,9 @@
|
|||
#include <linux/ethtool.h>
|
||||
#include <linux/phy.h>
|
||||
|
||||
#include <asm/io.h>
|
||||
#include <linux/io.h>
|
||||
#include <asm/irq.h>
|
||||
#include <asm/uaccess.h>
|
||||
#include <linux/uaccess.h>
|
||||
|
||||
/* Cicada Extended Control Register 1 */
|
||||
#define MII_CIS8201_EXT_CON1 0x17
|
||||
|
|
|
@@ -281,7 +281,7 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
 	nf_reset(skb);
 
 	skb->ip_summed = CHECKSUM_NONE;
-	ip_select_ident(iph, &rt->dst, NULL);
+	ip_select_ident(skb, &rt->dst, NULL);
 	ip_send_check(iph);
 
 	ip_local_out(skb);
|
||||
|
|
|
@@ -1641,11 +1641,11 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
 		INIT_LIST_HEAD(&tun->disabled);
 		err = tun_attach(tun, file, false);
 		if (err < 0)
-			goto err_free_dev;
+			goto err_free_flow;
 
 		err = register_netdevice(tun->dev);
 		if (err < 0)
-			goto err_free_dev;
+			goto err_detach;
 
 		if (device_create_file(&tun->dev->dev, &dev_attr_tun_flags) ||
 		    device_create_file(&tun->dev->dev, &dev_attr_owner) ||
@@ -1689,7 +1689,12 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
 	strcpy(ifr->ifr_name, tun->dev->name);
 	return 0;
 
-err_free_dev:
+err_detach:
+	tun_detach_all(dev);
+err_free_flow:
+	tun_flow_uninit(tun);
+	security_tun_dev_free_security(tun->security);
+err_free_dev:
 	free_netdev(dev);
 	return err;
 }
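
The reworked error path above follows the usual kernel goto-ladder convention: each failure jumps to the label that undoes exactly what has been acquired so far, and resources are released in reverse order of acquisition. A generic sketch of the pattern, using hypothetical demo_* helpers rather than the tun code itself:

struct demo;
int demo_alloc_flow(struct demo *d);
int demo_attach(struct demo *d);
int demo_register(struct demo *d);
void demo_detach(struct demo *d);
void demo_free_flow(struct demo *d);

static int demo_setup(struct demo *d)
{
	int err;

	err = demo_alloc_flow(d);	/* acquired first */
	if (err)
		return err;

	err = demo_attach(d);
	if (err)
		goto err_free_flow;	/* undo only the flow table */

	err = demo_register(d);
	if (err)
		goto err_detach;	/* undo attach, then the flow table */

	return 0;

err_detach:
	demo_detach(d);
err_free_flow:
	demo_free_flow(d);
	return err;
}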
|
||||
|
|
|
@ -33,7 +33,7 @@
|
|||
#include <linux/usb/usbnet.h>
|
||||
|
||||
|
||||
#if defined(CONFIG_USB_NET_RNDIS_HOST) || defined(CONFIG_USB_NET_RNDIS_HOST_MODULE)
|
||||
#if IS_ENABLED(CONFIG_USB_NET_RNDIS_HOST)
|
||||
|
||||
static int is_rndis(struct usb_interface_descriptor *desc)
|
||||
{
|
||||
|
@ -69,8 +69,7 @@ static const u8 mbm_guid[16] = {
|
|||
0xa6, 0x07, 0xc0, 0xff, 0xcb, 0x7e, 0x39, 0x2a,
|
||||
};
|
||||
|
||||
/*
|
||||
* probes control interface, claims data interface, collects the bulk
|
||||
/* probes control interface, claims data interface, collects the bulk
|
||||
* endpoints, activates data interface (if needed), maybe sets MTU.
|
||||
* all pure cdc, except for certain firmware workarounds, and knowing
|
||||
* that rndis uses one different rule.
|
||||
|
@ -88,7 +87,7 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
|
|||
struct usb_cdc_mdlm_desc *desc = NULL;
|
||||
struct usb_cdc_mdlm_detail_desc *detail = NULL;
|
||||
|
||||
if (sizeof dev->data < sizeof *info)
|
||||
if (sizeof(dev->data) < sizeof(*info))
|
||||
return -EDOM;
|
||||
|
||||
/* expect strict spec conformance for the descriptors, but
|
||||
|
@ -126,10 +125,10 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
|
|||
is_activesync(&intf->cur_altsetting->desc) ||
|
||||
is_wireless_rndis(&intf->cur_altsetting->desc));
|
||||
|
||||
memset(info, 0, sizeof *info);
|
||||
memset(info, 0, sizeof(*info));
|
||||
info->control = intf;
|
||||
while (len > 3) {
|
||||
if (buf [1] != USB_DT_CS_INTERFACE)
|
||||
if (buf[1] != USB_DT_CS_INTERFACE)
|
||||
goto next_desc;
|
||||
|
||||
/* use bDescriptorSubType to identify the CDC descriptors.
|
||||
|
@ -139,14 +138,14 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
|
|||
* in favor of a complicated OID-based RPC scheme doing what
|
||||
* CDC Ethernet achieves with a simple descriptor.
|
||||
*/
|
||||
switch (buf [2]) {
|
||||
switch (buf[2]) {
|
||||
case USB_CDC_HEADER_TYPE:
|
||||
if (info->header) {
|
||||
dev_dbg(&intf->dev, "extra CDC header\n");
|
||||
goto bad_desc;
|
||||
}
|
||||
info->header = (void *) buf;
|
||||
if (info->header->bLength != sizeof *info->header) {
|
||||
if (info->header->bLength != sizeof(*info->header)) {
|
||||
dev_dbg(&intf->dev, "CDC header len %u\n",
|
||||
info->header->bLength);
|
||||
goto bad_desc;
|
||||
|
@ -175,7 +174,7 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
|
|||
goto bad_desc;
|
||||
}
|
||||
info->u = (void *) buf;
|
||||
if (info->u->bLength != sizeof *info->u) {
|
||||
if (info->u->bLength != sizeof(*info->u)) {
|
||||
dev_dbg(&intf->dev, "CDC union len %u\n",
|
||||
info->u->bLength);
|
||||
goto bad_desc;
|
||||
|
@ -233,7 +232,7 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
|
|||
goto bad_desc;
|
||||
}
|
||||
info->ether = (void *) buf;
|
||||
if (info->ether->bLength != sizeof *info->ether) {
|
||||
if (info->ether->bLength != sizeof(*info->ether)) {
|
||||
dev_dbg(&intf->dev, "CDC ether len %u\n",
|
||||
info->ether->bLength);
|
||||
goto bad_desc;
|
||||
|
@ -274,8 +273,8 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
|
|||
break;
|
||||
}
|
||||
next_desc:
|
||||
len -= buf [0]; /* bLength */
|
||||
buf += buf [0];
|
||||
len -= buf[0]; /* bLength */
|
||||
buf += buf[0];
|
||||
}
|
||||
|
||||
/* Microsoft ActiveSync based and some regular RNDIS devices lack the
|
||||
|
@ -379,9 +378,7 @@ void usbnet_cdc_unbind(struct usbnet *dev, struct usb_interface *intf)
|
|||
}
|
||||
EXPORT_SYMBOL_GPL(usbnet_cdc_unbind);
|
||||
|
||||
/*-------------------------------------------------------------------------
|
||||
*
|
||||
* Communications Device Class, Ethernet Control model
|
||||
/* Communications Device Class, Ethernet Control model
|
||||
*
|
||||
* Takes two interfaces. The DATA interface is inactive till an altsetting
|
||||
* is selected. Configuration data includes class descriptors. There's
|
||||
|
@ -389,8 +386,7 @@ EXPORT_SYMBOL_GPL(usbnet_cdc_unbind);
|
|||
*
|
||||
* This should interop with whatever the 2.4 "CDCEther.c" driver
|
||||
* (by Brad Hards) talked with, with more functionality.
|
||||
*
|
||||
*-------------------------------------------------------------------------*/
|
||||
*/
|
||||
|
||||
static void dumpspeed(struct usbnet *dev, __le32 *speeds)
|
||||
{
|
||||
|
@ -404,7 +400,7 @@ void usbnet_cdc_status(struct usbnet *dev, struct urb *urb)
|
|||
{
|
||||
struct usb_cdc_notification *event;
|
||||
|
||||
if (urb->actual_length < sizeof *event)
|
||||
if (urb->actual_length < sizeof(*event))
|
||||
return;
|
||||
|
||||
/* SPEED_CHANGE can get split into two 8-byte packets */
|
||||
|
@ -423,7 +419,7 @@ void usbnet_cdc_status(struct usbnet *dev, struct urb *urb)
|
|||
case USB_CDC_NOTIFY_SPEED_CHANGE: /* tx/rx rates */
|
||||
netif_dbg(dev, timer, dev->net, "CDC: speed change (len %d)\n",
|
||||
urb->actual_length);
|
||||
if (urb->actual_length != (sizeof *event + 8))
|
||||
if (urb->actual_length != (sizeof(*event) + 8))
|
||||
set_bit(EVENT_STS_SPLIT, &dev->flags);
|
||||
else
|
||||
dumpspeed(dev, (__le32 *) &event[1]);
|
||||
|
@ -469,7 +465,6 @@ EXPORT_SYMBOL_GPL(usbnet_cdc_bind);
|
|||
static const struct driver_info cdc_info = {
|
||||
.description = "CDC Ethernet Device",
|
||||
.flags = FLAG_ETHER | FLAG_POINTTOPOINT,
|
||||
// .check_connect = cdc_check_connect,
|
||||
.bind = usbnet_cdc_bind,
|
||||
.unbind = usbnet_cdc_unbind,
|
||||
.status = usbnet_cdc_status,
|
||||
|
@ -493,9 +488,8 @@ static const struct driver_info wwan_info = {
|
|||
#define DELL_VENDOR_ID 0x413C
|
||||
#define REALTEK_VENDOR_ID 0x0bda
|
||||
|
||||
static const struct usb_device_id products [] = {
|
||||
/*
|
||||
* BLACKLIST !!
|
||||
static const struct usb_device_id products[] = {
|
||||
/* BLACKLIST !!
|
||||
*
|
||||
* First blacklist any products that are egregiously nonconformant
|
||||
* with the CDC Ethernet specs. Minor braindamage we cope with; when
|
||||
|
@ -542,7 +536,7 @@ static const struct usb_device_id products [] = {
|
|||
.driver_info = 0,
|
||||
}, {
|
||||
.match_flags = USB_DEVICE_ID_MATCH_INT_INFO
|
||||
| USB_DEVICE_ID_MATCH_DEVICE,
|
||||
| USB_DEVICE_ID_MATCH_DEVICE,
|
||||
.idVendor = 0x04DD,
|
||||
.idProduct = 0x8007, /* C-700 */
|
||||
ZAURUS_MASTER_INTERFACE,
|
||||
|
@ -659,8 +653,7 @@ static const struct usb_device_id products [] = {
|
|||
.driver_info = 0,
|
||||
},
|
||||
|
||||
/*
|
||||
* WHITELIST!!!
|
||||
/* WHITELIST!!!
|
||||
*
|
||||
* CDC Ether uses two interfaces, not necessarily consecutive.
|
||||
* We match the main interface, ignoring the optional device
|
||||
|
@ -672,59 +665,39 @@ static const struct usb_device_id products [] = {
|
|||
*/
|
||||
{
|
||||
/* ZTE (Vodafone) K3805-Z */
|
||||
.match_flags = USB_DEVICE_ID_MATCH_VENDOR
|
||||
| USB_DEVICE_ID_MATCH_PRODUCT
|
||||
| USB_DEVICE_ID_MATCH_INT_INFO,
|
||||
.idVendor = ZTE_VENDOR_ID,
|
||||
.idProduct = 0x1003,
|
||||
.bInterfaceClass = USB_CLASS_COMM,
|
||||
.bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET,
|
||||
.bInterfaceProtocol = USB_CDC_PROTO_NONE,
|
||||
USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1003, USB_CLASS_COMM,
|
||||
USB_CDC_SUBCLASS_ETHERNET,
|
||||
USB_CDC_PROTO_NONE),
|
||||
.driver_info = (unsigned long)&wwan_info,
|
||||
}, {
|
||||
/* ZTE (Vodafone) K3806-Z */
|
||||
.match_flags = USB_DEVICE_ID_MATCH_VENDOR
|
||||
| USB_DEVICE_ID_MATCH_PRODUCT
|
||||
| USB_DEVICE_ID_MATCH_INT_INFO,
|
||||
.idVendor = ZTE_VENDOR_ID,
|
||||
.idProduct = 0x1015,
|
||||
.bInterfaceClass = USB_CLASS_COMM,
|
||||
.bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET,
|
||||
.bInterfaceProtocol = USB_CDC_PROTO_NONE,
|
||||
USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1015, USB_CLASS_COMM,
|
||||
USB_CDC_SUBCLASS_ETHERNET,
|
||||
USB_CDC_PROTO_NONE),
|
||||
.driver_info = (unsigned long)&wwan_info,
|
||||
}, {
|
||||
/* ZTE (Vodafone) K4510-Z */
|
||||
.match_flags = USB_DEVICE_ID_MATCH_VENDOR
|
||||
| USB_DEVICE_ID_MATCH_PRODUCT
|
||||
| USB_DEVICE_ID_MATCH_INT_INFO,
|
||||
.idVendor = ZTE_VENDOR_ID,
|
||||
.idProduct = 0x1173,
|
||||
.bInterfaceClass = USB_CLASS_COMM,
|
||||
.bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET,
|
||||
.bInterfaceProtocol = USB_CDC_PROTO_NONE,
|
||||
USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1173, USB_CLASS_COMM,
|
||||
USB_CDC_SUBCLASS_ETHERNET,
|
||||
USB_CDC_PROTO_NONE),
|
||||
.driver_info = (unsigned long)&wwan_info,
|
||||
}, {
|
||||
/* ZTE (Vodafone) K3770-Z */
|
||||
.match_flags = USB_DEVICE_ID_MATCH_VENDOR
|
||||
| USB_DEVICE_ID_MATCH_PRODUCT
|
||||
| USB_DEVICE_ID_MATCH_INT_INFO,
|
||||
.idVendor = ZTE_VENDOR_ID,
|
||||
.idProduct = 0x1177,
|
||||
.bInterfaceClass = USB_CLASS_COMM,
|
||||
.bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET,
|
||||
.bInterfaceProtocol = USB_CDC_PROTO_NONE,
|
||||
USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1177, USB_CLASS_COMM,
|
||||
USB_CDC_SUBCLASS_ETHERNET,
|
||||
USB_CDC_PROTO_NONE),
|
||||
.driver_info = (unsigned long)&wwan_info,
|
||||
}, {
|
||||
/* ZTE (Vodafone) K3772-Z */
|
||||
.match_flags = USB_DEVICE_ID_MATCH_VENDOR
|
||||
| USB_DEVICE_ID_MATCH_PRODUCT
|
||||
| USB_DEVICE_ID_MATCH_INT_INFO,
|
||||
.idVendor = ZTE_VENDOR_ID,
|
||||
.idProduct = 0x1181,
|
||||
.bInterfaceClass = USB_CLASS_COMM,
|
||||
.bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET,
|
||||
.bInterfaceProtocol = USB_CDC_PROTO_NONE,
|
||||
USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1181, USB_CLASS_COMM,
|
||||
USB_CDC_SUBCLASS_ETHERNET,
|
||||
USB_CDC_PROTO_NONE),
|
||||
.driver_info = (unsigned long)&wwan_info,
|
||||
}, {
|
||||
/* Telit modules */
|
||||
USB_VENDOR_AND_INTERFACE_INFO(0x1bc7, USB_CLASS_COMM,
|
||||
USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
|
||||
.driver_info = (kernel_ulong_t) &wwan_info,
|
||||
}, {
|
||||
USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET,
|
||||
USB_CDC_PROTO_NONE),
|
||||
|
@ -736,15 +709,11 @@ static const struct usb_device_id products [] = {
|
|||
|
||||
}, {
|
||||
/* Various Huawei modems with a network port like the UMG1831 */
|
||||
.match_flags = USB_DEVICE_ID_MATCH_VENDOR
|
||||
| USB_DEVICE_ID_MATCH_INT_INFO,
|
||||
.idVendor = HUAWEI_VENDOR_ID,
|
||||
.bInterfaceClass = USB_CLASS_COMM,
|
||||
.bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET,
|
||||
.bInterfaceProtocol = 255,
|
||||
USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_COMM,
|
||||
USB_CDC_SUBCLASS_ETHERNET, 255),
|
||||
.driver_info = (unsigned long)&wwan_info,
|
||||
},
|
||||
{ }, // END
|
||||
{ }, /* END */
|
||||
};
|
||||
MODULE_DEVICE_TABLE(usb, products);
|
||||
|
||||
|
|
|
@@ -564,7 +564,7 @@ static void vxlan_notify_add_rx_port(struct sock *sk)
 	struct net_device *dev;
 	struct net *net = sock_net(sk);
 	sa_family_t sa_family = sk->sk_family;
-	u16 port = htons(inet_sk(sk)->inet_sport);
+	__be16 port = inet_sk(sk)->inet_sport;
 
 	rcu_read_lock();
 	for_each_netdev_rcu(net, dev) {
@@ -581,7 +581,7 @@ static void vxlan_notify_del_rx_port(struct sock *sk)
 	struct net_device *dev;
 	struct net *net = sock_net(sk);
 	sa_family_t sa_family = sk->sk_family;
-	u16 port = htons(inet_sk(sk)->inet_sport);
+	__be16 port = inet_sk(sk)->inet_sport;
 
 	rcu_read_lock();
 	for_each_netdev_rcu(net, dev) {
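
The type change above matters because inet_sport is already stored in network byte order: feeding it through htons() a second time swaps it back to host order on little-endian machines, and keeping the value in a plain u16 also defeats sparse's endianness annotations. A small userspace illustration of the convention (hypothetical values, using the IANA VXLAN port):

#include <arpa/inet.h>	/* htons/ntohs for a userspace illustration */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t host_port = 4789;		/* host byte order */
	uint16_t wire_port = htons(host_port);	/* what a socket stores */

	/* Converting for display uses ntohs(); calling htons() again on
	 * wire_port would be the double-swap bug fixed above. */
	printf("wire 0x%04x -> host %u\n", wire_port, ntohs(wire_port));
	return 0;
}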
|
||||
|
@ -2021,7 +2021,8 @@ static struct device_type vxlan_type = {
|
|||
};
|
||||
|
||||
/* Calls the ndo_add_vxlan_port of the caller in order to
|
||||
* supply the listening VXLAN udp ports.
|
||||
* supply the listening VXLAN udp ports. Callers are expected
|
||||
* to implement the ndo_add_vxlan_port.
|
||||
*/
|
||||
void vxlan_get_rx_port(struct net_device *dev)
|
||||
{
|
||||
|
@ -2029,16 +2030,13 @@ void vxlan_get_rx_port(struct net_device *dev)
|
|||
struct net *net = dev_net(dev);
|
||||
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
|
||||
sa_family_t sa_family;
|
||||
u16 port;
|
||||
int i;
|
||||
|
||||
if (!dev || !dev->netdev_ops || !dev->netdev_ops->ndo_add_vxlan_port)
|
||||
return;
|
||||
__be16 port;
|
||||
unsigned int i;
|
||||
|
||||
spin_lock(&vn->sock_lock);
|
||||
for (i = 0; i < PORT_HASH_SIZE; ++i) {
|
||||
hlist_for_each_entry_rcu(vs, vs_head(net, i), hlist) {
|
||||
port = htons(inet_sk(vs->sock->sk)->inet_sport);
|
||||
hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist) {
|
||||
port = inet_sk(vs->sock->sk)->inet_sport;
|
||||
sa_family = vs->sock->sk->sk_family;
|
||||
dev->netdev_ops->ndo_add_vxlan_port(dev, sa_family,
|
||||
port);
|
||||
|
@ -2492,15 +2490,19 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
|
|||
|
||||
SET_ETHTOOL_OPS(dev, &vxlan_ethtool_ops);
|
||||
|
||||
/* create an fdb entry for default destination */
|
||||
err = vxlan_fdb_create(vxlan, all_zeros_mac,
|
||||
&vxlan->default_dst.remote_ip,
|
||||
NUD_REACHABLE|NUD_PERMANENT,
|
||||
NLM_F_EXCL|NLM_F_CREATE,
|
||||
vxlan->dst_port, vxlan->default_dst.remote_vni,
|
||||
vxlan->default_dst.remote_ifindex, NTF_SELF);
|
||||
if (err)
|
||||
return err;
|
||||
/* create an fdb entry for a valid default destination */
|
||||
if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) {
|
||||
err = vxlan_fdb_create(vxlan, all_zeros_mac,
|
||||
&vxlan->default_dst.remote_ip,
|
||||
NUD_REACHABLE|NUD_PERMANENT,
|
||||
NLM_F_EXCL|NLM_F_CREATE,
|
||||
vxlan->dst_port,
|
||||
vxlan->default_dst.remote_vni,
|
||||
vxlan->default_dst.remote_ifindex,
|
||||
NTF_SELF);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
|
||||
err = register_netdevice(dev);
|
||||
if (err) {
|
||||
|
|
|
@ -28,7 +28,7 @@ config BRCMFMAC
|
|||
|
||||
config BRCMFMAC_SDIO
|
||||
bool "SDIO bus interface support for FullMAC driver"
|
||||
depends on MMC
|
||||
depends on (MMC = y || MMC = BRCMFMAC)
|
||||
depends on BRCMFMAC
|
||||
select FW_LOADER
|
||||
default y
|
||||
|
@ -39,7 +39,7 @@ config BRCMFMAC_SDIO
|
|||
|
||||
config BRCMFMAC_USB
|
||||
bool "USB bus interface support for FullMAC driver"
|
||||
depends on USB
|
||||
depends on (USB = y || USB = BRCMFMAC)
|
||||
depends on BRCMFMAC
|
||||
select FW_LOADER
|
||||
---help---
|
||||
|
|
|
@ -40,7 +40,9 @@ struct hwbus_priv {
|
|||
struct cw1200_common *core;
|
||||
const struct cw1200_platform_data_spi *pdata;
|
||||
spinlock_t lock; /* Serialize all bus operations */
|
||||
wait_queue_head_t wq;
|
||||
int claimed;
|
||||
int irq_disabled;
|
||||
};
|
||||
|
||||
#define SDIO_TO_SPI_ADDR(addr) ((addr & 0x1f)>>2)
|
||||
|
@ -197,8 +199,11 @@ static void cw1200_spi_lock(struct hwbus_priv *self)
|
|||
{
|
||||
unsigned long flags;
|
||||
|
||||
DECLARE_WAITQUEUE(wait, current);
|
||||
|
||||
might_sleep();
|
||||
|
||||
add_wait_queue(&self->wq, &wait);
|
||||
spin_lock_irqsave(&self->lock, flags);
|
||||
while (1) {
|
||||
set_current_state(TASK_UNINTERRUPTIBLE);
|
||||
|
@ -211,6 +216,7 @@ static void cw1200_spi_lock(struct hwbus_priv *self)
|
|||
set_current_state(TASK_RUNNING);
|
||||
self->claimed = 1;
|
||||
spin_unlock_irqrestore(&self->lock, flags);
|
||||
remove_wait_queue(&self->wq, &wait);
|
||||
|
||||
return;
|
||||
}
|
||||
|
@ -222,6 +228,8 @@ static void cw1200_spi_unlock(struct hwbus_priv *self)
|
|||
spin_lock_irqsave(&self->lock, flags);
|
||||
self->claimed = 0;
|
||||
spin_unlock_irqrestore(&self->lock, flags);
|
||||
wake_up(&self->wq);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -230,6 +238,8 @@ static irqreturn_t cw1200_spi_irq_handler(int irq, void *dev_id)
|
|||
struct hwbus_priv *self = dev_id;
|
||||
|
||||
if (self->core) {
|
||||
disable_irq_nosync(self->func->irq);
|
||||
self->irq_disabled = 1;
|
||||
cw1200_irq_handler(self->core);
|
||||
return IRQ_HANDLED;
|
||||
} else {
|
||||
|
@ -263,13 +273,22 @@ static int cw1200_spi_irq_subscribe(struct hwbus_priv *self)
|
|||
|
||||
static int cw1200_spi_irq_unsubscribe(struct hwbus_priv *self)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
pr_debug("SW IRQ unsubscribe\n");
|
||||
disable_irq_wake(self->func->irq);
|
||||
free_irq(self->func->irq, self);
|
||||
|
||||
return ret;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int cw1200_spi_irq_enable(struct hwbus_priv *self, int enable)
|
||||
{
|
||||
/* Disables are handled by the interrupt handler */
|
||||
if (enable && self->irq_disabled) {
|
||||
enable_irq(self->func->irq);
|
||||
self->irq_disabled = 0;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int cw1200_spi_off(const struct cw1200_platform_data_spi *pdata)
|
||||
|
@ -349,6 +368,7 @@ static struct hwbus_ops cw1200_spi_hwbus_ops = {
|
|||
.unlock = cw1200_spi_unlock,
|
||||
.align_size = cw1200_spi_align_size,
|
||||
.power_mgmt = cw1200_spi_pm,
|
||||
.irq_enable = cw1200_spi_irq_enable,
|
||||
};
|
||||
|
||||
/* Probe Function to be called by SPI stack when device is discovered */
|
||||
|
@ -400,6 +420,8 @@ static int cw1200_spi_probe(struct spi_device *func)
|
|||
|
||||
spi_set_drvdata(func, self);
|
||||
|
||||
init_waitqueue_head(&self->wq);
|
||||
|
||||
status = cw1200_spi_irq_subscribe(self);
|
||||
|
||||
status = cw1200_core_probe(&cw1200_spi_hwbus_ops,
|
||||
|
|
|
@ -485,7 +485,7 @@ int cw1200_load_firmware(struct cw1200_common *priv)
|
|||
|
||||
/* Enable interrupt signalling */
|
||||
priv->hwbus_ops->lock(priv->hwbus_priv);
|
||||
ret = __cw1200_irq_enable(priv, 1);
|
||||
ret = __cw1200_irq_enable(priv, 2);
|
||||
priv->hwbus_ops->unlock(priv->hwbus_priv);
|
||||
if (ret < 0)
|
||||
goto unsubscribe;
|
||||
|
|
|
@ -28,6 +28,7 @@ struct hwbus_ops {
|
|||
void (*unlock)(struct hwbus_priv *self);
|
||||
size_t (*align_size)(struct hwbus_priv *self, size_t size);
|
||||
int (*power_mgmt)(struct hwbus_priv *self, bool suspend);
|
||||
int (*irq_enable)(struct hwbus_priv *self, int enable);
|
||||
};
|
||||
|
||||
#endif /* CW1200_HWBUS_H */
|
||||
|
|
|
@ -273,6 +273,21 @@ int __cw1200_irq_enable(struct cw1200_common *priv, int enable)
|
|||
u16 val16;
|
||||
int ret;
|
||||
|
||||
/* We need to do this hack because the SPI layer can sleep on I/O
|
||||
and the general path involves I/O to the device in interrupt
|
||||
context.
|
||||
|
||||
However, the initial enable call needs to go to the hardware.
|
||||
|
||||
We don't worry about shutdown because we do a full reset which
|
||||
clears the interrupt enabled bits.
|
||||
*/
|
||||
if (priv->hwbus_ops->irq_enable) {
|
||||
ret = priv->hwbus_ops->irq_enable(priv->hwbus_priv, enable);
|
||||
if (ret || enable < 2)
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (HIF_8601_SILICON == priv->hw_type) {
|
||||
ret = __cw1200_reg_read_32(priv, ST90TDS_CONFIG_REG_ID, &val32);
|
||||
if (ret < 0) {
|
||||
|
|
|
@ -6659,19 +6659,20 @@ int rt2800_enable_radio(struct rt2x00_dev *rt2x00dev)
|
|||
rt2800_init_registers(rt2x00dev)))
|
||||
return -EIO;
|
||||
|
||||
if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev)))
|
||||
return -EIO;
|
||||
|
||||
/*
|
||||
* Send signal to firmware during boot time.
|
||||
*/
|
||||
rt2800_register_write(rt2x00dev, H2M_BBP_AGENT, 0);
|
||||
rt2800_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0);
|
||||
if (rt2x00_is_usb(rt2x00dev)) {
|
||||
if (rt2x00_is_usb(rt2x00dev))
|
||||
rt2800_register_write(rt2x00dev, H2M_INT_SRC, 0);
|
||||
rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0, 0, 0);
|
||||
}
|
||||
rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0, 0, 0);
|
||||
msleep(1);
|
||||
|
||||
if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev) ||
|
||||
rt2800_wait_bbp_ready(rt2x00dev)))
|
||||
if (unlikely(rt2800_wait_bbp_ready(rt2x00dev)))
|
||||
return -EIO;
|
||||
|
||||
rt2800_init_bbp(rt2x00dev);
|
||||
|
|
|
@ -438,17 +438,16 @@ static int rtl8187_init_urbs(struct ieee80211_hw *dev)
|
|||
skb_queue_tail(&priv->rx_queue, skb);
|
||||
usb_anchor_urb(entry, &priv->anchored);
|
||||
ret = usb_submit_urb(entry, GFP_KERNEL);
|
||||
usb_put_urb(entry);
|
||||
if (ret) {
|
||||
skb_unlink(skb, &priv->rx_queue);
|
||||
usb_unanchor_urb(entry);
|
||||
goto err;
|
||||
}
|
||||
usb_free_urb(entry);
|
||||
}
|
||||
return ret;
|
||||
|
||||
err:
|
||||
usb_free_urb(entry);
|
||||
kfree_skb(skb);
|
||||
usb_kill_anchored_urbs(&priv->anchored);
|
||||
return ret;
|
||||
|
@ -956,8 +955,12 @@ static int rtl8187_start(struct ieee80211_hw *dev)
|
|||
(RETRY_COUNT << 8 /* short retry limit */) |
|
||||
(RETRY_COUNT << 0 /* long retry limit */) |
|
||||
(7 << 21 /* MAX TX DMA */));
|
||||
rtl8187_init_urbs(dev);
|
||||
rtl8187b_init_status_urb(dev);
|
||||
ret = rtl8187_init_urbs(dev);
|
||||
if (ret)
|
||||
goto rtl8187_start_exit;
|
||||
ret = rtl8187b_init_status_urb(dev);
|
||||
if (ret)
|
||||
usb_kill_anchored_urbs(&priv->anchored);
|
||||
goto rtl8187_start_exit;
|
||||
}
|
||||
|
||||
|
@ -966,7 +969,9 @@ static int rtl8187_start(struct ieee80211_hw *dev)
|
|||
rtl818x_iowrite32(priv, &priv->map->MAR[0], ~0);
|
||||
rtl818x_iowrite32(priv, &priv->map->MAR[1], ~0);
|
||||
|
||||
rtl8187_init_urbs(dev);
|
||||
ret = rtl8187_init_urbs(dev);
|
||||
if (ret)
|
||||
goto rtl8187_start_exit;
|
||||
|
||||
reg = RTL818X_RX_CONF_ONLYERLPKT |
|
||||
RTL818X_RX_CONF_RX_AUTORESETPHY |
|
||||
|
|
|
@ -184,6 +184,7 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
|
|||
unsigned long rx_ring_ref, unsigned int tx_evtchn,
|
||||
unsigned int rx_evtchn);
|
||||
void xenvif_disconnect(struct xenvif *vif);
|
||||
void xenvif_free(struct xenvif *vif);
|
||||
|
||||
int xenvif_xenbus_init(void);
|
||||
void xenvif_xenbus_fini(void);
|
||||
|
|
|
@ -353,6 +353,9 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
|
|||
}
|
||||
|
||||
netdev_dbg(dev, "Successfully created xenvif\n");
|
||||
|
||||
__module_get(THIS_MODULE);
|
||||
|
||||
return vif;
|
||||
}
|
||||
|
||||
|
@ -366,8 +369,6 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
|
|||
if (vif->tx_irq)
|
||||
return 0;
|
||||
|
||||
__module_get(THIS_MODULE);
|
||||
|
||||
err = xenvif_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref);
|
||||
if (err < 0)
|
||||
goto err;
|
||||
|
@ -406,7 +407,7 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
|
|||
|
||||
init_waitqueue_head(&vif->wq);
|
||||
vif->task = kthread_create(xenvif_kthread,
|
||||
(void *)vif, vif->dev->name);
|
||||
(void *)vif, "%s", vif->dev->name);
|
||||
if (IS_ERR(vif->task)) {
|
||||
pr_warn("Could not allocate kthread for %s\n", vif->dev->name);
|
||||
err = PTR_ERR(vif->task);
|
||||
|
@ -452,12 +453,6 @@ void xenvif_carrier_off(struct xenvif *vif)
|
|||
|
||||
void xenvif_disconnect(struct xenvif *vif)
|
||||
{
|
||||
/* Disconnect funtion might get called by generic framework
|
||||
* even before vif connects, so we need to check if we really
|
||||
* need to do a module_put.
|
||||
*/
|
||||
int need_module_put = 0;
|
||||
|
||||
if (netif_carrier_ok(vif->dev))
|
||||
xenvif_carrier_off(vif);
|
||||
|
||||
|
@ -468,23 +463,22 @@ void xenvif_disconnect(struct xenvif *vif)
|
|||
unbind_from_irqhandler(vif->tx_irq, vif);
|
||||
unbind_from_irqhandler(vif->rx_irq, vif);
|
||||
}
|
||||
/* vif->irq is valid, we had a module_get in
|
||||
* xenvif_connect.
|
||||
*/
|
||||
need_module_put = 1;
|
||||
vif->tx_irq = 0;
|
||||
}
|
||||
|
||||
if (vif->task)
|
||||
kthread_stop(vif->task);
|
||||
|
||||
xenvif_unmap_frontend_rings(vif);
|
||||
}
|
||||
|
||||
void xenvif_free(struct xenvif *vif)
|
||||
{
|
||||
netif_napi_del(&vif->napi);
|
||||
|
||||
unregister_netdev(vif->dev);
|
||||
|
||||
xenvif_unmap_frontend_rings(vif);
|
||||
|
||||
free_netdev(vif->dev);
|
||||
|
||||
if (need_module_put)
|
||||
module_put(THIS_MODULE);
|
||||
module_put(THIS_MODULE);
|
||||
}
|
||||
|
|
|
@ -212,6 +212,49 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
|
|||
return false;
|
||||
}
|
||||
|
||||
struct xenvif_count_slot_state {
|
||||
unsigned long copy_off;
|
||||
bool head;
|
||||
};
|
||||
|
||||
unsigned int xenvif_count_frag_slots(struct xenvif *vif,
|
||||
unsigned long offset, unsigned long size,
|
||||
struct xenvif_count_slot_state *state)
|
||||
{
|
||||
unsigned count = 0;
|
||||
|
||||
offset &= ~PAGE_MASK;
|
||||
|
||||
while (size > 0) {
|
||||
unsigned long bytes;
|
||||
|
||||
bytes = PAGE_SIZE - offset;
|
||||
|
||||
if (bytes > size)
|
||||
bytes = size;
|
||||
|
||||
if (start_new_rx_buffer(state->copy_off, bytes, state->head)) {
|
||||
count++;
|
||||
state->copy_off = 0;
|
||||
}
|
||||
|
||||
if (state->copy_off + bytes > MAX_BUFFER_OFFSET)
|
||||
bytes = MAX_BUFFER_OFFSET - state->copy_off;
|
||||
|
||||
state->copy_off += bytes;
|
||||
|
||||
offset += bytes;
|
||||
size -= bytes;
|
||||
|
||||
if (offset == PAGE_SIZE)
|
||||
offset = 0;
|
||||
|
||||
state->head = false;
|
||||
}
|
||||
|
||||
return count;
|
||||
}
|
||||
|
||||
/*
|
||||
* Figure out how many ring slots we're going to need to send @skb to
|
||||
* the guest. This function is essentially a dry run of
|
||||
|
@ -219,48 +262,39 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
|
|||
*/
|
||||
unsigned int xenvif_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
|
||||
{
|
||||
struct xenvif_count_slot_state state;
|
||||
unsigned int count;
|
||||
int i, copy_off;
|
||||
unsigned char *data;
|
||||
unsigned i;
|
||||
|
||||
count = DIV_ROUND_UP(skb_headlen(skb), PAGE_SIZE);
|
||||
state.head = true;
|
||||
state.copy_off = 0;
|
||||
|
||||
copy_off = skb_headlen(skb) % PAGE_SIZE;
|
||||
/* Slot for the first (partial) page of data. */
|
||||
count = 1;
|
||||
|
||||
/* Need a slot for the GSO prefix for GSO extra data? */
|
||||
if (skb_shinfo(skb)->gso_size)
|
||||
count++;
|
||||
|
||||
data = skb->data;
|
||||
while (data < skb_tail_pointer(skb)) {
|
||||
unsigned long offset = offset_in_page(data);
|
||||
unsigned long size = PAGE_SIZE - offset;
|
||||
|
||||
if (data + size > skb_tail_pointer(skb))
|
||||
size = skb_tail_pointer(skb) - data;
|
||||
|
||||
count += xenvif_count_frag_slots(vif, offset, size, &state);
|
||||
|
||||
data += size;
|
||||
}
|
||||
|
||||
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
|
||||
unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
|
||||
unsigned long offset = skb_shinfo(skb)->frags[i].page_offset;
|
||||
unsigned long bytes;
|
||||
|
||||
offset &= ~PAGE_MASK;
|
||||
|
||||
while (size > 0) {
|
||||
BUG_ON(offset >= PAGE_SIZE);
|
||||
BUG_ON(copy_off > MAX_BUFFER_OFFSET);
|
||||
|
||||
bytes = PAGE_SIZE - offset;
|
||||
|
||||
if (bytes > size)
|
||||
bytes = size;
|
||||
|
||||
if (start_new_rx_buffer(copy_off, bytes, 0)) {
|
||||
count++;
|
||||
copy_off = 0;
|
||||
}
|
||||
|
||||
if (copy_off + bytes > MAX_BUFFER_OFFSET)
|
||||
bytes = MAX_BUFFER_OFFSET - copy_off;
|
||||
|
||||
copy_off += bytes;
|
||||
|
||||
offset += bytes;
|
||||
size -= bytes;
|
||||
|
||||
if (offset == PAGE_SIZE)
|
||||
offset = 0;
|
||||
}
|
||||
count += xenvif_count_frag_slots(vif, offset, size, &state);
|
||||
}
|
||||
return count;
|
||||
}
|
||||
|
|
|
@ -42,7 +42,7 @@ static int netback_remove(struct xenbus_device *dev)
|
|||
if (be->vif) {
|
||||
kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
|
||||
xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status");
|
||||
xenvif_disconnect(be->vif);
|
||||
xenvif_free(be->vif);
|
||||
be->vif = NULL;
|
||||
}
|
||||
kfree(be);
|
||||
|
@ -213,9 +213,18 @@ static void disconnect_backend(struct xenbus_device *dev)
|
|||
{
|
||||
struct backend_info *be = dev_get_drvdata(&dev->dev);
|
||||
|
||||
if (be->vif) {
|
||||
xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status");
|
||||
if (be->vif)
|
||||
xenvif_disconnect(be->vif);
|
||||
}
|
||||
|
||||
static void destroy_backend(struct xenbus_device *dev)
|
||||
{
|
||||
struct backend_info *be = dev_get_drvdata(&dev->dev);
|
||||
|
||||
if (be->vif) {
|
||||
kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
|
||||
xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status");
|
||||
xenvif_free(be->vif);
|
||||
be->vif = NULL;
|
||||
}
|
||||
}
|
||||
|
@ -246,14 +255,11 @@ static void frontend_changed(struct xenbus_device *dev,
|
|||
case XenbusStateConnected:
|
||||
if (dev->state == XenbusStateConnected)
|
||||
break;
|
||||
backend_create_xenvif(be);
|
||||
if (be->vif)
|
||||
connect(be);
|
||||
break;
|
||||
|
||||
case XenbusStateClosing:
|
||||
if (be->vif)
|
||||
kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
|
||||
disconnect_backend(dev);
|
||||
xenbus_switch_state(dev, XenbusStateClosing);
|
||||
break;
|
||||
|
@ -262,6 +268,7 @@ static void frontend_changed(struct xenbus_device *dev,
|
|||
xenbus_switch_state(dev, XenbusStateClosed);
|
||||
if (xenbus_dev_is_online(dev))
|
||||
break;
|
||||
destroy_backend(dev);
|
||||
/* fall through if not online */
|
||||
case XenbusStateUnknown:
|
||||
device_unregister(&dev->dev);
|
||||
|
|
|
@ -105,7 +105,7 @@
|
|||
#define BNX2FC_RQ_WQE_SIZE (BNX2FC_RQ_BUF_SZ)
|
||||
#define BNX2FC_XFERQ_WQE_SIZE (sizeof(struct fcoe_xfrqe))
|
||||
#define BNX2FC_CONFQ_WQE_SIZE (sizeof(struct fcoe_confqe))
|
||||
#define BNX2FC_5771X_DB_PAGE_SIZE 128
|
||||
#define BNX2X_DB_SHIFT 3
|
||||
|
||||
#define BNX2FC_TASK_SIZE 128
|
||||
#define BNX2FC_TASKS_PER_PAGE (PAGE_SIZE/BNX2FC_TASK_SIZE)
|
||||
|
|
|
@ -1421,8 +1421,7 @@ int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt)
|
|||
|
||||
reg_base = pci_resource_start(hba->pcidev,
|
||||
BNX2X_DOORBELL_PCI_BAR);
|
||||
reg_off = BNX2FC_5771X_DB_PAGE_SIZE *
|
||||
(context_id & 0x1FFFF) + DPM_TRIGER_TYPE;
|
||||
reg_off = (1 << BNX2X_DB_SHIFT) * (context_id & 0x1FFFF);
|
||||
tgt->ctx_base = ioremap_nocache(reg_base + reg_off, 4);
|
||||
if (!tgt->ctx_base)
|
||||
return -ENOMEM;
|
||||
|
|
|
@ -64,7 +64,7 @@
|
|||
#define MAX_PAGES_PER_CTRL_STRUCT_POOL 8
|
||||
#define BNX2I_RESERVED_SLOW_PATH_CMD_SLOTS 4
|
||||
|
||||
#define BNX2I_5771X_DBELL_PAGE_SIZE 128
|
||||
#define BNX2X_DB_SHIFT 3
|
||||
|
||||
/* 5706/08 hardware has limit on maximum buffer size per BD it can handle */
|
||||
#define MAX_BD_LENGTH 65535
|
||||
|
|
|
@ -2738,8 +2738,7 @@ int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep)
|
|||
if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
|
||||
reg_base = pci_resource_start(ep->hba->pcidev,
|
||||
BNX2X_DOORBELL_PCI_BAR);
|
||||
reg_off = BNX2I_5771X_DBELL_PAGE_SIZE * (cid_num & 0x1FFFF) +
|
||||
DPM_TRIGER_TYPE;
|
||||
reg_off = (1 << BNX2X_DB_SHIFT) * (cid_num & 0x1FFFF);
|
||||
ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off, 4);
|
||||
goto arm_cq;
|
||||
}
|
||||
|
|
|
@ -950,14 +950,14 @@ struct netdev_phys_port_id {
|
|||
* multiple net devices on single physical port.
|
||||
*
|
||||
* void (*ndo_add_vxlan_port)(struct net_device *dev,
|
||||
* sa_family_t sa_family, __u16 port);
|
||||
* sa_family_t sa_family, __be16 port);
|
||||
* Called by vxlan to notiy a driver about the UDP port and socket
|
||||
* address family that vxlan is listnening to. It is called only when
|
||||
* a new port starts listening. The operation is protected by the
|
||||
* vxlan_net->sock_lock.
|
||||
*
|
||||
* void (*ndo_del_vxlan_port)(struct net_device *dev,
|
||||
* sa_family_t sa_family, __u16 port);
|
||||
* sa_family_t sa_family, __be16 port);
|
||||
* Called by vxlan to notify the driver about a UDP port and socket
|
||||
* address family that vxlan is not listening to anymore. The operation
|
||||
* is protected by the vxlan_net->sock_lock.
|
||||
|
@ -1093,10 +1093,10 @@ struct net_device_ops {
|
|||
struct netdev_phys_port_id *ppid);
|
||||
void (*ndo_add_vxlan_port)(struct net_device *dev,
|
||||
sa_family_t sa_family,
|
||||
__u16 port);
|
||||
__be16 port);
|
||||
void (*ndo_del_vxlan_port)(struct net_device *dev,
|
||||
sa_family_t sa_family,
|
||||
__u16 port);
|
||||
__be16 port);
|
||||
};
|
||||
|
||||
/*
|
||||
|
|
|
@@ -296,10 +296,12 @@ ip_set_eexist(int ret, u32 flags)
 
 /* Match elements marked with nomatch */
 static inline bool
-ip_set_enomatch(int ret, u32 flags, enum ipset_adt adt)
+ip_set_enomatch(int ret, u32 flags, enum ipset_adt adt, struct ip_set *set)
 {
 	return adt == IPSET_TEST &&
-	       ret == -ENOTEMPTY && ((flags >> 16) & IPSET_FLAG_NOMATCH);
+	       (set->type->features & IPSET_TYPE_NOMATCH) &&
+	       ((flags >> 16) & IPSET_FLAG_NOMATCH) &&
+	       (ret > 0 || ret == -ENOTEMPTY);
 }
 
 /* Check the NLA_F_NET_BYTEORDER flag */
|
||||
|
|
|
@@ -264,9 +264,11 @@ int ip_dont_fragment(struct sock *sk, struct dst_entry *dst)
 
 extern void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more);
 
-static inline void ip_select_ident(struct iphdr *iph, struct dst_entry *dst, struct sock *sk)
+static inline void ip_select_ident(struct sk_buff *skb, struct dst_entry *dst, struct sock *sk)
 {
-	if (iph->frag_off & htons(IP_DF)) {
+	struct iphdr *iph = ip_hdr(skb);
+
+	if ((iph->frag_off & htons(IP_DF)) && !skb->local_df) {
 		/* This is only to work around buggy Windows95/2000
 		 * VJ compression implementations. If the ID field
 		 * does not change, they drop every other packet in
@@ -278,9 +280,11 @@ static inline void ip_select_ident(struct iphdr *iph, struct dst_entry *dst, str
 		__ip_select_ident(iph, dst, 0);
 }
 
-static inline void ip_select_ident_more(struct iphdr *iph, struct dst_entry *dst, struct sock *sk, int more)
+static inline void ip_select_ident_more(struct sk_buff *skb, struct dst_entry *dst, struct sock *sk, int more)
 {
-	if (iph->frag_off & htons(IP_DF)) {
+	struct iphdr *iph = ip_hdr(skb);
+
+	if ((iph->frag_off & htons(IP_DF)) && !skb->local_df) {
 		if (sk && inet_sk(sk)->inet_daddr) {
 			iph->id = htons(inet_sk(sk)->inet_id);
 			inet_sk(sk)->inet_id += 1 + more;
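
The net effect of the two hunks above is that the helpers now take the skb instead of a bare IP header, so they can consult skb->local_df: when local fragmentation is allowed, a unique IP ID is generated even if DF is set in the header. A condensed sketch of the resulting decision (simplified, not the exact kernel code; the real helper also reuses the socket's inet_id counter in the DF branch):

#include <linux/skbuff.h>
#include <net/ip.h>

static void sketch_select_ident(struct sk_buff *skb, struct dst_entry *dst)
{
	struct iphdr *iph = ip_hdr(skb);

	if ((iph->frag_off & htons(IP_DF)) && !skb->local_df) {
		/* DF set and we will never fragment locally: a constant
		 * ID is acceptable (modulo the VJ-compression workaround). */
		iph->id = 0;
	} else {
		/* The packet may still be fragmented under our control,
		 * so pick a unique identifier. */
		__ip_select_ident(iph, dst, 0);
	}
}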
|
||||
|
|
|
@@ -86,7 +86,7 @@ static inline void nf_ct_ext_destroy(struct nf_conn *ct)
 static inline void nf_ct_ext_free(struct nf_conn *ct)
 {
 	if (ct->ext)
-		kfree(ct->ext);
+		kfree_rcu(ct->ext, rcu);
 }
 
 /* Add this type, returns pointer to data or NULL. */