Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (27 commits)
  sfc: Change falcon_probe_board() to fail for unsupported boards
  sfc: Always close net device at the end of a disabling reset
  sfc: Wait at most 10ms for the MC to finish reading out MAC statistics
  sctp: Fix oops when sending queued ASCONF chunks
  sctp: fix to calc the INIT/INIT-ACK chunk length correctly is set
  sctp: per_cpu variables should be in bh_disabled section
  sctp: fix potential reference of a freed pointer
  sctp: avoid irq lock inversion while call sk->sk_data_ready()
  Revert "tcp: bind() fix when many ports are bound"
  net/usb: add sierra_net.c driver
  cdc_ether: fix autosuspend for mbm devices
  bluetooth: handle l2cap_create_connless_pdu() errors
  gianfar: Wait for both RX and TX to stop
  ipheth: potential null dereferences on error path
  smc91c92_cs: spin_unlock_irqrestore before calling smc_interrupt()
  drivers/usb/net/kaweth.c: add device "Allied Telesyn AT-USB10 USB Ethernet Adapter"
  bnx2: Update version to 2.0.9.
  bnx2: Prevent "scheduling while atomic" warning with cnic, bonding and vlan.
  bnx2: Fix lost MSI-X problem on 5709 NICs.
  cxgb3: Wait longer for control packets on initialization
  ...
commit 032b734d29
36 changed files with 1312 additions and 188 deletions
@@ -58,8 +58,8 @@
#include "bnx2_fw.h"
#define DRV_MODULE_NAME "bnx2"
#define DRV_MODULE_VERSION "2.0.8"
#define DRV_MODULE_RELDATE "Feb 15, 2010"
#define DRV_MODULE_VERSION "2.0.9"
#define DRV_MODULE_RELDATE "April 27, 2010"
#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-5.0.0.j6.fw"
#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-5.0.0.j3.fw"
#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-5.0.0.j9.fw"
@@ -651,9 +651,10 @@ bnx2_napi_enable(struct bnx2 *bp)
}
static void
bnx2_netif_stop(struct bnx2 *bp)
bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
{
bnx2_cnic_stop(bp);
if (stop_cnic)
bnx2_cnic_stop(bp);
if (netif_running(bp->dev)) {
int i;
@@ -671,14 +672,15 @@ bnx2_netif_stop(struct bnx2 *bp)
}
static void
bnx2_netif_start(struct bnx2 *bp)
bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
{
if (atomic_dec_and_test(&bp->intr_sem)) {
if (netif_running(bp->dev)) {
netif_tx_wake_all_queues(bp->dev);
bnx2_napi_enable(bp);
bnx2_enable_int(bp);
bnx2_cnic_start(bp);
if (start_cnic)
bnx2_cnic_start(bp);
}
}
}
@@ -4759,8 +4761,12 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
rc = bnx2_alloc_bad_rbuf(bp);
}
if (bp->flags & BNX2_FLAG_USING_MSIX)
if (bp->flags & BNX2_FLAG_USING_MSIX) {
bnx2_setup_msix_tbl(bp);
/* Prevent MSIX table reads and write from timing out */
REG_WR(bp, BNX2_MISC_ECO_HW_CTL,
BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
}
return rc;
}
@@ -6273,12 +6279,12 @@ bnx2_reset_task(struct work_struct *work)
return;
}
bnx2_netif_stop(bp);
bnx2_netif_stop(bp, true);
bnx2_init_nic(bp, 1);
atomic_set(&bp->intr_sem, 1);
bnx2_netif_start(bp);
bnx2_netif_start(bp, true);
rtnl_unlock();
}
@@ -6320,7 +6326,7 @@ bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
struct bnx2 *bp = netdev_priv(dev);
if (netif_running(dev))
bnx2_netif_stop(bp);
bnx2_netif_stop(bp, false);
bp->vlgrp = vlgrp;
@@ -6331,7 +6337,7 @@ bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
bnx2_netif_start(bp);
bnx2_netif_start(bp, false);
}
#endif
@@ -7051,9 +7057,9 @@ bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
if (netif_running(bp->dev)) {
bnx2_netif_stop(bp);
bnx2_netif_stop(bp, true);
bnx2_init_nic(bp, 0);
bnx2_netif_start(bp);
bnx2_netif_start(bp, true);
}
return 0;
@@ -7083,7 +7089,7 @@ bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
/* Reset will erase chipset stats; save them */
bnx2_save_stats(bp);
bnx2_netif_stop(bp);
bnx2_netif_stop(bp, true);
bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
bnx2_free_skbs(bp);
bnx2_free_mem(bp);
@@ -7111,7 +7117,7 @@ bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
bnx2_setup_cnic_irq_info(bp);
mutex_unlock(&bp->cnic_lock);
#endif
bnx2_netif_start(bp);
bnx2_netif_start(bp, true);
}
return 0;
}
@@ -7364,7 +7370,7 @@ bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
if (etest->flags & ETH_TEST_FL_OFFLINE) {
int i;
bnx2_netif_stop(bp);
bnx2_netif_stop(bp, true);
bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
bnx2_free_skbs(bp);
@@ -7383,7 +7389,7 @@ bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
bnx2_shutdown_chip(bp);
else {
bnx2_init_nic(bp, 1);
bnx2_netif_start(bp);
bnx2_netif_start(bp, true);
}
/* wait for link up */
@@ -8377,7 +8383,7 @@ bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
return 0;
flush_scheduled_work();
bnx2_netif_stop(bp);
bnx2_netif_stop(bp, true);
netif_device_detach(dev);
del_timer_sync(&bp->timer);
bnx2_shutdown_chip(bp);
@@ -8399,7 +8405,7 @@ bnx2_resume(struct pci_dev *pdev)
bnx2_set_power_state(bp, PCI_D0);
netif_device_attach(dev);
bnx2_init_nic(bp, 1);
bnx2_netif_start(bp);
bnx2_netif_start(bp, true);
return 0;
}
@@ -8426,7 +8432,7 @@ static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
}
if (netif_running(dev)) {
bnx2_netif_stop(bp);
bnx2_netif_stop(bp, true);
del_timer_sync(&bp->timer);
bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
}
@@ -8483,7 +8489,7 @@ static void bnx2_io_resume(struct pci_dev *pdev)
rtnl_lock();
if (netif_running(dev))
bnx2_netif_start(bp);
bnx2_netif_start(bp, true);
netif_device_attach(dev);
rtnl_unlock();
@@ -439,7 +439,7 @@ static void free_irq_resources(struct adapter *adapter)
static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
unsigned long n)
{
int attempts = 5;
int attempts = 10;
while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
if (!--attempts)
@@ -336,7 +336,6 @@ static s32 e1000_get_variants_82571(struct e1000_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
static int global_quad_port_a; /* global port a indication */
struct pci_dev *pdev = adapter->pdev;
u16 eeprom_data = 0;
int is_port_b = er32(STATUS) & E1000_STATUS_FUNC_1;
s32 rc;
@@ -387,16 +386,15 @@ static s32 e1000_get_variants_82571(struct e1000_adapter *adapter)
if (pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD)
adapter->flags &= ~FLAG_HAS_WOL;
break;
case e1000_82573:
case e1000_82574:
case e1000_82583:
/* Disable ASPM L0s due to hardware errata */
e1000e_disable_aspm(adapter->pdev, PCIE_LINK_STATE_L0S);
if (pdev->device == E1000_DEV_ID_82573L) {
if (e1000_read_nvm(&adapter->hw, NVM_INIT_3GIO_3, 1,
&eeprom_data) < 0)
break;
if (!(eeprom_data & NVM_WORD1A_ASPM_MASK)) {
adapter->flags |= FLAG_HAS_JUMBO_FRAMES;
adapter->max_hw_frame_size = DEFAULT_JUMBO;
}
adapter->flags |= FLAG_HAS_JUMBO_FRAMES;
adapter->max_hw_frame_size = DEFAULT_JUMBO;
}
break;
default:
@@ -1792,6 +1790,7 @@ struct e1000_info e1000_82571_info = {
| FLAG_RESET_OVERWRITES_LAA /* errata */
| FLAG_TARC_SPEED_MODE_BIT /* errata */
| FLAG_APME_CHECK_PORT_B,
.flags2 = FLAG2_DISABLE_ASPM_L1, /* errata 13 */
.pba = 38,
.max_hw_frame_size = DEFAULT_JUMBO,
.get_variants = e1000_get_variants_82571,
@@ -1809,6 +1808,7 @@ struct e1000_info e1000_82572_info = {
| FLAG_RX_CSUM_ENABLED
| FLAG_HAS_CTRLEXT_ON_LOAD
| FLAG_TARC_SPEED_MODE_BIT, /* errata */
.flags2 = FLAG2_DISABLE_ASPM_L1, /* errata 13 */
.pba = 38,
.max_hw_frame_size = DEFAULT_JUMBO,
.get_variants = e1000_get_variants_82571,
@@ -1820,13 +1820,11 @@ struct e1000_info e1000_82572_info = {
struct e1000_info e1000_82573_info = {
.mac = e1000_82573,
.flags = FLAG_HAS_HW_VLAN_FILTER
| FLAG_HAS_JUMBO_FRAMES
| FLAG_HAS_WOL
| FLAG_APME_IN_CTRL3
| FLAG_RX_CSUM_ENABLED
| FLAG_HAS_SMART_POWER_DOWN
| FLAG_HAS_AMT
| FLAG_HAS_ERT
| FLAG_HAS_SWSM_ON_LOAD,
.pba = 20,
.max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN,
@@ -37,6 +37,7 @@
#include <linux/io.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include "hw.h"
@@ -374,7 +375,7 @@ struct e1000_adapter {
struct e1000_info {
enum e1000_mac_type mac;
unsigned int flags;
unsigned int flags2;
unsigned int flags2;
u32 pba;
u32 max_hw_frame_size;
s32 (*get_variants)(struct e1000_adapter *);
@@ -421,6 +422,7 @@ struct e1000_info {
#define FLAG2_CRC_STRIPPING (1 << 0)
#define FLAG2_HAS_PHY_WAKEUP (1 << 1)
#define FLAG2_IS_DISCARDING (1 << 2)
#define FLAG2_DISABLE_ASPM_L1 (1 << 3)
#define E1000_RX_DESC_PS(R, i) \
(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
@@ -461,6 +463,7 @@ extern void e1000e_update_stats(struct e1000_adapter *adapter);
extern bool e1000e_has_link(struct e1000_adapter *adapter);
extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
extern void e1000e_disable_aspm(struct pci_dev *pdev, u16 state);
extern unsigned int copybreak;
@@ -4283,6 +4283,14 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
return -EINVAL;
}
/* 82573 Errata 17 */
if (((adapter->hw.mac.type == e1000_82573) ||
(adapter->hw.mac.type == e1000_82574)) &&
(max_frame > ETH_FRAME_LEN + ETH_FCS_LEN)) {
adapter->flags2 |= FLAG2_DISABLE_ASPM_L1;
e1000e_disable_aspm(adapter->pdev, PCIE_LINK_STATE_L1);
}
while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
msleep(1);
/* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */
@@ -4605,29 +4613,39 @@ static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep,
}
}
static void e1000e_disable_l1aspm(struct pci_dev *pdev)
#ifdef CONFIG_PCIEASPM
static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
{
pci_disable_link_state(pdev, state);
}
#else
static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
{
int pos;
u16 val;
u16 reg16;
/*
 * 82573 workaround - disable L1 ASPM on mobile chipsets
 *
 * L1 ASPM on various mobile (ich7) chipsets do not behave properly
 * resulting in lost data or garbage information on the pci-e link
 * level. This could result in (false) bad EEPROM checksum errors,
 * long ping times (up to 2s) or even a system freeze/hang.
 *
 * Unfortunately this feature saves about 1W power consumption when
 * active.
 * Both device and parent should have the same ASPM setting.
 * Disable ASPM in downstream component first and then upstream.
 */
pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &val);
if (val & 0x2) {
dev_warn(&pdev->dev, "Disabling L1 ASPM\n");
val &= ~0x2;
pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, val);
}
pos = pci_pcie_cap(pdev);
pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
reg16 &= ~state;
pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);
pos = pci_pcie_cap(pdev->bus->self);
pci_read_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, &reg16);
reg16 &= ~state;
pci_write_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, reg16);
}
#endif
void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
{
dev_info(&pdev->dev, "Disabling ASPM %s %s\n",
(state & PCIE_LINK_STATE_L0S) ? "L0s" : "",
(state & PCIE_LINK_STATE_L1) ? "L1" : "");
__e1000e_disable_aspm(pdev, state);
}
#ifdef CONFIG_PM
@@ -4653,7 +4671,8 @@ static int e1000_resume(struct pci_dev *pdev)
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
pci_save_state(pdev);
e1000e_disable_l1aspm(pdev);
if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1);
err = pci_enable_device_mem(pdev);
if (err) {
@@ -4795,7 +4814,8 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
int err;
pci_ers_result_t result;
e1000e_disable_l1aspm(pdev);
if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1);
err = pci_enable_device_mem(pdev);
if (err) {
dev_err(&pdev->dev,
@@ -4889,13 +4909,6 @@ static void e1000_eeprom_checks(struct e1000_adapter *adapter)
dev_warn(&adapter->pdev->dev,
"Warning: detected DSPD enabled in EEPROM\n");
}
ret_val = e1000_read_nvm(hw, NVM_INIT_3GIO_3, 1, &buf);
if (!ret_val && (le16_to_cpu(buf) & (3 << 2))) {
/* ASPM enable */
dev_warn(&adapter->pdev->dev,
"Warning: detected ASPM enabled in EEPROM\n");
}
}
static const struct net_device_ops e1000e_netdev_ops = {
@@ -4944,7 +4957,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
u16 eeprom_data = 0;
u16 eeprom_apme_mask = E1000_EEPROM_APME;
e1000e_disable_l1aspm(pdev);
if (ei->flags2 & FLAG2_DISABLE_ASPM_L1)
e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1);
err = pci_enable_device_mem(pdev);
if (err)
@@ -1511,9 +1511,9 @@ static void gfar_halt_nodisable(struct net_device *dev)
tempval |= (DMACTRL_GRS | DMACTRL_GTS);
gfar_write(&regs->dmactrl, tempval);
while (!(gfar_read(&regs->ievent) &
(IEVENT_GRSC | IEVENT_GTSC)))
cpu_relax();
spin_event_timeout(((gfar_read(&regs->ievent) &
(IEVENT_GRSC | IEVENT_GTSC)) ==
(IEVENT_GRSC | IEVENT_GTSC)), -1, 0);
}
}
@@ -39,6 +39,8 @@
#define IXGBE_82599_MC_TBL_SIZE 128
#define IXGBE_82599_VFT_TBL_SIZE 128
void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
@@ -69,8 +71,14 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
if (hw->phy.multispeed_fiber) {
/* Set up dual speed SFP+ support */
mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
mac->ops.disable_tx_laser =
&ixgbe_disable_tx_laser_multispeed_fiber;
mac->ops.enable_tx_laser =
&ixgbe_enable_tx_laser_multispeed_fiber;
mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber;
} else {
mac->ops.disable_tx_laser = NULL;
mac->ops.enable_tx_laser = NULL;
mac->ops.flap_tx_laser = NULL;
if ((mac->ops.get_media_type(hw) ==
ixgbe_media_type_backplane) &&
@@ -415,6 +423,44 @@ s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
return status;
}
/**
 * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
 * @hw: pointer to hardware structure
 *
 * The base drivers may require better control over SFP+ module
 * PHY states. This includes selectively shutting down the Tx
 * laser on the PHY, effectively halting physical link.
 **/
void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
{
u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
/* Disable tx laser; allow 100us to go dark per spec */
esdp_reg |= IXGBE_ESDP_SDP3;
IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
IXGBE_WRITE_FLUSH(hw);
udelay(100);
}
/**
 * ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser
 * @hw: pointer to hardware structure
 *
 * The base drivers may require better control over SFP+ module
 * PHY states. This includes selectively turning on the Tx
 * laser on the PHY, effectively starting physical link.
 **/
void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
{
u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
/* Enable tx laser; allow 100ms to light up */
esdp_reg &= ~IXGBE_ESDP_SDP3;
IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
IXGBE_WRITE_FLUSH(hw);
msleep(100);
}
/**
 * ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser
 * @hw: pointer to hardware structure
@@ -429,23 +475,11 @@ s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
 **/
void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
{
u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
hw_dbg(hw, "ixgbe_flap_tx_laser_multispeed_fiber\n");
if (hw->mac.autotry_restart) {
/* Disable tx laser; allow 100us to go dark per spec */
esdp_reg |= IXGBE_ESDP_SDP3;
IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
IXGBE_WRITE_FLUSH(hw);
udelay(100);
/* Enable tx laser; allow 100ms to light up */
esdp_reg &= ~IXGBE_ESDP_SDP3;
IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
IXGBE_WRITE_FLUSH(hw);
msleep(100);
ixgbe_disable_tx_laser_multispeed_fiber(hw);
ixgbe_enable_tx_laser_multispeed_fiber(hw);
hw->mac.autotry_restart = false;
}
}
@@ -2982,6 +2982,10 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
else
ixgbe_configure_msi_and_legacy(adapter);
/* enable the optics */
if (hw->phy.multispeed_fiber)
hw->mac.ops.enable_tx_laser(hw);
clear_bit(__IXGBE_DOWN, &adapter->state);
ixgbe_napi_enable_all(adapter);
@@ -3243,6 +3247,10 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
/* signal that we are down to the interrupt handler */
set_bit(__IXGBE_DOWN, &adapter->state);
/* power down the optics */
if (hw->phy.multispeed_fiber)
hw->mac.ops.disable_tx_laser(hw);
/* disable receive for all VFs and wait one second */
if (adapter->num_vfs) {
/* ping all the active vfs to let them know we are going down */
@@ -6253,6 +6261,10 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
goto err_eeprom;
}
/* power down the optics */
if (hw->phy.multispeed_fiber)
hw->mac.ops.disable_tx_laser(hw);
init_timer(&adapter->watchdog_timer);
adapter->watchdog_timer.function = &ixgbe_watchdog;
adapter->watchdog_timer.data = (unsigned long)adapter;
@@ -6400,16 +6412,6 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
del_timer_sync(&adapter->sfp_timer);
cancel_work_sync(&adapter->watchdog_task);
cancel_work_sync(&adapter->sfp_task);
if (adapter->hw.phy.multispeed_fiber) {
struct ixgbe_hw *hw = &adapter->hw;
/*
 * Restart clause 37 autoneg, disable and re-enable
 * the tx laser, to clear & alert the link partner
 * that it needs to restart autotry
 */
hw->mac.autotry_restart = true;
hw->mac.ops.flap_tx_laser(hw);
}
cancel_work_sync(&adapter->multispeed_fiber_task);
cancel_work_sync(&adapter->sfp_config_module_task);
if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
@@ -2398,6 +2398,8 @@ struct ixgbe_mac_operations {
s32 (*enable_rx_dma)(struct ixgbe_hw *, u32);
/* Link */
void (*disable_tx_laser)(struct ixgbe_hw *);
void (*enable_tx_laser)(struct ixgbe_hw *);
void (*flap_tx_laser)(struct ixgbe_hw *);
s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool);
s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
@@ -1804,23 +1804,30 @@ static void media_check(u_long arg)
SMC_SELECT_BANK(1);
media |= (inw(ioaddr + CONFIG) & CFG_AUI_SELECT) ? 2 : 1;
SMC_SELECT_BANK(saved_bank);
spin_unlock_irqrestore(&smc->lock, flags);
/* Check for pending interrupt with watchdog flag set: with
   this, we can limp along even if the interrupt is blocked */
if (smc->watchdog++ && ((i>>8) & i)) {
if (!smc->fast_poll)
printk(KERN_INFO "%s: interrupt(s) dropped!\n", dev->name);
local_irq_save(flags);
smc_interrupt(dev->irq, dev);
local_irq_restore(flags);
smc->fast_poll = HZ;
}
if (smc->fast_poll) {
smc->fast_poll--;
smc->media.expires = jiffies + HZ/100;
add_timer(&smc->media);
SMC_SELECT_BANK(saved_bank);
spin_unlock_irqrestore(&smc->lock, flags);
return;
}
spin_lock_irqsave(&smc->lock, flags);
saved_bank = inw(ioaddr + BANK_SELECT);
if (smc->cfg & CFG_MII_SELECT) {
if (smc->mii_if.phy_id < 0)
goto reschedule;
@@ -1978,15 +1985,16 @@ static int smc_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
unsigned int ioaddr = dev->base_addr;
u16 saved_bank = inw(ioaddr + BANK_SELECT);
int ret;
unsigned long flags;
spin_lock_irq(&smc->lock);
spin_lock_irqsave(&smc->lock, flags);
SMC_SELECT_BANK(3);
if (smc->cfg & CFG_MII_SELECT)
ret = mii_ethtool_gset(&smc->mii_if, ecmd);
else
ret = smc_netdev_get_ecmd(dev, ecmd);
SMC_SELECT_BANK(saved_bank);
spin_unlock_irq(&smc->lock);
spin_unlock_irqrestore(&smc->lock, flags);
return ret;
}
@@ -1996,15 +2004,16 @@ static int smc_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
unsigned int ioaddr = dev->base_addr;
u16 saved_bank = inw(ioaddr + BANK_SELECT);
int ret;
unsigned long flags;
spin_lock_irq(&smc->lock);
spin_lock_irqsave(&smc->lock, flags);
SMC_SELECT_BANK(3);
if (smc->cfg & CFG_MII_SELECT)
ret = mii_ethtool_sset(&smc->mii_if, ecmd);
else
ret = smc_netdev_set_ecmd(dev, ecmd);
SMC_SELECT_BANK(saved_bank);
spin_unlock_irq(&smc->lock);
spin_unlock_irqrestore(&smc->lock, flags);
return ret;
}
@@ -2014,12 +2023,13 @@ static u32 smc_get_link(struct net_device *dev)
unsigned int ioaddr = dev->base_addr;
u16 saved_bank = inw(ioaddr + BANK_SELECT);
u32 ret;
unsigned long flags;
spin_lock_irq(&smc->lock);
spin_lock_irqsave(&smc->lock, flags);
SMC_SELECT_BANK(3);
ret = smc_link_ok(dev);
SMC_SELECT_BANK(saved_bank);
spin_unlock_irq(&smc->lock);
spin_unlock_irqrestore(&smc->lock, flags);
return ret;
}
@@ -2056,16 +2066,17 @@ static int smc_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
int rc = 0;
u16 saved_bank;
unsigned int ioaddr = dev->base_addr;
unsigned long flags;
if (!netif_running(dev))
return -EINVAL;
spin_lock_irq(&smc->lock);
spin_lock_irqsave(&smc->lock, flags);
saved_bank = inw(ioaddr + BANK_SELECT);
SMC_SELECT_BANK(3);
rc = generic_mii_ioctl(&smc->mii_if, mii, cmd, NULL);
SMC_SELECT_BANK(saved_bank);
spin_unlock_irq(&smc->lock);
spin_unlock_irqrestore(&smc->lock, flags);
return rc;
}
@@ -2759,6 +2759,7 @@ static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev,
{
iounmap(ioaddr);
pci_release_regions(pdev);
pci_clear_mwi(pdev);
pci_disable_device(pdev);
free_netdev(dev);
}
@@ -2825,8 +2826,13 @@ static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
spin_lock_irq(&tp->lock);
RTL_W8(Cfg9346, Cfg9346_Unlock);
RTL_W32(MAC4, high);
RTL_R32(MAC4);
RTL_W32(MAC0, low);
RTL_R32(MAC0);
RTL_W8(Cfg9346, Cfg9346_Lock);
spin_unlock_irq(&tp->lock);
@@ -3014,9 +3020,8 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_free_dev_1;
}
rc = pci_set_mwi(pdev);
if (rc < 0)
goto err_out_disable_2;
if (pci_set_mwi(pdev) < 0)
netif_info(tp, probe, dev, "Mem-Wr-Inval unavailable\n");
/* make sure PCI base addr 1 is MMIO */
if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) {
@@ -3024,7 +3029,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
"region #%d not an MMIO resource, aborting\n",
region);
rc = -ENODEV;
goto err_out_mwi_3;
goto err_out_mwi_2;
}
/* check for weird/broken PCI region reporting */
@@ -3032,13 +3037,13 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netif_err(tp, probe, dev,
"Invalid PCI region size(s), aborting\n");
rc = -ENODEV;
goto err_out_mwi_3;
goto err_out_mwi_2;
}
rc = pci_request_regions(pdev, MODULENAME);
if (rc < 0) {
netif_err(tp, probe, dev, "could not request regions\n");
goto err_out_mwi_3;
goto err_out_mwi_2;
}
tp->cp_cmd = PCIMulRW | RxChkSum;
@@ -3051,7 +3056,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc < 0) {
netif_err(tp, probe, dev, "DMA configuration failed\n");
goto err_out_free_res_4;
goto err_out_free_res_3;
}
}
@@ -3060,7 +3065,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!ioaddr) {
netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n");
rc = -EIO;
goto err_out_free_res_4;
goto err_out_free_res_3;
}
tp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
@@ -3102,7 +3107,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (i == ARRAY_SIZE(rtl_chip_info)) {
dev_err(&pdev->dev,
"driver bug, MAC version not found in rtl_chip_info\n");
goto err_out_msi_5;
goto err_out_msi_4;
}
tp->chipset = i;
@@ -3167,7 +3172,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rc = register_netdev(dev);
if (rc < 0)
goto err_out_msi_5;
goto err_out_msi_4;
pci_set_drvdata(pdev, dev);
@@ -3190,14 +3195,13 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
out:
return rc;
err_out_msi_5:
err_out_msi_4:
rtl_disable_msi(pdev, tp);
iounmap(ioaddr);
err_out_free_res_4:
err_out_free_res_3:
pci_release_regions(pdev);
err_out_mwi_3:
err_out_mwi_2:
pci_clear_mwi(pdev);
err_out_disable_2:
pci_disable_device(pdev);
err_out_free_dev_1:
free_netdev(dev);
@@ -1861,6 +1861,7 @@ int efx_reset(struct efx_nic *efx, enum reset_type method)
}
if (disabled) {
dev_close(efx->net_dev);
EFX_ERR(efx, "has been disabled\n");
efx->state = STATE_DISABLED;
} else {
@@ -1884,8 +1885,7 @@ static void efx_reset_work(struct work_struct *data)
}
rtnl_lock();
if (efx_reset(efx, efx->reset_pending))
dev_close(efx->net_dev);
(void)efx_reset(efx, efx->reset_pending);
rtnl_unlock();
}
@@ -1320,7 +1320,9 @@ static int falcon_probe_nvconfig(struct efx_nic *efx)
EFX_LOG(efx, "PHY is %d phy_id %d\n", efx->phy_type, efx->mdio.prtad);
falcon_probe_board(efx, board_rev);
rc = falcon_probe_board(efx, board_rev);
if (rc)
goto fail2;
kfree(nvconfig);
return 0;
@@ -728,15 +728,7 @@ static const struct falcon_board_type board_types[] = {
},
};
static const struct falcon_board_type falcon_dummy_board = {
.init = efx_port_dummy_op_int,
.init_phy = efx_port_dummy_op_void,
.fini = efx_port_dummy_op_void,
.set_id_led = efx_port_dummy_op_set_id_led,
.monitor = efx_port_dummy_op_int,
};
void falcon_probe_board(struct efx_nic *efx, u16 revision_info)
int falcon_probe_board(struct efx_nic *efx, u16 revision_info)
{
struct falcon_board *board = falcon_board(efx);
u8 type_id = FALCON_BOARD_TYPE(revision_info);
@@ -754,8 +746,9 @@ void falcon_probe_board(struct efx_nic *efx, u16 revision_info)
(efx->pci_dev->subsystem_vendor == EFX_VENDID_SFC)
? board->type->ref_model : board->type->gen_type,
'A' + board->major, board->minor);
return 0;
} else {
EFX_ERR(efx, "unknown board type %d\n", type_id);
board->type = &falcon_dummy_board;
return -ENODEV;
}
}
@@ -156,7 +156,7 @@ extern struct efx_nic_type siena_a0_nic_type;
 **************************************************************************
 */
extern void falcon_probe_board(struct efx_nic *efx, u16 revision_info);
extern int falcon_probe_board(struct efx_nic *efx, u16 revision_info);
/* TX data path */
extern int efx_nic_probe_tx(struct efx_tx_queue *tx_queue);
@@ -456,8 +456,17 @@ static int siena_try_update_nic_stats(struct efx_nic *efx)
static void siena_update_nic_stats(struct efx_nic *efx)
{
while (siena_try_update_nic_stats(efx) == -EAGAIN)
cpu_relax();
int retry;
/* If we're unlucky enough to read statistics during the DMA, wait
 * up to 10ms for it to finish (typically takes <500us) */
for (retry = 0; retry < 100; ++retry) {
if (siena_try_update_nic_stats(efx) == 0)
return;
udelay(100);
}
/* Use the old values instead */
}
static void siena_start_nic_stats(struct efx_nic *efx)
@@ -8633,6 +8633,7 @@ static int tg3_test_msi(struct tg3 *tp)
pci_disable_msi(tp->pdev);
tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
tp->napi[0].irq_vec = tp->pdev->irq;
err = tg3_request_irq(tp, 0);
if (err)
@@ -397,4 +397,14 @@ config USB_IPHETH
For more information: http://giagio.com/wiki/moin.cgi/iPhoneEthernetDriver
config USB_SIERRA_NET
tristate "USB-to-WWAN Driver for Sierra Wireless modems"
depends on USB_USBNET
default y
help
Choose this option if you have a Sierra Wireless USB-to-WWAN device.
To compile this driver as a module, choose M here: the
module will be called sierra_net.
endmenu
@@ -24,4 +24,5 @@ obj-$(CONFIG_USB_USBNET) += usbnet.o
obj-$(CONFIG_USB_NET_INT51X1) += int51x1.o
obj-$(CONFIG_USB_CDC_PHONET) += cdc-phonet.o
obj-$(CONFIG_USB_IPHETH) += ipheth.o
obj-$(CONFIG_USB_SIERRA_NET) += sierra_net.o
@@ -431,6 +431,7 @@ static const struct driver_info mbm_info = {
.bind = cdc_bind,
.unbind = usbnet_cdc_unbind,
.status = cdc_status,
.manage_power = cdc_manage_power,
};
/*-------------------------------------------------------------------------*/
@@ -122,25 +122,25 @@ static int ipheth_alloc_urbs(struct ipheth_device *iphone)
tx_urb = usb_alloc_urb(0, GFP_KERNEL);
if (tx_urb == NULL)
goto error;
goto error_nomem;
rx_urb = usb_alloc_urb(0, GFP_KERNEL);
if (rx_urb == NULL)
goto error;
goto free_tx_urb;
tx_buf = usb_buffer_alloc(iphone->udev,
IPHETH_BUF_SIZE,
GFP_KERNEL,
&tx_urb->transfer_dma);
if (tx_buf == NULL)
goto error;
goto free_rx_urb;
rx_buf = usb_buffer_alloc(iphone->udev,
IPHETH_BUF_SIZE,
GFP_KERNEL,
&rx_urb->transfer_dma);
if (rx_buf == NULL)
goto error;
goto free_tx_buf;
iphone->tx_urb = tx_urb;
@@ -149,13 +149,14 @@ static int ipheth_alloc_urbs(struct ipheth_device *iphone)
iphone->rx_buf = rx_buf;
return 0;
error:
usb_buffer_free(iphone->udev, IPHETH_BUF_SIZE, rx_buf,
rx_urb->transfer_dma);
free_tx_buf:
usb_buffer_free(iphone->udev, IPHETH_BUF_SIZE, tx_buf,
tx_urb->transfer_dma);
free_rx_urb:
usb_free_urb(rx_urb);
free_tx_urb:
usb_free_urb(tx_urb);
error_nomem:
return -ENOMEM;
}
@@ -145,6 +145,7 @@ static struct usb_device_id usb_klsi_table[] = {
{ USB_DEVICE(0x0707, 0x0100) }, /* SMC 2202USB */
{ USB_DEVICE(0x07aa, 0x0001) }, /* Correga K.K. */
{ USB_DEVICE(0x07b8, 0x4000) }, /* D-Link DU-E10 */
{ USB_DEVICE(0x07c9, 0xb010) }, /* Allied Telesyn AT-USB10 USB Ethernet Adapter */
{ USB_DEVICE(0x0846, 0x1001) }, /* NetGear EA-101 */
{ USB_DEVICE(0x0846, 0x1002) }, /* NetGear EA-101 */
{ USB_DEVICE(0x085a, 0x0008) }, /* PortGear Ethernet Adapter */
drivers/net/usb/sierra_net.c: new file, 1001 lines (diff suppressed because it is too large)
@@ -107,6 +107,7 @@ typedef enum {
SCTP_CMD_T1_RETRAN, /* Mark for retransmission after T1 timeout */
SCTP_CMD_UPDATE_INITTAG, /* Update peer inittag */
SCTP_CMD_SEND_MSG, /* Send the whole use message */
SCTP_CMD_SEND_NEXT_ASCONF, /* Send the next ASCONF after ACK */
SCTP_CMD_LAST
} sctp_verb_t;
@@ -128,6 +128,7 @@ extern int sctp_register_pf(struct sctp_pf *, sa_family_t);
int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
int sctp_inet_listen(struct socket *sock, int backlog);
void sctp_write_space(struct sock *sk);
void sctp_data_ready(struct sock *sk, int len);
unsigned int sctp_poll(struct file *file, struct socket *sock,
poll_table *wait);
void sctp_sock_rfree(struct sk_buff *skb);
@@ -1626,7 +1626,10 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms
/* Connectionless channel */
if (sk->sk_type == SOCK_DGRAM) {
skb = l2cap_create_connless_pdu(sk, msg, len);
err = l2cap_do_send(sk, skb);
if (IS_ERR(skb))
err = PTR_ERR(skb);
else
err = l2cap_do_send(sk, skb);
goto done;
}
@@ -957,9 +957,6 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
unsigned offset;
int err;
BR_INPUT_SKB_CB(skb)->igmp = 0;
BR_INPUT_SKB_CB(skb)->mrouters_only = 0;
/* We treat OOM as packet loss for now. */
if (!pskb_may_pull(skb, sizeof(*iph)))
return -EINVAL;
@@ -1049,6 +1046,9 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
struct sk_buff *skb)
{
BR_INPUT_SKB_CB(skb)->igmp = 0;
BR_INPUT_SKB_CB(skb)->mrouters_only = 0;
if (br->multicast_disabled)
return 0;
@@ -151,6 +151,9 @@ static int ieee802154_dev_ioctl(struct sock *sk, struct ifreq __user *arg,
dev_load(sock_net(sk), ifr.ifr_name);
dev = dev_get_by_name(sock_net(sk), ifr.ifr_name);
if (!dev)
return -ENODEV;
if (dev->type == ARPHRD_IEEE802154 && dev->netdev_ops->ndo_do_ioctl)
ret = dev->netdev_ops->ndo_do_ioctl(dev, &ifr, cmd);
@@ -70,17 +70,13 @@ int inet_csk_bind_conflict(const struct sock *sk,
(!sk->sk_bound_dev_if ||
!sk2->sk_bound_dev_if ||
sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
const __be32 sk2_rcv_saddr = inet_rcv_saddr(sk2);
if (!reuse || !sk2->sk_reuse ||
sk2->sk_state == TCP_LISTEN) {
const __be32 sk2_rcv_saddr = inet_rcv_saddr(sk2);
if (!sk2_rcv_saddr || !sk_rcv_saddr ||
sk2_rcv_saddr == sk_rcv_saddr)
break;
} else if (reuse && sk2->sk_reuse &&
sk2_rcv_saddr &&
sk2_rcv_saddr == sk_rcv_saddr)
break;
}
}
}
return node != NULL;
@@ -124,11 +120,9 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum)
smallest_size = tb->num_owners;
smallest_rover = rover;
if (atomic_read(&hashinfo->bsockets) > (high - low) + 1) {
if (!inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) {
spin_unlock(&head->lock);
snum = smallest_rover;
goto have_snum;
}
spin_unlock(&head->lock);
snum = smallest_rover;
goto have_snum;
}
}
goto next;
@@ -42,16 +42,11 @@ int inet6_csk_bind_conflict(const struct sock *sk,
if (sk != sk2 &&
(!sk->sk_bound_dev_if ||
!sk2->sk_bound_dev_if ||
sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
if ((!sk->sk_reuse || !sk2->sk_reuse ||
sk2->sk_state == TCP_LISTEN) &&
ipv6_rcv_saddr_equal(sk, sk2))
break;
else if (sk->sk_reuse && sk2->sk_reuse &&
!ipv6_addr_any(inet6_rcv_saddr(sk)) &&
ipv6_rcv_saddr_equal(sk, sk2))
break;
}
sk->sk_bound_dev_if == sk2->sk_bound_dev_if) &&
(!sk->sk_reuse || !sk2->sk_reuse ||
sk2->sk_state == TCP_LISTEN) &&
ipv6_rcv_saddr_equal(sk, sk2))
break;
}
return node != NULL;
@@ -1194,8 +1194,10 @@ void sctp_assoc_update(struct sctp_association *asoc,
/* Remove any peer addresses not present in the new association. */
list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
trans = list_entry(pos, struct sctp_transport, transports);
if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr))
sctp_assoc_del_peer(asoc, &trans->ipaddr);
if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr)) {
sctp_assoc_rm_peer(asoc, trans);
continue;
}
if (asoc->state >= SCTP_STATE_ESTABLISHED)
sctp_transport_reset(trans);
@@ -144,6 +144,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
/* Use SCTP specific send buffer space queues. */
ep->sndbuf_policy = sctp_sndbuf_policy;
sk->sk_data_ready = sctp_data_ready;
sk->sk_write_space = sctp_write_space;
sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
@@ -208,7 +208,8 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
sp = sctp_sk(asoc->base.sk);
num_types = sp->pf->supported_addrs(sp, types);
chunksize = sizeof(init) + addrs_len + SCTP_SAT_LEN(num_types);
chunksize = sizeof(init) + addrs_len;
chunksize += WORD_ROUND(SCTP_SAT_LEN(num_types));
chunksize += sizeof(ecap_param);
if (sctp_prsctp_enable)
@@ -238,14 +239,14 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
/* Add HMACS parameter length if any were defined */
auth_hmacs = (sctp_paramhdr_t *)asoc->c.auth_hmacs;
if (auth_hmacs->length)
chunksize += ntohs(auth_hmacs->length);
chunksize += WORD_ROUND(ntohs(auth_hmacs->length));
else
auth_hmacs = NULL;
/* Add CHUNKS parameter length */
auth_chunks = (sctp_paramhdr_t *)asoc->c.auth_chunks;
if (auth_chunks->length)
chunksize += ntohs(auth_chunks->length);
chunksize += WORD_ROUND(ntohs(auth_chunks->length));
else
auth_chunks = NULL;
@@ -255,7 +256,8 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
/* If we have any extensions to report, account for that */
if (num_ext)
chunksize += sizeof(sctp_supported_ext_param_t) + num_ext;
chunksize += WORD_ROUND(sizeof(sctp_supported_ext_param_t) +
num_ext);
/* RFC 2960 3.3.2 Initiation (INIT) (1)
 *
@@ -397,13 +399,13 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
auth_hmacs = (sctp_paramhdr_t *)asoc->c.auth_hmacs;
if (auth_hmacs->length)
chunksize += ntohs(auth_hmacs->length);
chunksize += WORD_ROUND(ntohs(auth_hmacs->length));
else
auth_hmacs = NULL;
auth_chunks = (sctp_paramhdr_t *)asoc->c.auth_chunks;
if (auth_chunks->length)
chunksize += ntohs(auth_chunks->length);
chunksize += WORD_ROUND(ntohs(auth_chunks->length));
else
auth_chunks = NULL;
@@ -412,7 +414,8 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
}
if (num_ext)
chunksize += sizeof(sctp_supported_ext_param_t) + num_ext;
chunksize += WORD_ROUND(sizeof(sctp_supported_ext_param_t) +
num_ext);
/* Now allocate and fill out the chunk. */
retval = sctp_make_chunk(asoc, SCTP_CID_INIT_ACK, 0, chunksize);
@@ -3315,21 +3318,6 @@ int sctp_process_asconf_ack(struct sctp_association *asoc,
sctp_chunk_free(asconf);
asoc->addip_last_asconf = NULL;
/* Send the next asconf chunk from the addip chunk queue. */
if (!list_empty(&asoc->addip_chunk_list)) {
struct list_head *entry = asoc->addip_chunk_list.next;
asconf = list_entry(entry, struct sctp_chunk, list);
list_del_init(entry);
/* Hold the chunk until an ASCONF_ACK is received. */
sctp_chunk_hold(asconf);
if (sctp_primitive_ASCONF(asoc, asconf))
sctp_chunk_free(asconf);
else
asoc->addip_last_asconf = asconf;
}
return retval;
}
@@ -962,6 +962,29 @@ static int sctp_cmd_send_msg(struct sctp_association *asoc,
}
/* Sent the next ASCONF packet currently stored in the association.
 * This happens after the ASCONF_ACK was succeffully processed.
 */
static void sctp_cmd_send_asconf(struct sctp_association *asoc)
{
/* Send the next asconf chunk from the addip chunk
 * queue.
 */
if (!list_empty(&asoc->addip_chunk_list)) {
struct list_head *entry = asoc->addip_chunk_list.next;
struct sctp_chunk *asconf = list_entry(entry,
struct sctp_chunk, list);
list_del_init(entry);
/* Hold the chunk until an ASCONF_ACK is received. */
sctp_chunk_hold(asconf);
if (sctp_primitive_ASCONF(asoc, asconf))
sctp_chunk_free(asconf);
else
asoc->addip_last_asconf = asconf;
}
}
/* These three macros allow us to pull the debugging code out of the
 * main flow of sctp_do_sm() to keep attention focused on the real
@@ -1617,6 +1640,9 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
}
error = sctp_cmd_send_msg(asoc, cmd->obj.msg);
break;
case SCTP_CMD_SEND_NEXT_ASCONF:
sctp_cmd_send_asconf(asoc);
break;
default:
printk(KERN_WARNING "Impossible command: %u, %p\n",
cmd->verb, cmd->obj.ptr);
@@ -3676,8 +3676,14 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
if (!sctp_process_asconf_ack((struct sctp_association *)asoc,
asconf_ack))
asconf_ack)) {
/* Successfully processed ASCONF_ACK. We can
 * release the next asconf if we have one.
 */
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_NEXT_ASCONF,
SCTP_NULL());
return SCTP_DISPOSITION_CONSUME;
}
abort = sctp_make_abort(asoc, asconf_ack,
sizeof(sctp_errhdr_t));
@@ -3719,12 +3719,12 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
sp->hmac = NULL;
SCTP_DBG_OBJCNT_INC(sock);
percpu_counter_inc(&sctp_sockets_allocated);
/* Set socket backlog limit. */
sk->sk_backlog.limit = sysctl_sctp_rmem[1];
local_bh_disable();
percpu_counter_inc(&sctp_sockets_allocated);
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
local_bh_enable();
@@ -3741,8 +3741,8 @@ SCTP_STATIC void sctp_destroy_sock(struct sock *sk)
/* Release our hold on the endpoint. */
ep = sctp_sk(sk)->ep;
sctp_endpoint_free(ep);
percpu_counter_dec(&sctp_sockets_allocated);
local_bh_disable();
percpu_counter_dec(&sctp_sockets_allocated);
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
local_bh_enable();
}
@@ -6189,6 +6189,16 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
goto out;
}
void sctp_data_ready(struct sock *sk, int len)
{
read_lock_bh(&sk->sk_callback_lock);
if (sk_has_sleeper(sk))
wake_up_interruptible_sync_poll(sk->sk_sleep, POLLIN |
POLLRDNORM | POLLRDBAND);
sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
read_unlock_bh(&sk->sk_callback_lock);
}
/* If socket sndbuf has changed, wake up all per association waiters. */
void sctp_write_space(struct sock *sk)
{