Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6:
  igb: fix link reporting when using sgmii
  igb: prevent skb_over panic w/ mtu smaller than 1K
  igb: Fix DCA errors and do not use context index for 82576
  ipv6: compile fix for ip6mr.c
  packet: Avoid lock_sock in mmap handler
  sfc: Replace stats_enabled flag with a disable count
  sfc: SFX7101/SFT9001: Fix AN advertisements
  sfc: SFT9001: Always enable XNP exchange on SFT9001 rev B
  sfc: Update board info for hardware monitor on SFN4111T-R5 and later
  sfc: Test for PHYXS faults whenever we cannot test link state bits
  sfc: Reinitialise the PHY completely in case of a PHY or NIC reset
  sfc: Fix post-reset MAC selection
  sfc: SFN4111T: Fix GPIO sharing between I2C and FLASH_CFG_1
  sfc: SFT9001: Fix speed reporting in 1G PHY loopback
  sfc: SFX7101: Remove workaround for bad link training
  sfc: SFT9001: Enable robust link training
  sky2: fix hard hang with netconsoling and iface going up

commit f984d02419
Author: Linus Torvalds
Date:   2009-01-31 15:50:43 -08:00

18 changed files with 314 additions and 329 deletions


@ -699,11 +699,18 @@ static s32 igb_check_for_link_82575(struct e1000_hw *hw)
/* SGMII link check is done through the PCS register. */
if ((hw->phy.media_type != e1000_media_type_copper) ||
(igb_sgmii_active_82575(hw)))
(igb_sgmii_active_82575(hw))) {
ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed,
&duplex);
else
/*
* Use this flag to determine if link needs to be checked or
* not. If we have link clear the flag so that we do not
* continue to check for link.
*/
hw->mac.get_link_status = !hw->mac.serdes_has_link;
} else {
ret_val = igb_check_for_copper_link(hw);
}
return ret_val;
}


@ -300,11 +300,10 @@ struct igb_adapter {
#define IGB_FLAG_HAS_MSI (1 << 0)
#define IGB_FLAG_MSI_ENABLE (1 << 1)
#define IGB_FLAG_HAS_DCA (1 << 2)
#define IGB_FLAG_DCA_ENABLED (1 << 3)
#define IGB_FLAG_IN_NETPOLL (1 << 5)
#define IGB_FLAG_QUAD_PORT_A (1 << 6)
#define IGB_FLAG_NEED_CTX_IDX (1 << 7)
#define IGB_FLAG_DCA_ENABLED (1 << 2)
#define IGB_FLAG_IN_NETPOLL (1 << 3)
#define IGB_FLAG_QUAD_PORT_A (1 << 4)
#define IGB_FLAG_NEED_CTX_IDX (1 << 5)
enum e1000_state_t {
__IGB_TESTING,


@ -206,10 +206,11 @@ static int __init igb_init_module(void)
global_quad_port_a = 0;
ret = pci_register_driver(&igb_driver);
#ifdef CONFIG_IGB_DCA
dca_register_notify(&dca_notifier);
#endif
ret = pci_register_driver(&igb_driver);
return ret;
}
@ -1156,11 +1157,10 @@ static int __devinit igb_probe(struct pci_dev *pdev,
/* set flags */
switch (hw->mac.type) {
case e1000_82576:
case e1000_82575:
adapter->flags |= IGB_FLAG_HAS_DCA;
adapter->flags |= IGB_FLAG_NEED_CTX_IDX;
break;
case e1000_82576:
default:
break;
}
@ -1310,8 +1310,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
goto err_register;
#ifdef CONFIG_IGB_DCA
if ((adapter->flags & IGB_FLAG_HAS_DCA) &&
(dca_add_requester(&pdev->dev) == 0)) {
if (dca_add_requester(&pdev->dev) == 0) {
adapter->flags |= IGB_FLAG_DCA_ENABLED;
dev_info(&pdev->dev, "DCA enabled\n");
/* Always use CB2 mode, difference is masked
@ -1835,11 +1834,11 @@ static void igb_setup_rctl(struct igb_adapter *adapter)
rctl |= E1000_RCTL_SECRC;
/*
* disable store bad packets, long packet enable, and clear size bits.
* disable store bad packets and clear size bits.
*/
rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_LPE | E1000_RCTL_SZ_256);
rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
if (adapter->netdev->mtu > ETH_DATA_LEN)
/* enable LPE to prevent packets larger than max_frame_size */
rctl |= E1000_RCTL_LPE;
/* Setup buffer sizes */
@ -1865,7 +1864,7 @@ static void igb_setup_rctl(struct igb_adapter *adapter)
*/
/* allocations using alloc_page take too long for regular MTU
* so only enable packet split for jumbo frames */
if (rctl & E1000_RCTL_LPE) {
if (adapter->netdev->mtu > ETH_DATA_LEN) {
adapter->rx_ps_hdr_size = IGB_RXBUFFER_128;
srrctl |= adapter->rx_ps_hdr_size <<
E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
@ -3473,19 +3472,16 @@ static int __igb_notify_dca(struct device *dev, void *data)
struct e1000_hw *hw = &adapter->hw;
unsigned long event = *(unsigned long *)data;
if (!(adapter->flags & IGB_FLAG_HAS_DCA))
goto out;
switch (event) {
case DCA_PROVIDER_ADD:
/* if already enabled, don't do it again */
if (adapter->flags & IGB_FLAG_DCA_ENABLED)
break;
adapter->flags |= IGB_FLAG_DCA_ENABLED;
/* Always use CB2 mode, difference is masked
* in the CB driver. */
wr32(E1000_DCA_CTRL, 2);
if (dca_add_requester(dev) == 0) {
adapter->flags |= IGB_FLAG_DCA_ENABLED;
dev_info(&adapter->pdev->dev, "DCA enabled\n");
igb_setup_dca(adapter);
break;
@ -3502,7 +3498,7 @@ static int __igb_notify_dca(struct device *dev, void *data)
}
break;
}
out:
return 0;
}


@ -676,9 +676,8 @@ static int efx_init_port(struct efx_nic *efx)
rc = efx->phy_op->init(efx);
if (rc)
return rc;
efx->phy_op->reconfigure(efx);
mutex_lock(&efx->mac_lock);
efx->phy_op->reconfigure(efx);
rc = falcon_switch_mac(efx);
mutex_unlock(&efx->mac_lock);
if (rc)
@ -686,7 +685,7 @@ static int efx_init_port(struct efx_nic *efx)
efx->mac_op->reconfigure(efx);
efx->port_initialized = true;
efx->stats_enabled = true;
efx_stats_enable(efx);
return 0;
fail:
@ -735,6 +734,7 @@ static void efx_fini_port(struct efx_nic *efx)
if (!efx->port_initialized)
return;
efx_stats_disable(efx);
efx->phy_op->fini(efx);
efx->port_initialized = false;
@ -1361,6 +1361,20 @@ static int efx_net_stop(struct net_device *net_dev)
return 0;
}
void efx_stats_disable(struct efx_nic *efx)
{
spin_lock(&efx->stats_lock);
++efx->stats_disable_count;
spin_unlock(&efx->stats_lock);
}
void efx_stats_enable(struct efx_nic *efx)
{
spin_lock(&efx->stats_lock);
--efx->stats_disable_count;
spin_unlock(&efx->stats_lock);
}
/* Context: process, dev_base_lock or RTNL held, non-blocking. */
static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
{
@ -1369,12 +1383,12 @@ static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
struct net_device_stats *stats = &net_dev->stats;
/* Update stats if possible, but do not wait if another thread
* is updating them (or resetting the NIC); slightly stale
* stats are acceptable.
* is updating them or if MAC stats fetches are temporarily
* disabled; slightly stale stats are acceptable.
*/
if (!spin_trylock(&efx->stats_lock))
return stats;
if (efx->stats_enabled) {
if (!efx->stats_disable_count) {
efx->mac_op->update_stats(efx);
falcon_update_nic_stats(efx);
}
@ -1622,16 +1636,12 @@ static void efx_unregister_netdev(struct efx_nic *efx)
/* Tears down the entire software state and most of the hardware state
* before reset. */
void efx_reset_down(struct efx_nic *efx, struct ethtool_cmd *ecmd)
void efx_reset_down(struct efx_nic *efx, enum reset_type method,
struct ethtool_cmd *ecmd)
{
EFX_ASSERT_RESET_SERIALISED(efx);
/* The net_dev->get_stats handler is quite slow, and will fail
* if a fetch is pending over reset. Serialise against it. */
spin_lock(&efx->stats_lock);
efx->stats_enabled = false;
spin_unlock(&efx->stats_lock);
efx_stats_disable(efx);
efx_stop_all(efx);
mutex_lock(&efx->mac_lock);
mutex_lock(&efx->spi_lock);
@ -1639,6 +1649,8 @@ void efx_reset_down(struct efx_nic *efx, struct ethtool_cmd *ecmd)
efx->phy_op->get_settings(efx, ecmd);
efx_fini_channels(efx);
if (efx->port_initialized && method != RESET_TYPE_INVISIBLE)
efx->phy_op->fini(efx);
}
/* This function will always ensure that the locks acquired in
@ -1646,7 +1658,8 @@ void efx_reset_down(struct efx_nic *efx, struct ethtool_cmd *ecmd)
* that we were unable to reinitialise the hardware, and the
* driver should be disabled. If ok is false, then the rx and tx
* engines are not restarted, pending a RESET_DISABLE. */
int efx_reset_up(struct efx_nic *efx, struct ethtool_cmd *ecmd, bool ok)
int efx_reset_up(struct efx_nic *efx, enum reset_type method,
struct ethtool_cmd *ecmd, bool ok)
{
int rc;
@ -1658,6 +1671,15 @@ int efx_reset_up(struct efx_nic *efx, struct ethtool_cmd *ecmd, bool ok)
ok = false;
}
if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) {
if (ok) {
rc = efx->phy_op->init(efx);
if (rc)
ok = false;
} else
efx->port_initialized = false;
}
if (ok) {
efx_init_channels(efx);
@ -1670,7 +1692,7 @@ int efx_reset_up(struct efx_nic *efx, struct ethtool_cmd *ecmd, bool ok)
if (ok) {
efx_start_all(efx);
efx->stats_enabled = true;
efx_stats_enable(efx);
}
return rc;
}
@ -1702,7 +1724,7 @@ static int efx_reset(struct efx_nic *efx)
EFX_INFO(efx, "resetting (%d)\n", method);
efx_reset_down(efx, &ecmd);
efx_reset_down(efx, method, &ecmd);
rc = falcon_reset_hw(efx, method);
if (rc) {
@ -1721,10 +1743,10 @@ static int efx_reset(struct efx_nic *efx)
/* Leave device stopped if necessary */
if (method == RESET_TYPE_DISABLE) {
efx_reset_up(efx, &ecmd, false);
efx_reset_up(efx, method, &ecmd, false);
rc = -EIO;
} else {
rc = efx_reset_up(efx, &ecmd, true);
rc = efx_reset_up(efx, method, &ecmd, true);
}
out_disable:
@ -1876,6 +1898,7 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
efx->rx_checksum_enabled = true;
spin_lock_init(&efx->netif_stop_lock);
spin_lock_init(&efx->stats_lock);
efx->stats_disable_count = 1;
mutex_init(&efx->mac_lock);
efx->mac_op = &efx_dummy_mac_operations;
efx->phy_op = &efx_dummy_phy_operations;
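
The sfc changes above replace the boolean stats_enabled flag with a
stats_disable_count nest count, so overlapping operations (port init/fini,
MAC resets, MAC switching, board reconfiguration) can each block MAC
statistics fetches without clobbering one another; fetches resume only once
every efx_stats_disable() is balanced by an efx_stats_enable(). A minimal
usage sketch, given a struct efx_nic *efx, illustrative only and not part of
the patch:

        efx_stats_disable(efx);         /* e.g. around falcon_switch_mac() */
        efx_stats_disable(efx);         /* nested, e.g. in falcon_reset_macs() */
        efx_stats_enable(efx);          /* count 2 -> 1: fetches still blocked */
        efx_stats_enable(efx);          /* count 1 -> 0: fetches may resume */

efx_init_struct() now starts the count at 1, so statistics stay disabled
until efx_init_port() calls efx_stats_enable().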


@ -36,13 +36,16 @@ extern void efx_process_channel_now(struct efx_channel *channel);
extern void efx_flush_queues(struct efx_nic *efx);
/* Ports */
extern void efx_stats_disable(struct efx_nic *efx);
extern void efx_stats_enable(struct efx_nic *efx);
extern void efx_reconfigure_port(struct efx_nic *efx);
extern void __efx_reconfigure_port(struct efx_nic *efx);
/* Reset handling */
extern void efx_reset_down(struct efx_nic *efx, struct ethtool_cmd *ecmd);
extern int efx_reset_up(struct efx_nic *efx, struct ethtool_cmd *ecmd,
bool ok);
extern void efx_reset_down(struct efx_nic *efx, enum reset_type method,
struct ethtool_cmd *ecmd);
extern int efx_reset_up(struct efx_nic *efx, enum reset_type method,
struct ethtool_cmd *ecmd, bool ok);
/* Global */
extern void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);


@ -219,9 +219,6 @@ int efx_ethtool_set_settings(struct net_device *net_dev,
struct efx_nic *efx = netdev_priv(net_dev);
int rc;
if (EFX_WORKAROUND_13963(efx) && !ecmd->autoneg)
return -EINVAL;
/* Falcon GMAC does not support 1000Mbps HD */
if (ecmd->speed == SPEED_1000 && ecmd->duplex != DUPLEX_FULL) {
EFX_LOG(efx, "rejecting unsupported 1000Mbps HD"


@ -824,10 +824,6 @@ static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
rx_ev_pause_frm ? " [PAUSE]" : "");
}
#endif
if (unlikely(rx_ev_eth_crc_err && EFX_WORKAROUND_10750(efx) &&
efx->phy_type == PHY_TYPE_SFX7101))
tenxpress_crc_err(efx);
}
/* Handle receive events that are not in-order. */
@ -1887,7 +1883,7 @@ static int falcon_reset_macs(struct efx_nic *efx)
/* MAC stats will fail whilst the TX fifo is draining. Serialise
* the drain sequence with the statistics fetch */
spin_lock(&efx->stats_lock);
efx_stats_disable(efx);
falcon_read(efx, &reg, MAC0_CTRL_REG_KER);
EFX_SET_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0, 1);
@ -1917,7 +1913,7 @@ static int falcon_reset_macs(struct efx_nic *efx)
udelay(10);
}
spin_unlock(&efx->stats_lock);
efx_stats_enable(efx);
/* If we've reset the EM block and the link is up, then
* we'll have to kick the XAUI link so the PHY can recover */
@ -2277,6 +2273,10 @@ int falcon_switch_mac(struct efx_nic *efx)
struct efx_mac_operations *old_mac_op = efx->mac_op;
efx_oword_t nic_stat;
unsigned strap_val;
int rc = 0;
/* Don't try to fetch MAC stats while we're switching MACs */
efx_stats_disable(efx);
/* Internal loopbacks override the phy speed setting */
if (efx->loopback_mode == LOOPBACK_GMAC) {
@ -2287,16 +2287,12 @@ int falcon_switch_mac(struct efx_nic *efx)
efx->link_fd = true;
}
WARN_ON(!mutex_is_locked(&efx->mac_lock));
efx->mac_op = (EFX_IS10G(efx) ?
&falcon_xmac_operations : &falcon_gmac_operations);
if (old_mac_op == efx->mac_op)
return 0;
WARN_ON(!mutex_is_locked(&efx->mac_lock));
/* Not all macs support a mac-level link state */
efx->mac_up = true;
/* Always push the NIC_STAT_REG setting even if the mac hasn't
* changed, because this function is run post online reset */
falcon_read(efx, &nic_stat, NIC_STAT_REG);
strap_val = EFX_IS10G(efx) ? 5 : 3;
if (falcon_rev(efx) >= FALCON_REV_B0) {
@ -2309,9 +2305,17 @@ int falcon_switch_mac(struct efx_nic *efx)
BUG_ON(EFX_OWORD_FIELD(nic_stat, STRAP_PINS) != strap_val);
}
if (old_mac_op == efx->mac_op)
goto out;
EFX_LOG(efx, "selected %cMAC\n", EFX_IS10G(efx) ? 'X' : 'G');
return falcon_reset_macs(efx);
/* Not all macs support a mac-level link state */
efx->mac_up = true;
rc = falcon_reset_macs(efx);
out:
efx_stats_enable(efx);
return rc;
}
/* This call is responsible for hooking in the MAC and PHY operations */


@ -15,6 +15,7 @@
#include "net_driver.h"
#include "mdio_10g.h"
#include "boards.h"
#include "workarounds.h"
int mdio_clause45_reset_mmd(struct efx_nic *port, int mmd,
int spins, int spintime)
@ -179,17 +180,12 @@ bool mdio_clause45_links_ok(struct efx_nic *efx, unsigned int mmd_mask)
return false;
else if (efx_phy_mode_disabled(efx->phy_mode))
return false;
else if (efx->loopback_mode == LOOPBACK_PHYXS) {
else if (efx->loopback_mode == LOOPBACK_PHYXS)
mmd_mask &= ~(MDIO_MMDREG_DEVS_PHYXS |
MDIO_MMDREG_DEVS_PCS |
MDIO_MMDREG_DEVS_PMAPMD |
MDIO_MMDREG_DEVS_AN);
if (!mmd_mask) {
reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_PHYXS,
MDIO_PHYXS_STATUS2);
return !(reg & (1 << MDIO_PHYXS_STATUS2_RX_FAULT_LBN));
}
} else if (efx->loopback_mode == LOOPBACK_PCS)
else if (efx->loopback_mode == LOOPBACK_PCS)
mmd_mask &= ~(MDIO_MMDREG_DEVS_PCS |
MDIO_MMDREG_DEVS_PMAPMD |
MDIO_MMDREG_DEVS_AN);
@ -197,6 +193,13 @@ bool mdio_clause45_links_ok(struct efx_nic *efx, unsigned int mmd_mask)
mmd_mask &= ~(MDIO_MMDREG_DEVS_PMAPMD |
MDIO_MMDREG_DEVS_AN);
if (!mmd_mask) {
/* Use presence of XGMII faults in lieu of link state */
reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_PHYXS,
MDIO_PHYXS_STATUS2);
return !(reg & (1 << MDIO_PHYXS_STATUS2_RX_FAULT_LBN));
}
while (mmd_mask) {
if (mmd_mask & 1) {
/* Double reads because link state is latched, and a
@ -263,7 +266,7 @@ void mdio_clause45_set_mmds_lpower(struct efx_nic *efx,
}
}
static u32 mdio_clause45_get_an(struct efx_nic *efx, u16 addr, u32 xnp)
static u32 mdio_clause45_get_an(struct efx_nic *efx, u16 addr)
{
int phy_id = efx->mii.phy_id;
u32 result = 0;
@ -278,9 +281,6 @@ static u32 mdio_clause45_get_an(struct efx_nic *efx, u16 addr, u32 xnp)
result |= ADVERTISED_100baseT_Half;
if (reg & ADVERTISE_100FULL)
result |= ADVERTISED_100baseT_Full;
if (reg & LPA_RESV)
result |= xnp;
return result;
}
@ -310,7 +310,7 @@ void mdio_clause45_get_settings(struct efx_nic *efx,
*/
void mdio_clause45_get_settings_ext(struct efx_nic *efx,
struct ethtool_cmd *ecmd,
u32 xnp, u32 xnp_lpa)
u32 npage_adv, u32 npage_lpa)
{
int phy_id = efx->mii.phy_id;
int reg;
@ -361,8 +361,8 @@ void mdio_clause45_get_settings_ext(struct efx_nic *efx,
ecmd->autoneg = AUTONEG_ENABLE;
ecmd->advertising |=
ADVERTISED_Autoneg |
mdio_clause45_get_an(efx,
MDIO_AN_ADVERTISE, xnp);
mdio_clause45_get_an(efx, MDIO_AN_ADVERTISE) |
npage_adv;
} else
ecmd->autoneg = AUTONEG_DISABLE;
} else
@ -371,27 +371,30 @@ void mdio_clause45_get_settings_ext(struct efx_nic *efx,
if (ecmd->autoneg) {
/* If AN is complete, report best common mode,
* otherwise report best advertised mode. */
u32 common = ecmd->advertising;
u32 modes = 0;
if (mdio_clause45_read(efx, phy_id, MDIO_MMD_AN,
MDIO_MMDREG_STAT1) &
(1 << MDIO_AN_STATUS_AN_DONE_LBN)) {
common &= mdio_clause45_get_an(efx, MDIO_AN_LPA,
xnp_lpa);
}
if (common & ADVERTISED_10000baseT_Full) {
(1 << MDIO_AN_STATUS_AN_DONE_LBN))
modes = (ecmd->advertising &
(mdio_clause45_get_an(efx, MDIO_AN_LPA) |
npage_lpa));
if (modes == 0)
modes = ecmd->advertising;
if (modes & ADVERTISED_10000baseT_Full) {
ecmd->speed = SPEED_10000;
ecmd->duplex = DUPLEX_FULL;
} else if (common & (ADVERTISED_1000baseT_Full |
ADVERTISED_1000baseT_Half)) {
} else if (modes & (ADVERTISED_1000baseT_Full |
ADVERTISED_1000baseT_Half)) {
ecmd->speed = SPEED_1000;
ecmd->duplex = !!(common & ADVERTISED_1000baseT_Full);
} else if (common & (ADVERTISED_100baseT_Full |
ADVERTISED_100baseT_Half)) {
ecmd->duplex = !!(modes & ADVERTISED_1000baseT_Full);
} else if (modes & (ADVERTISED_100baseT_Full |
ADVERTISED_100baseT_Half)) {
ecmd->speed = SPEED_100;
ecmd->duplex = !!(common & ADVERTISED_100baseT_Full);
ecmd->duplex = !!(modes & ADVERTISED_100baseT_Full);
} else {
ecmd->speed = SPEED_10;
ecmd->duplex = !!(common & ADVERTISED_10baseT_Full);
ecmd->duplex = !!(modes & ADVERTISED_10baseT_Full);
}
} else {
/* Report forced settings */
@ -415,7 +418,7 @@ int mdio_clause45_set_settings(struct efx_nic *efx,
int phy_id = efx->mii.phy_id;
struct ethtool_cmd prev;
u32 required;
int ctrl1_bits, reg;
int reg;
efx->phy_op->get_settings(efx, &prev);
@ -430,99 +433,83 @@ int mdio_clause45_set_settings(struct efx_nic *efx,
if (prev.port != PORT_TP || ecmd->port != PORT_TP)
return -EINVAL;
/* Check that PHY supports these settings and work out the
* basic control bits */
if (ecmd->duplex) {
/* Check that PHY supports these settings */
if (ecmd->autoneg) {
required = SUPPORTED_Autoneg;
} else if (ecmd->duplex) {
switch (ecmd->speed) {
case SPEED_10:
ctrl1_bits = BMCR_FULLDPLX;
required = SUPPORTED_10baseT_Full;
break;
case SPEED_100:
ctrl1_bits = BMCR_SPEED100 | BMCR_FULLDPLX;
required = SUPPORTED_100baseT_Full;
break;
case SPEED_1000:
ctrl1_bits = BMCR_SPEED1000 | BMCR_FULLDPLX;
required = SUPPORTED_1000baseT_Full;
break;
case SPEED_10000:
ctrl1_bits = (BMCR_SPEED1000 | BMCR_SPEED100 |
BMCR_FULLDPLX);
required = SUPPORTED_10000baseT_Full;
break;
default:
return -EINVAL;
case SPEED_10: required = SUPPORTED_10baseT_Full; break;
case SPEED_100: required = SUPPORTED_100baseT_Full; break;
default: return -EINVAL;
}
} else {
switch (ecmd->speed) {
case SPEED_10:
ctrl1_bits = 0;
required = SUPPORTED_10baseT_Half;
break;
case SPEED_100:
ctrl1_bits = BMCR_SPEED100;
required = SUPPORTED_100baseT_Half;
break;
case SPEED_1000:
ctrl1_bits = BMCR_SPEED1000;
required = SUPPORTED_1000baseT_Half;
break;
default:
return -EINVAL;
case SPEED_10: required = SUPPORTED_10baseT_Half; break;
case SPEED_100: required = SUPPORTED_100baseT_Half; break;
default: return -EINVAL;
}
}
if (ecmd->autoneg)
required |= SUPPORTED_Autoneg;
required |= ecmd->advertising;
if (required & ~prev.supported)
return -EINVAL;
/* Set the basic control bits */
reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD,
MDIO_MMDREG_CTRL1);
reg &= ~(BMCR_SPEED1000 | BMCR_SPEED100 | BMCR_FULLDPLX | 0x003c);
reg |= ctrl1_bits;
mdio_clause45_write(efx, phy_id, MDIO_MMD_PMAPMD, MDIO_MMDREG_CTRL1,
reg);
if (ecmd->autoneg) {
bool xnp = (ecmd->advertising & ADVERTISED_10000baseT_Full
|| EFX_WORKAROUND_13204(efx));
/* Set the AN registers */
if (ecmd->autoneg != prev.autoneg ||
ecmd->advertising != prev.advertising) {
bool xnp = false;
/* Set up the base page */
reg = ADVERTISE_CSMA;
if (ecmd->advertising & ADVERTISED_10baseT_Half)
reg |= ADVERTISE_10HALF;
if (ecmd->advertising & ADVERTISED_10baseT_Full)
reg |= ADVERTISE_10FULL;
if (ecmd->advertising & ADVERTISED_100baseT_Half)
reg |= ADVERTISE_100HALF;
if (ecmd->advertising & ADVERTISED_100baseT_Full)
reg |= ADVERTISE_100FULL;
if (xnp)
reg |= ADVERTISE_RESV;
else if (ecmd->advertising & (ADVERTISED_1000baseT_Half |
ADVERTISED_1000baseT_Full))
reg |= ADVERTISE_NPAGE;
reg |= efx_fc_advertise(efx->wanted_fc);
mdio_clause45_write(efx, phy_id, MDIO_MMD_AN,
MDIO_AN_ADVERTISE, reg);
if (efx->phy_op->set_xnp_advertise)
xnp = efx->phy_op->set_xnp_advertise(efx,
ecmd->advertising);
if (ecmd->autoneg) {
reg = 0;
if (ecmd->advertising & ADVERTISED_10baseT_Half)
reg |= ADVERTISE_10HALF;
if (ecmd->advertising & ADVERTISED_10baseT_Full)
reg |= ADVERTISE_10FULL;
if (ecmd->advertising & ADVERTISED_100baseT_Half)
reg |= ADVERTISE_100HALF;
if (ecmd->advertising & ADVERTISED_100baseT_Full)
reg |= ADVERTISE_100FULL;
if (xnp)
reg |= ADVERTISE_RESV;
mdio_clause45_write(efx, phy_id, MDIO_MMD_AN,
MDIO_AN_ADVERTISE, reg);
}
/* Set up the (extended) next page if necessary */
if (efx->phy_op->set_npage_adv)
efx->phy_op->set_npage_adv(efx, ecmd->advertising);
/* Enable and restart AN */
reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_AN,
MDIO_MMDREG_CTRL1);
if (ecmd->autoneg)
reg |= BMCR_ANENABLE | BMCR_ANRESTART;
else
reg &= ~BMCR_ANENABLE;
reg |= BMCR_ANENABLE;
if (!(EFX_WORKAROUND_15195(efx) &&
LOOPBACK_MASK(efx) & efx->phy_op->loopbacks))
reg |= BMCR_ANRESTART;
if (xnp)
reg |= 1 << MDIO_AN_CTRL_XNP_LBN;
else
reg &= ~(1 << MDIO_AN_CTRL_XNP_LBN);
mdio_clause45_write(efx, phy_id, MDIO_MMD_AN,
MDIO_MMDREG_CTRL1, reg);
} else {
/* Disable AN */
mdio_clause45_set_flag(efx, phy_id, MDIO_MMD_AN,
MDIO_MMDREG_CTRL1,
__ffs(BMCR_ANENABLE), false);
/* Set the basic control bits */
reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD,
MDIO_MMDREG_CTRL1);
reg &= ~(BMCR_SPEED1000 | BMCR_SPEED100 | BMCR_FULLDPLX |
0x003c);
if (ecmd->speed == SPEED_100)
reg |= BMCR_SPEED100;
if (ecmd->duplex)
reg |= BMCR_FULLDPLX;
mdio_clause45_write(efx, phy_id, MDIO_MMD_PMAPMD,
MDIO_MMDREG_CTRL1, reg);
}
return 0;
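
With the mdio_clause45_get_settings_ext() change above, the reported
speed/duplex is now derived from the intersection of what we advertise and
what the link partner advertises (including next pages), falling back to our
own advertising mask if autonegotiation has not completed. A small worked
example, illustrative only:

        u32 advertising = ADVERTISED_10000baseT_Full | ADVERTISED_1000baseT_Full;
        u32 lpa         = ADVERTISED_1000baseT_Full;    /* link partner ability */
        u32 modes       = advertising & lpa;            /* 1000baseT_Full */
        /* -> ecmd->speed = SPEED_1000, ecmd->duplex = DUPLEX_FULL */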


@ -155,7 +155,8 @@
#define MDIO_AN_XNP 22
#define MDIO_AN_LPA_XNP 25
#define MDIO_AN_10GBT_ADVERTISE 32
#define MDIO_AN_10GBT_CTRL 32
#define MDIO_AN_10GBT_CTRL_ADV_10G_LBN 12
#define MDIO_AN_10GBT_STATUS (33)
#define MDIO_AN_10GBT_STATUS_MS_FLT_LBN (15) /* MASTER/SLAVE config fault */
#define MDIO_AN_10GBT_STATUS_MS_LBN (14) /* MASTER/SLAVE config */


@ -566,7 +566,7 @@ struct efx_mac_operations {
* @poll: Poll for hardware state. Serialised by the mac_lock.
* @get_settings: Get ethtool settings. Serialised by the mac_lock.
* @set_settings: Set ethtool settings. Serialised by the mac_lock.
* @set_xnp_advertise: Set abilities advertised in Extended Next Page
* @set_npage_adv: Set abilities advertised in (Extended) Next Page
* (only needed where AN bit is set in mmds)
* @num_tests: Number of PHY-specific tests/results
* @test_names: Names of the tests/results
@ -586,7 +586,7 @@ struct efx_phy_operations {
struct ethtool_cmd *ecmd);
int (*set_settings) (struct efx_nic *efx,
struct ethtool_cmd *ecmd);
bool (*set_xnp_advertise) (struct efx_nic *efx, u32);
void (*set_npage_adv) (struct efx_nic *efx, u32);
u32 num_tests;
const char *const *test_names;
int (*run_tests) (struct efx_nic *efx, int *results, unsigned flags);
@ -754,8 +754,7 @@ union efx_multicast_hash {
* &struct net_device_stats.
* @stats_buffer: DMA buffer for statistics
* @stats_lock: Statistics update lock. Serialises statistics fetches
* @stats_enabled: Temporarily disable statistics fetches.
* Serialised by @stats_lock
* @stats_disable_count: Nest count for disabling statistics fetches
* @mac_op: MAC interface
* @mac_address: Permanent MAC address
* @phy_type: PHY type
@ -837,7 +836,7 @@ struct efx_nic {
struct efx_mac_stats mac_stats;
struct efx_buffer stats_buffer;
spinlock_t stats_lock;
bool stats_enabled;
unsigned int stats_disable_count;
struct efx_mac_operations *mac_op;
unsigned char mac_address[ETH_ALEN];


@ -17,7 +17,6 @@ extern struct efx_phy_operations falcon_sfx7101_phy_ops;
extern struct efx_phy_operations falcon_sft9001_phy_ops;
extern void tenxpress_phy_blink(struct efx_nic *efx, bool blink);
extern void tenxpress_crc_err(struct efx_nic *efx);
/****************************************************************************
* Exported functions from the driver for XFP optical PHYs


@ -665,6 +665,7 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
{
enum efx_loopback_mode loopback_mode = efx->loopback_mode;
int phy_mode = efx->phy_mode;
enum reset_type reset_method = RESET_TYPE_INVISIBLE;
struct ethtool_cmd ecmd;
struct efx_channel *channel;
int rc_test = 0, rc_reset = 0, rc;
@ -718,21 +719,21 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
mutex_unlock(&efx->mac_lock);
/* free up all consumers of SRAM (including all the queues) */
efx_reset_down(efx, &ecmd);
efx_reset_down(efx, reset_method, &ecmd);
rc = efx_test_chip(efx, tests);
if (rc && !rc_test)
rc_test = rc;
/* reset the chip to recover from the register test */
rc_reset = falcon_reset_hw(efx, RESET_TYPE_ALL);
rc_reset = falcon_reset_hw(efx, reset_method);
/* Ensure that the phy is powered and out of loopback
* for the bist and loopback tests */
efx->phy_mode &= ~PHY_MODE_LOW_POWER;
efx->loopback_mode = LOOPBACK_NONE;
rc = efx_reset_up(efx, &ecmd, rc_reset == 0);
rc = efx_reset_up(efx, reset_method, &ecmd, rc_reset == 0);
if (rc && !rc_reset)
rc_reset = rc;


@ -186,19 +186,22 @@ static int sfn4111t_reset(struct efx_nic *efx)
{
efx_oword_t reg;
/* GPIO pins are also used for I2C, so block that temporarily */
/* GPIO 3 and the GPIO register are shared with I2C, so block that */
mutex_lock(&efx->i2c_adap.bus_lock);
/* Pull RST_N (GPIO 2) low then let it up again, setting the
* FLASH_CFG_1 strap (GPIO 3) appropriately. Only change the
* output enables; the output levels should always be 0 (low)
* and we rely on external pull-ups. */
falcon_read(efx, &reg, GPIO_CTL_REG_KER);
EFX_SET_OWORD_FIELD(reg, GPIO2_OEN, true);
EFX_SET_OWORD_FIELD(reg, GPIO2_OUT, false);
falcon_write(efx, &reg, GPIO_CTL_REG_KER);
msleep(1000);
EFX_SET_OWORD_FIELD(reg, GPIO2_OUT, true);
EFX_SET_OWORD_FIELD(reg, GPIO3_OEN, true);
EFX_SET_OWORD_FIELD(reg, GPIO3_OUT,
!(efx->phy_mode & PHY_MODE_SPECIAL));
EFX_SET_OWORD_FIELD(reg, GPIO2_OEN, false);
EFX_SET_OWORD_FIELD(reg, GPIO3_OEN,
!!(efx->phy_mode & PHY_MODE_SPECIAL));
falcon_write(efx, &reg, GPIO_CTL_REG_KER);
msleep(1);
mutex_unlock(&efx->i2c_adap.bus_lock);
@ -232,12 +235,18 @@ static ssize_t set_phy_flash_cfg(struct device *dev,
} else if (efx->state != STATE_RUNNING || netif_running(efx->net_dev)) {
err = -EBUSY;
} else {
/* Reset the PHY, reconfigure the MAC and enable/disable
* MAC stats accordingly. */
efx->phy_mode = new_mode;
if (new_mode & PHY_MODE_SPECIAL)
efx_stats_disable(efx);
if (efx->board_info.type == EFX_BOARD_SFE4001)
err = sfe4001_poweron(efx);
else
err = sfn4111t_reset(efx);
efx_reconfigure_port(efx);
if (!(new_mode & PHY_MODE_SPECIAL))
efx_stats_enable(efx);
}
rtnl_unlock();
@ -326,6 +335,11 @@ int sfe4001_init(struct efx_nic *efx)
efx->board_info.monitor = sfe4001_check_hw;
efx->board_info.fini = sfe4001_fini;
if (efx->phy_mode & PHY_MODE_SPECIAL) {
/* PHY won't generate a 156.25 MHz clock and MAC stats fetch
* will fail. */
efx_stats_disable(efx);
}
rc = sfe4001_poweron(efx);
if (rc)
goto fail_ioexp;
@ -372,17 +386,25 @@ static void sfn4111t_fini(struct efx_nic *efx)
i2c_unregister_device(efx->board_info.hwmon_client);
}
static struct i2c_board_info sfn4111t_hwmon_info = {
static struct i2c_board_info sfn4111t_a0_hwmon_info = {
I2C_BOARD_INFO("max6647", 0x4e),
.irq = -1,
};
static struct i2c_board_info sfn4111t_r5_hwmon_info = {
I2C_BOARD_INFO("max6646", 0x4d),
.irq = -1,
};
int sfn4111t_init(struct efx_nic *efx)
{
int rc;
efx->board_info.hwmon_client =
i2c_new_device(&efx->i2c_adap, &sfn4111t_hwmon_info);
i2c_new_device(&efx->i2c_adap,
(efx->board_info.minor < 5) ?
&sfn4111t_a0_hwmon_info :
&sfn4111t_r5_hwmon_info);
if (!efx->board_info.hwmon_client)
return -EIO;
@ -394,8 +416,10 @@ int sfn4111t_init(struct efx_nic *efx)
if (rc)
goto fail_hwmon;
if (efx->phy_mode & PHY_MODE_SPECIAL)
if (efx->phy_mode & PHY_MODE_SPECIAL) {
efx_stats_disable(efx);
sfn4111t_reset(efx);
}
return 0;


@ -67,6 +67,8 @@
#define PMA_PMD_EXT_CLK312_WIDTH 1
#define PMA_PMD_EXT_LPOWER_LBN 12
#define PMA_PMD_EXT_LPOWER_WIDTH 1
#define PMA_PMD_EXT_ROBUST_LBN 14
#define PMA_PMD_EXT_ROBUST_WIDTH 1
#define PMA_PMD_EXT_SSR_LBN 15
#define PMA_PMD_EXT_SSR_WIDTH 1
@ -177,35 +179,24 @@
#define C22EXT_STATUS_LINK_LBN 2
#define C22EXT_STATUS_LINK_WIDTH 1
#define C22EXT_MSTSLV_REG 49162
#define C22EXT_MSTSLV_1000_HD_LBN 10
#define C22EXT_MSTSLV_1000_HD_WIDTH 1
#define C22EXT_MSTSLV_1000_FD_LBN 11
#define C22EXT_MSTSLV_1000_FD_WIDTH 1
#define C22EXT_MSTSLV_CTRL 49161
#define C22EXT_MSTSLV_CTRL_ADV_1000_HD_LBN 8
#define C22EXT_MSTSLV_CTRL_ADV_1000_FD_LBN 9
#define C22EXT_MSTSLV_STATUS 49162
#define C22EXT_MSTSLV_STATUS_LP_1000_HD_LBN 10
#define C22EXT_MSTSLV_STATUS_LP_1000_FD_LBN 11
/* Time to wait between powering down the LNPGA and turning off the power
* rails */
#define LNPGA_PDOWN_WAIT (HZ / 5)
static int crc_error_reset_threshold = 100;
module_param(crc_error_reset_threshold, int, 0644);
MODULE_PARM_DESC(crc_error_reset_threshold,
"Max number of CRC errors before XAUI reset");
struct tenxpress_phy_data {
enum efx_loopback_mode loopback_mode;
atomic_t bad_crc_count;
enum efx_phy_mode phy_mode;
int bad_lp_tries;
};
void tenxpress_crc_err(struct efx_nic *efx)
{
struct tenxpress_phy_data *phy_data = efx->phy_data;
if (phy_data != NULL)
atomic_inc(&phy_data->bad_crc_count);
}
static ssize_t show_phy_short_reach(struct device *dev,
struct device_attribute *attr, char *buf)
{
@ -284,7 +275,9 @@ static int tenxpress_init(struct efx_nic *efx)
PMA_PMD_XCONTROL_REG);
reg |= ((1 << PMA_PMD_EXT_GMII_EN_LBN) |
(1 << PMA_PMD_EXT_CLK_OUT_LBN) |
(1 << PMA_PMD_EXT_CLK312_LBN));
(1 << PMA_PMD_EXT_CLK312_LBN) |
(1 << PMA_PMD_EXT_ROBUST_LBN));
mdio_clause45_write(efx, phy_id, MDIO_MMD_PMAPMD,
PMA_PMD_XCONTROL_REG, reg);
mdio_clause45_set_flag(efx, phy_id, MDIO_MMD_C22EXT,
@ -346,6 +339,7 @@ static int tenxpress_phy_init(struct efx_nic *efx)
rc = tenxpress_init(efx);
if (rc < 0)
goto fail;
mdio_clause45_set_pause(efx);
if (efx->phy_type == PHY_TYPE_SFT9001B) {
rc = device_create_file(&efx->pci_dev->dev,
@ -376,8 +370,8 @@ static int tenxpress_special_reset(struct efx_nic *efx)
/* The XGMAC clock is driven from the SFC7101/SFT9001 312MHz clock, so
* a special software reset can glitch the XGMAC sufficiently for stats
* requests to fail. Since we don't often special_reset, just lock. */
spin_lock(&efx->stats_lock);
* requests to fail. */
efx_stats_disable(efx);
/* Initiate reset */
reg = mdio_clause45_read(efx, efx->mii.phy_id,
@ -392,17 +386,17 @@ static int tenxpress_special_reset(struct efx_nic *efx)
rc = mdio_clause45_wait_reset_mmds(efx,
TENXPRESS_REQUIRED_DEVS);
if (rc < 0)
goto unlock;
goto out;
/* Try and reconfigure the device */
rc = tenxpress_init(efx);
if (rc < 0)
goto unlock;
goto out;
/* Wait for the XGXS state machine to churn */
mdelay(10);
unlock:
spin_unlock(&efx->stats_lock);
out:
efx_stats_enable(efx);
return rc;
}
@ -520,7 +514,7 @@ static void tenxpress_phy_reconfigure(struct efx_nic *efx)
{
struct tenxpress_phy_data *phy_data = efx->phy_data;
struct ethtool_cmd ecmd;
bool phy_mode_change, loop_reset, loop_toggle, loopback;
bool phy_mode_change, loop_reset;
if (efx->phy_mode & (PHY_MODE_OFF | PHY_MODE_SPECIAL)) {
phy_data->phy_mode = efx->phy_mode;
@ -531,12 +525,10 @@ static void tenxpress_phy_reconfigure(struct efx_nic *efx)
phy_mode_change = (efx->phy_mode == PHY_MODE_NORMAL &&
phy_data->phy_mode != PHY_MODE_NORMAL);
loopback = LOOPBACK_MASK(efx) & efx->phy_op->loopbacks;
loop_toggle = LOOPBACK_CHANGED(phy_data, efx, efx->phy_op->loopbacks);
loop_reset = (LOOPBACK_OUT_OF(phy_data, efx, efx->phy_op->loopbacks) ||
LOOPBACK_CHANGED(phy_data, efx, 1 << LOOPBACK_GPHY));
if (loop_reset || loop_toggle || loopback || phy_mode_change) {
if (loop_reset || phy_mode_change) {
int rc;
efx->phy_op->get_settings(efx, &ecmd);
@ -551,20 +543,6 @@ static void tenxpress_phy_reconfigure(struct efx_nic *efx)
falcon_reset_xaui(efx);
}
if (efx->phy_type != PHY_TYPE_SFX7101) {
/* Only change autoneg once, on coming out or
* going into loopback */
if (loop_toggle)
ecmd.autoneg = !loopback;
if (loopback) {
ecmd.duplex = DUPLEX_FULL;
if (efx->loopback_mode == LOOPBACK_GPHY)
ecmd.speed = SPEED_1000;
else
ecmd.speed = SPEED_10000;
}
}
rc = efx->phy_op->set_settings(efx, &ecmd);
WARN_ON(rc);
}
@ -623,13 +601,6 @@ static void tenxpress_phy_poll(struct efx_nic *efx)
if (phy_data->phy_mode != PHY_MODE_NORMAL)
return;
if (EFX_WORKAROUND_10750(efx) &&
atomic_read(&phy_data->bad_crc_count) > crc_error_reset_threshold) {
EFX_ERR(efx, "Resetting XAUI due to too many CRC errors\n");
falcon_reset_xaui(efx);
atomic_set(&phy_data->bad_crc_count, 0);
}
}
static void tenxpress_phy_fini(struct efx_nic *efx)
@ -772,107 +743,76 @@ static int sft9001_run_tests(struct efx_nic *efx, int *results, unsigned flags)
return rc;
}
static u32 tenxpress_get_xnp_lpa(struct efx_nic *efx)
static void
tenxpress_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
{
int phy = efx->mii.phy_id;
u32 lpa = 0;
int phy_id = efx->mii.phy_id;
u32 adv = 0, lpa = 0;
int reg;
if (efx->phy_type != PHY_TYPE_SFX7101) {
reg = mdio_clause45_read(efx, phy, MDIO_MMD_C22EXT,
C22EXT_MSTSLV_REG);
if (reg & (1 << C22EXT_MSTSLV_1000_HD_LBN))
reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_C22EXT,
C22EXT_MSTSLV_CTRL);
if (reg & (1 << C22EXT_MSTSLV_CTRL_ADV_1000_FD_LBN))
adv |= ADVERTISED_1000baseT_Full;
reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_C22EXT,
C22EXT_MSTSLV_STATUS);
if (reg & (1 << C22EXT_MSTSLV_STATUS_LP_1000_HD_LBN))
lpa |= ADVERTISED_1000baseT_Half;
if (reg & (1 << C22EXT_MSTSLV_1000_FD_LBN))
if (reg & (1 << C22EXT_MSTSLV_STATUS_LP_1000_FD_LBN))
lpa |= ADVERTISED_1000baseT_Full;
}
reg = mdio_clause45_read(efx, phy, MDIO_MMD_AN, MDIO_AN_10GBT_STATUS);
reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_AN,
MDIO_AN_10GBT_CTRL);
if (reg & (1 << MDIO_AN_10GBT_CTRL_ADV_10G_LBN))
adv |= ADVERTISED_10000baseT_Full;
reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_AN,
MDIO_AN_10GBT_STATUS);
if (reg & (1 << MDIO_AN_10GBT_STATUS_LP_10G_LBN))
lpa |= ADVERTISED_10000baseT_Full;
return lpa;
mdio_clause45_get_settings_ext(efx, ecmd, adv, lpa);
if (efx->phy_type != PHY_TYPE_SFX7101)
ecmd->supported |= (SUPPORTED_100baseT_Full |
SUPPORTED_1000baseT_Full);
/* In loopback, the PHY automatically brings up the correct interface,
* but doesn't advertise the correct speed. So override it */
if (efx->loopback_mode == LOOPBACK_GPHY)
ecmd->speed = SPEED_1000;
else if (LOOPBACK_MASK(efx) & efx->phy_op->loopbacks)
ecmd->speed = SPEED_10000;
}
static void sfx7101_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
static int tenxpress_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
{
mdio_clause45_get_settings_ext(efx, ecmd, ADVERTISED_10000baseT_Full,
tenxpress_get_xnp_lpa(efx));
ecmd->supported |= SUPPORTED_10000baseT_Full;
ecmd->advertising |= ADVERTISED_10000baseT_Full;
if (!ecmd->autoneg)
return -EINVAL;
return mdio_clause45_set_settings(efx, ecmd);
}
static void sft9001_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
static void sfx7101_set_npage_adv(struct efx_nic *efx, u32 advertising)
{
mdio_clause45_set_flag(efx, efx->mii.phy_id, MDIO_MMD_AN,
MDIO_AN_10GBT_CTRL,
MDIO_AN_10GBT_CTRL_ADV_10G_LBN,
advertising & ADVERTISED_10000baseT_Full);
}
static void sft9001_set_npage_adv(struct efx_nic *efx, u32 advertising)
{
int phy_id = efx->mii.phy_id;
u32 xnp_adv = 0;
int reg;
reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD,
PMA_PMD_SPEED_ENABLE_REG);
if (EFX_WORKAROUND_13204(efx) && (reg & (1 << PMA_PMD_100TX_ADV_LBN)))
xnp_adv |= ADVERTISED_100baseT_Full;
if (reg & (1 << PMA_PMD_1000T_ADV_LBN))
xnp_adv |= ADVERTISED_1000baseT_Full;
if (reg & (1 << PMA_PMD_10000T_ADV_LBN))
xnp_adv |= ADVERTISED_10000baseT_Full;
mdio_clause45_get_settings_ext(efx, ecmd, xnp_adv,
tenxpress_get_xnp_lpa(efx));
ecmd->supported |= (SUPPORTED_100baseT_Half |
SUPPORTED_100baseT_Full |
SUPPORTED_1000baseT_Full);
/* Use the vendor defined C22ext register for duplex settings */
if (ecmd->speed != SPEED_10000 && !ecmd->autoneg) {
reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_C22EXT,
GPHY_XCONTROL_REG);
ecmd->duplex = (reg & (1 << GPHY_DUPLEX_LBN) ?
DUPLEX_FULL : DUPLEX_HALF);
}
}
static int sft9001_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
{
int phy_id = efx->mii.phy_id;
int rc;
rc = mdio_clause45_set_settings(efx, ecmd);
if (rc)
return rc;
if (ecmd->speed != SPEED_10000 && !ecmd->autoneg)
mdio_clause45_set_flag(efx, phy_id, MDIO_MMD_C22EXT,
GPHY_XCONTROL_REG, GPHY_DUPLEX_LBN,
ecmd->duplex == DUPLEX_FULL);
return rc;
}
static bool sft9001_set_xnp_advertise(struct efx_nic *efx, u32 advertising)
{
int phy = efx->mii.phy_id;
int reg = mdio_clause45_read(efx, phy, MDIO_MMD_PMAPMD,
PMA_PMD_SPEED_ENABLE_REG);
bool enabled;
reg &= ~((1 << 2) | (1 << 3));
if (EFX_WORKAROUND_13204(efx) &&
(advertising & ADVERTISED_100baseT_Full))
reg |= 1 << PMA_PMD_100TX_ADV_LBN;
if (advertising & ADVERTISED_1000baseT_Full)
reg |= 1 << PMA_PMD_1000T_ADV_LBN;
if (advertising & ADVERTISED_10000baseT_Full)
reg |= 1 << PMA_PMD_10000T_ADV_LBN;
mdio_clause45_write(efx, phy, MDIO_MMD_PMAPMD,
PMA_PMD_SPEED_ENABLE_REG, reg);
enabled = (advertising &
(ADVERTISED_1000baseT_Half |
ADVERTISED_1000baseT_Full |
ADVERTISED_10000baseT_Full));
if (EFX_WORKAROUND_13204(efx))
enabled |= (advertising & ADVERTISED_100baseT_Full);
return enabled;
mdio_clause45_set_flag(efx, phy_id, MDIO_MMD_C22EXT,
C22EXT_MSTSLV_CTRL,
C22EXT_MSTSLV_CTRL_ADV_1000_FD_LBN,
advertising & ADVERTISED_1000baseT_Full);
mdio_clause45_set_flag(efx, phy_id, MDIO_MMD_AN,
MDIO_AN_10GBT_CTRL,
MDIO_AN_10GBT_CTRL_ADV_10G_LBN,
advertising & ADVERTISED_10000baseT_Full);
}
struct efx_phy_operations falcon_sfx7101_phy_ops = {
@ -882,8 +822,9 @@ struct efx_phy_operations falcon_sfx7101_phy_ops = {
.poll = tenxpress_phy_poll,
.fini = tenxpress_phy_fini,
.clear_interrupt = efx_port_dummy_op_void,
.get_settings = sfx7101_get_settings,
.set_settings = mdio_clause45_set_settings,
.get_settings = tenxpress_get_settings,
.set_settings = tenxpress_set_settings,
.set_npage_adv = sfx7101_set_npage_adv,
.num_tests = ARRAY_SIZE(sfx7101_test_names),
.test_names = sfx7101_test_names,
.run_tests = sfx7101_run_tests,
@ -898,9 +839,9 @@ struct efx_phy_operations falcon_sft9001_phy_ops = {
.poll = tenxpress_phy_poll,
.fini = tenxpress_phy_fini,
.clear_interrupt = efx_port_dummy_op_void,
.get_settings = sft9001_get_settings,
.set_settings = sft9001_set_settings,
.set_xnp_advertise = sft9001_set_xnp_advertise,
.get_settings = tenxpress_get_settings,
.set_settings = tenxpress_set_settings,
.set_npage_adv = sft9001_set_npage_adv,
.num_tests = ARRAY_SIZE(sft9001_test_names),
.test_names = sft9001_test_names,
.run_tests = sft9001_run_tests,


@ -18,8 +18,8 @@
#define EFX_WORKAROUND_ALWAYS(efx) 1
#define EFX_WORKAROUND_FALCON_A(efx) (falcon_rev(efx) <= FALCON_REV_A1)
#define EFX_WORKAROUND_10G(efx) EFX_IS10G(efx)
#define EFX_WORKAROUND_SFX7101(efx) ((efx)->phy_type == PHY_TYPE_SFX7101)
#define EFX_WORKAROUND_SFT9001A(efx) ((efx)->phy_type == PHY_TYPE_SFT9001A)
#define EFX_WORKAROUND_SFT9001(efx) ((efx)->phy_type == PHY_TYPE_SFT9001A || \
(efx)->phy_type == PHY_TYPE_SFT9001B)
/* XAUI resets if link not detected */
#define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS
@ -29,8 +29,6 @@
#define EFX_WORKAROUND_7884 EFX_WORKAROUND_10G
/* TX pkt parser problem with <= 16 byte TXes */
#define EFX_WORKAROUND_9141 EFX_WORKAROUND_ALWAYS
/* Low rate CRC errors require XAUI reset */
#define EFX_WORKAROUND_10750 EFX_WORKAROUND_SFX7101
/* TX_EV_PKT_ERR can be caused by a dangling TX descriptor
* or a PCIe error (bug 11028) */
#define EFX_WORKAROUND_10727 EFX_WORKAROUND_ALWAYS
@ -55,8 +53,8 @@
#define EFX_WORKAROUND_8071 EFX_WORKAROUND_FALCON_A
/* Need to send XNP pages for 100BaseT */
#define EFX_WORKAROUND_13204 EFX_WORKAROUND_SFT9001A
/* Need to keep AN enabled */
#define EFX_WORKAROUND_13963 EFX_WORKAROUND_SFT9001A
#define EFX_WORKAROUND_13204 EFX_WORKAROUND_SFT9001
/* Don't restart AN in near-side loopback */
#define EFX_WORKAROUND_15195 EFX_WORKAROUND_SFT9001
#endif /* EFX_WORKAROUNDS_H */


@ -1403,9 +1403,6 @@ static int sky2_up(struct net_device *dev)
}
if (netif_msg_ifup(sky2))
printk(KERN_INFO PFX "%s: enabling interface\n", dev->name);
netif_carrier_off(dev);
/* must be power of 2 */
@ -1484,6 +1481,9 @@ static int sky2_up(struct net_device *dev)
sky2_write32(hw, B0_IMSK, imask);
sky2_set_multicast(dev);
if (netif_msg_ifup(sky2))
printk(KERN_INFO PFX "%s: enabling interface\n", dev->name);
return 0;
err_out:


@ -48,6 +48,7 @@
#include <linux/pim.h>
#include <net/addrconf.h>
#include <linux/netfilter_ipv6.h>
#include <net/ip6_checksum.h>
/* Big lock, protecting vif table, mrt cache and mroute socket state.
Note that the changes are semaphored via rtnl_lock.


@ -77,6 +77,7 @@
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mutex.h>
#ifdef CONFIG_INET
#include <net/inet_common.h>
@ -175,6 +176,7 @@ struct packet_sock {
#endif
struct packet_type prot_hook;
spinlock_t bind_lock;
struct mutex pg_vec_lock;
unsigned int running:1, /* prot_hook is attached*/
auxdata:1,
origdev:1;
@ -1069,6 +1071,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol)
*/
spin_lock_init(&po->bind_lock);
mutex_init(&po->pg_vec_lock);
po->prot_hook.func = packet_rcv;
if (sock->type == SOCK_PACKET)
@ -1865,6 +1868,7 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing
synchronize_net();
err = -EBUSY;
mutex_lock(&po->pg_vec_lock);
if (closing || atomic_read(&po->mapped) == 0) {
err = 0;
#define XC(a, b) ({ __typeof__ ((a)) __t; __t = (a); (a) = (b); __t; })
@ -1886,6 +1890,7 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing
if (atomic_read(&po->mapped))
printk(KERN_DEBUG "packet_mmap: vma is busy: %d\n", atomic_read(&po->mapped));
}
mutex_unlock(&po->pg_vec_lock);
spin_lock(&po->bind_lock);
if (was_running && !po->running) {
@ -1918,7 +1923,7 @@ static int packet_mmap(struct file *file, struct socket *sock, struct vm_area_st
size = vma->vm_end - vma->vm_start;
lock_sock(sk);
mutex_lock(&po->pg_vec_lock);
if (po->pg_vec == NULL)
goto out;
if (size != po->pg_vec_len*po->pg_vec_pages*PAGE_SIZE)
@ -1941,7 +1946,7 @@ static int packet_mmap(struct file *file, struct socket *sock, struct vm_area_st
err = 0;
out:
release_sock(sk);
mutex_unlock(&po->pg_vec_lock);
return err;
}
#endif
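
The af_packet change above swaps the socket lock for a new pg_vec_lock mutex
in the ring-buffer paths: packet_set_ring() and packet_mmap() now serialise
on po->pg_vec_lock rather than lock_sock()/release_sock(). The likely
motivation (not spelled out in the hunks themselves) is lock ordering: the
->mmap handler runs with mmap_sem already held, while other socket paths may
take mmap_sem (e.g. when faulting on user memory) under the socket lock, so
taking lock_sock() inside mmap risks inverting that order. A minimal sketch
of the resulting pattern in the mmap handler, illustrative only:

        mutex_lock(&po->pg_vec_lock);   /* was: lock_sock(sk) */
        /* ... check po->pg_vec and insert its pages into the VMA ... */
        mutex_unlock(&po->pg_vec_lock); /* was: release_sock(sk) */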