Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next

Jeff Kirsher says:

====================
Intel Wired LAN Driver Updates 2014-12-05

This series contains updates to ixgbe and ixgbevf.

Alex provides a couple of patches to clean up ixgbe.  The first cleans
up the page reuse code, getting it into a state where all the needed
workarounds are in place, and fixes a few minor oversights, such as
using __free_pages instead of put_page to drop a locally allocated
page.  The second cleans up the tail writes for the ixgbe descriptor
queues.
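
The distinction matters because the Rx buffers are higher-order pages: a
page just allocated with dev_alloc_pages() is owned solely by the driver,
so it is returned at its allocation order rather than by reference drop.
A condensed sketch of the pattern from ixgbe_alloc_mapped_page() below:

	page = dev_alloc_pages(ixgbe_rx_pg_order(rx_ring));
	if (unlikely(!page))
		return false;
	dma = dma_map_page(rx_ring->dev, page, 0,
			   ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_pages(page, ixgbe_rx_pg_order(rx_ring)); /* not put_page() */
		return false;
	}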

Martin Petersen adds support for looking up MAC addresses in Open
Firmware or the IDPROM.

Emil provides patches for ixgbe and ixgbevf to fix an issue on rmmod and
to add support for X550 in the VF driver.  The first removes the
read/write operations on the CIAA/CIAD registers, since they can block
access to the PCI config space, and uses the standard kernel functions
for accessing the PCI config space instead.  The second fixes an issue
where the driver has logic to free up used data in case any of the
checks in ixgbe_probe() fail, but a similar set of cleanups can occur on
driver unload in ixgbe_remove(), which can cause the rmmod command to
crash.
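
The replacement pattern shows up in the watchdog rework below: rather
than poking the CIAA/CIAD debug registers through the MAC, each VF's
PCI status word is read with the standard config-space accessors:

	u16 status_reg;

	pci_read_config_word(vfdev, PCI_STATUS, &status_reg);
	if (status_reg & PCI_STATUS_REC_MASTER_ABORT)
		ixgbe_issue_vf_flr(adapter, vfdev);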

Don provides the remaining patches in the series to complete the addition
of X550 support into the ixgbe driver.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Committed by David S. Miller on 2014-12-05 20:50:54 -08:00 in commit ddd5c50f9b; 18 changed files with 2407 additions and 326 deletions.


@ -34,7 +34,7 @@ obj-$(CONFIG_IXGBE) += ixgbe.o
ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
ixgbe_mbx.o ixgbe_x540.o ixgbe_lib.o ixgbe_ptp.o
ixgbe_mbx.o ixgbe_x540.o ixgbe_x550.o ixgbe_lib.o ixgbe_ptp.o
ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
ixgbe_dcb_82599.o ixgbe_dcb_nl.o


@ -300,16 +300,17 @@ enum ixgbe_ring_f_enum {
RING_F_ARRAY_SIZE /* must be last in enum set */
};
#define IXGBE_MAX_RSS_INDICES 16
#define IXGBE_MAX_VMDQ_INDICES 64
#define IXGBE_MAX_FDIR_INDICES 63 /* based on q_vector limit */
#define IXGBE_MAX_FCOE_INDICES 8
#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
#define IXGBE_MAX_L2A_QUEUES 4
#define IXGBE_BAD_L2A_QUEUE 3
#define IXGBE_MAX_MACVLANS 31
#define IXGBE_MAX_DCBMACVLANS 8
#define IXGBE_MAX_RSS_INDICES 16
#define IXGBE_MAX_RSS_INDICES_X550 64
#define IXGBE_MAX_VMDQ_INDICES 64
#define IXGBE_MAX_FDIR_INDICES 63 /* based on q_vector limit */
#define IXGBE_MAX_FCOE_INDICES 8
#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
#define IXGBE_MAX_L2A_QUEUES 4
#define IXGBE_BAD_L2A_QUEUE 3
#define IXGBE_MAX_MACVLANS 31
#define IXGBE_MAX_DCBMACVLANS 8
struct ixgbe_ring_feature {
u16 limit; /* upper limit on feature indices */
@ -553,11 +554,6 @@ static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring)
return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
}
static inline void ixgbe_write_tail(struct ixgbe_ring *ring, u32 value)
{
writel(value, ring->tail);
}
#define IXGBE_RX_DESC(R, i) \
(&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
#define IXGBE_TX_DESC(R, i) \
@ -769,6 +765,21 @@ struct ixgbe_adapter {
unsigned long fwd_bitmask; /* Bitmask indicating in use pools */
};
static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter)
{
switch (adapter->hw.mac.type) {
case ixgbe_mac_82598EB:
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
return IXGBE_MAX_RSS_INDICES;
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
return IXGBE_MAX_RSS_INDICES_X550;
default:
return 0;
}
}
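
/* Callers later in the series size RSS with this helper instead of the
 * fixed IXGBE_MAX_RSS_INDICES constant, e.g. in ixgbe_sw_init():
 *
 *	rss = min_t(int, ixgbe_max_rss_indices(adapter), num_online_cpus());
 *	adapter->ring_feature[RING_F_RSS].limit = rss;
 */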
struct ixgbe_fdir_filter {
struct hlist_node fdir_node;
union ixgbe_atr_input filter;
@ -804,11 +815,15 @@ enum ixgbe_boards {
board_82598,
board_82599,
board_X540,
board_X550,
board_X550EM_x,
};
extern struct ixgbe_info ixgbe_82598_info;
extern struct ixgbe_info ixgbe_82599_info;
extern struct ixgbe_info ixgbe_X540_info;
extern struct ixgbe_info ixgbe_X550_info;
extern struct ixgbe_info ixgbe_X550EM_x_info;
#ifdef CONFIG_IXGBE_DCB
extern const struct dcbnl_rtnl_ops dcbnl_ops;
#endif


@ -1625,7 +1625,7 @@ static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
* ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
* @hw: pointer to hardware structure
**/
u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
{
u16 i;
u16 j;
@ -1636,7 +1636,7 @@ u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
/* Include 0x0-0x3F in the checksum */
for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
if (hw->eeprom.ops.read(hw, i, &word) != 0) {
if (hw->eeprom.ops.read(hw, i, &word)) {
hw_dbg(hw, "EEPROM read failed\n");
break;
}
@ -1645,24 +1645,35 @@ u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
/* Include all data from pointers except for the fw pointer */
for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
hw->eeprom.ops.read(hw, i, &pointer);
if (hw->eeprom.ops.read(hw, i, &pointer)) {
hw_dbg(hw, "EEPROM read failed\n");
return IXGBE_ERR_EEPROM;
}
/* Make sure the pointer seems valid */
if (pointer != 0xFFFF && pointer != 0) {
hw->eeprom.ops.read(hw, pointer, &length);
/* If the pointer seems invalid */
if (pointer == 0xFFFF || pointer == 0)
continue;
if (length != 0xFFFF && length != 0) {
for (j = pointer+1; j <= pointer+length; j++) {
hw->eeprom.ops.read(hw, j, &word);
checksum += word;
}
if (hw->eeprom.ops.read(hw, pointer, &length)) {
hw_dbg(hw, "EEPROM read failed\n");
return IXGBE_ERR_EEPROM;
}
if (length == 0xFFFF || length == 0)
continue;
for (j = pointer + 1; j <= pointer + length; j++) {
if (hw->eeprom.ops.read(hw, j, &word)) {
hw_dbg(hw, "EEPROM read failed\n");
return IXGBE_ERR_EEPROM;
}
checksum += word;
}
}
checksum = (u16)IXGBE_EEPROM_SUM - checksum;
return checksum;
return (s32)checksum;
}
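
/* With the return type widened to s32, a negative value now reports an
 * EEPROM failure while a non-negative value carries the 16-bit checksum
 * in its low word.  Callers decode it as in the updated paths below:
 *
 *	status = hw->eeprom.ops.calc_checksum(hw);
 *	if (status < 0)
 *		return status;
 *	checksum = (u16)(status & 0xffff);
 */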
/**
@ -1686,26 +1697,33 @@ s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
* EEPROM read fails
*/
status = hw->eeprom.ops.read(hw, 0, &checksum);
if (status == 0) {
checksum = hw->eeprom.ops.calc_checksum(hw);
hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
/*
* Verify read checksum from EEPROM is the same as
* calculated checksum
*/
if (read_checksum != checksum)
status = IXGBE_ERR_EEPROM_CHECKSUM;
/* If the user cares, return the calculated checksum */
if (checksum_val)
*checksum_val = checksum;
} else {
if (status) {
hw_dbg(hw, "EEPROM read failed\n");
return status;
}
status = hw->eeprom.ops.calc_checksum(hw);
if (status < 0)
return status;
checksum = (u16)(status & 0xffff);
status = hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
if (status) {
hw_dbg(hw, "EEPROM read failed\n");
return status;
}
/* Verify read checksum from EEPROM is the same as
* calculated checksum
*/
if (read_checksum != checksum)
status = IXGBE_ERR_EEPROM_CHECKSUM;
/* If the user cares, return the calculated checksum */
if (checksum_val)
*checksum_val = checksum;
return status;
}
@ -1724,15 +1742,19 @@ s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
* EEPROM read fails
*/
status = hw->eeprom.ops.read(hw, 0, &checksum);
if (status == 0) {
checksum = hw->eeprom.ops.calc_checksum(hw);
status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM,
checksum);
} else {
if (status) {
hw_dbg(hw, "EEPROM read failed\n");
return status;
}
status = hw->eeprom.ops.calc_checksum(hw);
if (status < 0)
return status;
checksum = (u16)(status & 0xffff);
status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, checksum);
return status;
}
@ -2469,7 +2491,7 @@ static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
* Acquires the SWFW semaphore through the GSSR register for the specified
* function (CSR, PHY0, PHY1, EEPROM, Flash)
**/
s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask)
{
u32 gssr = 0;
u32 swmask = mask;
@ -2514,7 +2536,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
* Releases the SWFW semaphore through the GSSR register for the specified
* function (CSR, PHY0, PHY1, EEPROM, Flash)
**/
void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask)
{
u32 gssr;
u32 swmask = mask;
@ -3446,23 +3468,34 @@ static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
* @buffer: contains the command to write and where the return status will
* be placed
* @length: length of buffer, must be multiple of 4 bytes
* @timeout: time in ms to wait for command completion
* @return_data: read and return data from the buffer (true) or not (false)
* Needed because FW structures are big endian and decoding of
* these fields can be 8 bit or 16 bit based on command. Decoding
* is not easily understood without making a table of commands.
* So we will leave this up to the caller to read back the data
* in these cases.
*
* Communicates with the manageability block. On success return 0
* else return IXGBE_ERR_HOST_INTERFACE_COMMAND.
**/
static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
u32 length)
s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
u32 length, u32 timeout,
bool return_data)
{
u32 hicr, i, bi;
u32 hicr, i, bi, fwsts;
u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
u8 buf_len, dword_len;
u16 buf_len, dword_len;
if (length == 0 || length & 0x3 ||
length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
hw_dbg(hw, "Buffer length failure.\n");
if (length == 0 || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length);
return IXGBE_ERR_HOST_INTERFACE_COMMAND;
}
/* Set bit 9 of FWSTS clearing FW reset indication */
fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS);
IXGBE_WRITE_REG(hw, IXGBE_FWSTS, fwsts | IXGBE_FWSTS_FWRI);
/* Check that the host interface is enabled. */
hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
if ((hicr & IXGBE_HICR_EN) == 0) {
@ -3470,7 +3503,12 @@ static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
return IXGBE_ERR_HOST_INTERFACE_COMMAND;
}
/* Calculate length in DWORDs */
/* Calculate length in DWORDs. We must be DWORD aligned */
if ((length % (sizeof(u32))) != 0) {
hw_dbg(hw, "Buffer length failure, not aligned to dword");
return IXGBE_ERR_INVALID_ARGUMENT;
}
dword_len = length >> 2;
/*
@ -3484,7 +3522,7 @@ static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
/* Setting this bit tells the ARC that a new command is pending. */
IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);
for (i = 0; i < IXGBE_HI_COMMAND_TIMEOUT; i++) {
for (i = 0; i < timeout; i++) {
hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
if (!(hicr & IXGBE_HICR_C))
break;
@ -3492,12 +3530,15 @@ static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
}
/* Check command successful completion. */
if (i == IXGBE_HI_COMMAND_TIMEOUT ||
if ((timeout != 0 && i == timeout) ||
(!(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV))) {
hw_dbg(hw, "Command has failed with no status valid.\n");
return IXGBE_ERR_HOST_INTERFACE_COMMAND;
}
if (!return_data)
return 0;
/* Calculate length in DWORDs */
dword_len = hdr_size >> 2;
@ -3568,7 +3609,9 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
sizeof(fw_cmd));
sizeof(fw_cmd),
IXGBE_HI_COMMAND_TIMEOUT,
true);
if (ret_val != 0)
continue;
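
/* The added timeout and return_data parameters let callers choose a
 * per-command limit and skip the response read for commands whose reply
 * layout the helper cannot decode.  An illustrative no-response call,
 * assuming a fw_cmd already filled in by the caller:
 *
 *	ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
 *					       sizeof(fw_cmd),
 *					       IXGBE_HI_COMMAND_TIMEOUT,
 *					       false);
 */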


@ -64,7 +64,7 @@ s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
u16 *data);
s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data);
u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw);
s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw);
s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
u16 *checksum_val);
s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw);
@ -84,8 +84,8 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw);
bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
void ixgbe_fc_autoneg(struct ixgbe_hw *hw);
s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask);
void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask);
s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask);
void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask);
s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr);
s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq);
@ -110,6 +110,8 @@ void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps);
s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
u8 build, u8 ver);
s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
u32 length, u32 timeout, bool return_data);
void ixgbe_clear_tx_pending(struct ixgbe_hw *hw);
bool ixgbe_mng_enabled(struct ixgbe_hw *hw);


@ -2927,7 +2927,7 @@ static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter)
max_combined = IXGBE_MAX_FDIR_INDICES;
} else {
/* support up to 16 queues with RSS */
max_combined = IXGBE_MAX_RSS_INDICES;
max_combined = ixgbe_max_rss_indices(adapter);
}
return max_combined;
@ -2975,6 +2975,7 @@ static int ixgbe_set_channels(struct net_device *dev,
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
unsigned int count = ch->combined_count;
u8 max_rss_indices = ixgbe_max_rss_indices(adapter);
/* verify they are not requesting separate vectors */
if (!count || ch->rx_count || ch->tx_count)
@ -2991,9 +2992,9 @@ static int ixgbe_set_channels(struct net_device *dev,
/* update feature limits from largest to smallest supported values */
adapter->ring_feature[RING_F_FDIR].limit = count;
/* cap RSS limit at 16 */
if (count > IXGBE_MAX_RSS_INDICES)
count = IXGBE_MAX_RSS_INDICES;
/* cap RSS limit */
if (count > max_rss_indices)
count = max_rss_indices;
adapter->ring_feature[RING_F_RSS].limit = count;
#ifdef IXGBE_FCOE


@ -42,6 +42,7 @@
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
@ -50,6 +51,15 @@
#include <linux/prefetch.h>
#include <scsi/fc/fc_fcoe.h>
#ifdef CONFIG_OF
#include <linux/of_net.h>
#endif
#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif
#include "ixgbe.h"
#include "ixgbe_common.h"
#include "ixgbe_dcb_82599.h"
@ -65,15 +75,17 @@ char ixgbe_default_device_descr[] =
static char ixgbe_default_device_descr[] =
"Intel(R) 10 Gigabit Network Connection";
#endif
#define DRV_VERSION "3.19.1-k"
#define DRV_VERSION "4.0.1-k"
const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
"Copyright (c) 1999-2014 Intel Corporation.";
static const struct ixgbe_info *ixgbe_info_tbl[] = {
[board_82598] = &ixgbe_82598_info,
[board_82599] = &ixgbe_82599_info,
[board_X540] = &ixgbe_X540_info,
[board_82598] = &ixgbe_82598_info,
[board_82599] = &ixgbe_82599_info,
[board_X540] = &ixgbe_X540_info,
[board_X550] = &ixgbe_X550_info,
[board_X550EM_x] = &ixgbe_X550EM_x_info,
};
/* ixgbe_pci_tbl - PCI Device ID Table
@ -115,6 +127,9 @@ static const struct pci_device_id ixgbe_pci_tbl[] = {
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T), board_X550},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KX4), board_X550EM_x},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), board_X550EM_x},
/* required last entry */
{0, }
};
@ -1416,40 +1431,21 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val)
{
rx_ring->next_to_use = val;
/* update next to alloc since we have filled the ring */
rx_ring->next_to_alloc = val;
/*
* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
* such as IA-64).
*/
wmb();
ixgbe_write_tail(rx_ring, val);
}
static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
struct ixgbe_rx_buffer *bi)
{
struct page *page = bi->page;
dma_addr_t dma = bi->dma;
dma_addr_t dma;
/* since we are recycling buffers we should seldom need to alloc */
if (likely(dma))
if (likely(page))
return true;
/* alloc new page for storage */
if (likely(!page)) {
page = dev_alloc_pages(ixgbe_rx_pg_order(rx_ring));
if (unlikely(!page)) {
rx_ring->rx_stats.alloc_rx_page_failed++;
return false;
}
bi->page = page;
page = dev_alloc_pages(ixgbe_rx_pg_order(rx_ring));
if (unlikely(!page)) {
rx_ring->rx_stats.alloc_rx_page_failed++;
return false;
}
/* map page for use */
@ -1462,13 +1458,13 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
*/
if (dma_mapping_error(rx_ring->dev, dma)) {
__free_pages(page, ixgbe_rx_pg_order(rx_ring));
bi->page = NULL;
rx_ring->rx_stats.alloc_rx_page_failed++;
return false;
}
bi->dma = dma;
bi->page = page;
bi->page_offset = 0;
return true;
@ -1512,16 +1508,28 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
i -= rx_ring->count;
}
/* clear the hdr_addr for the next_to_use descriptor */
rx_desc->read.hdr_addr = 0;
/* clear the status bits for the next_to_use descriptor */
rx_desc->wb.upper.status_error = 0;
cleaned_count--;
} while (cleaned_count);
i += rx_ring->count;
if (rx_ring->next_to_use != i)
ixgbe_release_rx_desc(rx_ring, i);
if (rx_ring->next_to_use != i) {
rx_ring->next_to_use = i;
/* update next to alloc since we have filled the ring */
rx_ring->next_to_alloc = i;
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
* such as IA-64).
*/
wmb();
writel(i, rx_ring->tail);
}
}
static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring,
@ -1798,9 +1806,7 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
/* transfer page from old buffer to new buffer */
new_buff->page = old_buff->page;
new_buff->dma = old_buff->dma;
new_buff->page_offset = old_buff->page_offset;
*new_buff = *old_buff;
/* sync the buffer for use by the device */
dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
@ -1809,6 +1815,11 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
DMA_FROM_DEVICE);
}
static inline bool ixgbe_page_is_reserved(struct page *page)
{
return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
}
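
/* A page is not reused if it came from a remote NUMA node (recycling it
 * would keep DMA traffic pinned to remote memory) or from the pfmemalloc
 * emergency reserves, which must not be held for ordinary Rx traffic.
 */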
/**
* ixgbe_add_rx_frag - Add contents of Rx buffer to sk_buff
* @rx_ring: rx descriptor ring to transact packets on
@ -1844,12 +1855,12 @@ static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
/* we can reuse buffer as-is, just make sure it is local */
if (likely(page_to_nid(page) == numa_node_id()))
/* page is not reserved, we can reuse buffer as-is */
if (likely(!ixgbe_page_is_reserved(page)))
return true;
/* this page cannot be reused so discard it */
put_page(page);
__free_pages(page, ixgbe_rx_pg_order(rx_ring));
return false;
}
@ -1857,7 +1868,7 @@ static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
rx_buffer->page_offset, size, truesize);
/* avoid re-using remote pages */
if (unlikely(page_to_nid(page) != numa_node_id()))
if (unlikely(ixgbe_page_is_reserved(page)))
return false;
#if (PAGE_SIZE < 8192)
@ -1867,22 +1878,19 @@ static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
/* flip page offset to other buffer */
rx_buffer->page_offset ^= truesize;
/* Even if we own the page, we are not allowed to use atomic_set()
* This would break get_page_unless_zero() users.
*/
atomic_inc(&page->_count);
#else
/* move offset up to the next cache line */
rx_buffer->page_offset += truesize;
if (rx_buffer->page_offset > last_offset)
return false;
/* bump ref count on page before it is given to the stack */
get_page(page);
#endif
/* Even if we own the page, we are not allowed to use atomic_set()
* This would break get_page_unless_zero() users.
*/
atomic_inc(&page->_count);
return true;
}
@ -1945,6 +1953,8 @@ static struct sk_buff *ixgbe_fetch_rx_buffer(struct ixgbe_ring *rx_ring,
rx_buffer->page_offset,
ixgbe_rx_bufsz(rx_ring),
DMA_FROM_DEVICE);
rx_buffer->skb = NULL;
}
/* pull page into skb */
@ -1962,8 +1972,6 @@ static struct sk_buff *ixgbe_fetch_rx_buffer(struct ixgbe_ring *rx_ring,
}
/* clear contents of buffer_info */
rx_buffer->skb = NULL;
rx_buffer->dma = 0;
rx_buffer->page = NULL;
return skb;
@ -3214,7 +3222,9 @@ static void ixgbe_setup_reta(struct ixgbe_adapter *adapter, const u32 *seed)
struct ixgbe_hw *hw = &adapter->hw;
u32 reta = 0;
int i, j;
int reta_entries = 128;
u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
int indices_multi;
/*
* Program table for at least 2 queues w/ SR-IOV so that VFs can
@ -3228,22 +3238,67 @@ static void ixgbe_setup_reta(struct ixgbe_adapter *adapter, const u32 *seed)
for (i = 0; i < 10; i++)
IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]);
/* Fill out the redirection table as follows:
* 82598: 128 (8 bit wide) entries containing pair of 4 bit RSS indices
* 82599/X540: 128 (8 bit wide) entries containing 4 bit RSS index
* X550: 512 (8 bit wide) entries containing 6 bit RSS index
*/
if (adapter->hw.mac.type == ixgbe_mac_82598EB)
indices_multi = 0x11;
else
indices_multi = 0x1;
switch (adapter->hw.mac.type) {
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
reta_entries = 512;
default:
break;
}
/* Fill out redirection table */
for (i = 0, j = 0; i < 128; i++, j++) {
for (i = 0, j = 0; i < reta_entries; i++, j++) {
if (j == rss_i)
j = 0;
/* reta = 4-byte sliding window of
* 0x00..(indices-1)(indices-1)00..etc. */
reta = (reta << 8) | (j * 0x11);
reta = (reta << 8) | (j * indices_multi);
if ((i & 3) == 3) {
if (i < 128)
IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
else
IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
reta);
}
}
}
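
/* Worked example of the sliding window with rss_i = 4:
 * 82598  (indices_multi = 0x11): reta bytes cycle 0x00 0x11 0x22 0x33,
 *	   i.e. each 8-bit entry holds the 4-bit RSS index twice;
 * others (indices_multi = 0x01): reta bytes cycle 0x00 0x01 0x02 0x03.
 * Every fourth byte completes one 32-bit RETA/ERETA register write.
 */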
static void ixgbe_setup_vfreta(struct ixgbe_adapter *adapter, const u32 *seed)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 vfreta = 0;
u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
unsigned int pf_pool = adapter->num_vfs;
int i, j;
/* Fill out hash function seeds */
for (i = 0; i < 10; i++)
IXGBE_WRITE_REG(hw, IXGBE_PFVFRSSRK(i, pf_pool), seed[i]);
/* Fill out the redirection table */
for (i = 0, j = 0; i < 64; i++, j++) {
if (j == rss_i)
j = 0;
vfreta = (vfreta << 8) | j;
if ((i & 3) == 3)
IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
IXGBE_WRITE_REG(hw, IXGBE_PFVFRETA(i >> 2, pf_pool),
vfreta);
}
}
static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 mrqc = 0, rss_field = 0;
u32 mrqc = 0, rss_field = 0, vfmrqc = 0;
u32 rss_key[10];
u32 rxcsum;
@ -3289,9 +3344,24 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
rss_field |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
netdev_rss_key_fill(rss_key, sizeof(rss_key));
ixgbe_setup_reta(adapter, rss_key);
mrqc |= rss_field;
IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
if ((hw->mac.type >= ixgbe_mac_X550) &&
(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
unsigned int pf_pool = adapter->num_vfs;
/* Enable VF RSS mode */
mrqc |= IXGBE_MRQC_MULTIPLE_RSS;
IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
/* Setup RSS through the VF registers */
ixgbe_setup_vfreta(adapter, rss_key);
vfmrqc = IXGBE_MRQC_RSSEN;
vfmrqc |= rss_field;
IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(pf_pool), vfmrqc);
} else {
ixgbe_setup_reta(adapter, rss_key);
mrqc |= rss_field;
IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
}
}
/**
@ -4344,29 +4414,26 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
/* Free all the Rx ring sk_buffs */
for (i = 0; i < rx_ring->count; i++) {
struct ixgbe_rx_buffer *rx_buffer;
struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i];
rx_buffer = &rx_ring->rx_buffer_info[i];
if (rx_buffer->skb) {
struct sk_buff *skb = rx_buffer->skb;
if (IXGBE_CB(skb)->page_released) {
if (IXGBE_CB(skb)->page_released)
dma_unmap_page(dev,
IXGBE_CB(skb)->dma,
ixgbe_rx_bufsz(rx_ring),
DMA_FROM_DEVICE);
IXGBE_CB(skb)->page_released = false;
}
dev_kfree_skb(skb);
rx_buffer->skb = NULL;
}
if (rx_buffer->dma)
dma_unmap_page(dev, rx_buffer->dma,
ixgbe_rx_pg_size(rx_ring),
DMA_FROM_DEVICE);
rx_buffer->dma = 0;
if (rx_buffer->page)
__free_pages(rx_buffer->page,
ixgbe_rx_pg_order(rx_ring));
if (!rx_buffer->page)
continue;
dma_unmap_page(dev, rx_buffer->dma,
ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
__free_pages(rx_buffer->page, ixgbe_rx_pg_order(rx_ring));
rx_buffer->page = NULL;
}
@ -5056,7 +5123,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
hw->subsystem_device_id = pdev->subsystem_device;
/* Set common capability flags and settings */
rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus());
rss = min_t(int, ixgbe_max_rss_indices(adapter), num_online_cpus());
adapter->ring_feature[RING_F_RSS].limit = rss;
adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
@ -6318,6 +6385,66 @@ static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter)
}
}
#ifdef CONFIG_PCI_IOV
static inline void ixgbe_issue_vf_flr(struct ixgbe_adapter *adapter,
struct pci_dev *vfdev)
{
if (!pci_wait_for_pending_transaction(vfdev))
e_dev_warn("Issuing VFLR with pending transactions\n");
e_dev_err("Issuing VFLR for VF %s\n", pci_name(vfdev));
pcie_capability_set_word(vfdev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
msleep(100);
}
static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
struct pci_dev *vfdev;
u32 gpc;
int pos;
unsigned short vf_id;
if (!(netif_carrier_ok(adapter->netdev)))
return;
gpc = IXGBE_READ_REG(hw, IXGBE_TXDGPC);
if (gpc) /* If incrementing then no need for the check below */
return;
/* Check to see if a bad DMA write target from an errant or
* malicious VF has caused a PCIe error. If so then we can
* issue a VFLR to the offending VF(s) and then resume without
* requesting a full slot reset.
*/
if (!pdev)
return;
pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
if (!pos)
return;
/* get the device ID for the VF */
pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_id);
/* check status reg for all VFs owned by this PF */
vfdev = pci_get_device(pdev->vendor, vf_id, NULL);
while (vfdev) {
if (vfdev->is_virtfn && (vfdev->physfn == pdev)) {
u16 status_reg;
pci_read_config_word(vfdev, PCI_STATUS, &status_reg);
if (status_reg & PCI_STATUS_REC_MASTER_ABORT)
/* issue VFLR */
ixgbe_issue_vf_flr(adapter, vfdev);
}
vfdev = pci_get_device(pdev->vendor, vf_id, vfdev);
}
}
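
/* pci_get_device() releases the reference on its third argument and
 * returns the next matching device with a new reference held, so the
 * loop above visits every device carrying the VF device ID and acts
 * only on VFs whose physfn is this PF.
 */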
static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
{
u32 ssvpc;
@ -6338,6 +6465,17 @@ static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
e_warn(drv, "%u Spoofed packets detected\n", ssvpc);
}
#else
static void ixgbe_spoof_check(struct ixgbe_adapter __always_unused *adapter)
{
}
static void
ixgbe_check_for_bad_vf(struct ixgbe_adapter __always_unused *adapter)
{
}
#endif /* CONFIG_PCI_IOV */
/**
* ixgbe_watchdog_subtask - check and bring link up
@ -6358,6 +6496,7 @@ static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter)
else
ixgbe_watchdog_link_is_down(adapter);
ixgbe_check_for_bad_vf(adapter);
ixgbe_spoof_check(adapter);
ixgbe_update_stats(adapter);
@ -6469,51 +6608,6 @@ static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
}
#ifdef CONFIG_PCI_IOV
static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter)
{
int vf;
struct ixgbe_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
u32 gpc;
u32 ciaa, ciad;
gpc = IXGBE_READ_REG(hw, IXGBE_TXDGPC);
if (gpc) /* If incrementing then no need for the check below */
return;
/*
* Check to see if a bad DMA write target from an errant or
* malicious VF has caused a PCIe error. If so then we can
* issue a VFLR to the offending VF(s) and then resume without
* requesting a full slot reset.
*/
for (vf = 0; vf < adapter->num_vfs; vf++) {
ciaa = (vf << 16) | 0x80000000;
/* 32 bit read so align, we really want status at offset 6 */
ciaa |= PCI_COMMAND;
IXGBE_WRITE_REG(hw, IXGBE_CIAA_BY_MAC(hw), ciaa);
ciad = IXGBE_READ_REG(hw, IXGBE_CIAD_BY_MAC(hw));
ciaa &= 0x7FFFFFFF;
/* disable debug mode asap after reading data */
IXGBE_WRITE_REG(hw, IXGBE_CIAA_BY_MAC(hw), ciaa);
/* Get the upper 16 bits which will be the PCI status reg */
ciad >>= 16;
if (ciad & PCI_STATUS_REC_MASTER_ABORT) {
netdev_err(netdev, "VF %d Hung DMA\n", vf);
/* Issue VFLR */
ciaa = (vf << 16) | 0x80000000;
ciaa |= 0xA8;
IXGBE_WRITE_REG(hw, IXGBE_CIAA_BY_MAC(hw), ciaa);
ciad = 0x00008000; /* VFLR */
IXGBE_WRITE_REG(hw, IXGBE_CIAD_BY_MAC(hw), ciad);
ciaa &= 0x7FFFFFFF;
IXGBE_WRITE_REG(hw, IXGBE_CIAA_BY_MAC(hw), ciaa);
}
}
}
#endif
/**
* ixgbe_service_timer - Timer Call-back
* @data: pointer to adapter cast into an unsigned long
@ -6522,7 +6616,6 @@ static void ixgbe_service_timer(unsigned long data)
{
struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
unsigned long next_event_offset;
bool ready = true;
/* poll faster when waiting for link */
if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
@ -6530,32 +6623,10 @@ static void ixgbe_service_timer(unsigned long data)
else
next_event_offset = HZ * 2;
#ifdef CONFIG_PCI_IOV
/*
* don't bother with SR-IOV VF DMA hang check if there are
* no VFs or the link is down
*/
if (!adapter->num_vfs ||
(adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE))
goto normal_timer_service;
/* If we have VFs allocated then we must check for DMA hangs */
ixgbe_check_for_bad_vf(adapter);
next_event_offset = HZ / 50;
adapter->timer_event_accumulator++;
if (adapter->timer_event_accumulator >= 100)
adapter->timer_event_accumulator = 0;
else
ready = false;
normal_timer_service:
#endif
/* Reset the timer */
mod_timer(&adapter->service_timer, next_event_offset + jiffies);
if (ready)
ixgbe_service_event_schedule(adapter);
ixgbe_service_event_schedule(adapter);
}
static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
@ -6960,8 +7031,12 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
/* notify HW of packet */
ixgbe_write_tail(tx_ring, i);
writel(i, tx_ring->tail);
/* we need this if more than one processor can write to our tail
* at a time, it synchronizes IO on IA64/Altix systems
*/
mmiowb();
}
return;
@ -8026,6 +8101,29 @@ int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
return is_wol_supported;
}
/**
* ixgbe_get_platform_mac_addr - Look up MAC address in Open Firmware / IDPROM
* @adapter: Pointer to adapter struct
*/
static void ixgbe_get_platform_mac_addr(struct ixgbe_adapter *adapter)
{
#ifdef CONFIG_OF
struct device_node *dp = pci_device_to_OF_node(adapter->pdev);
struct ixgbe_hw *hw = &adapter->hw;
const unsigned char *addr;
addr = of_get_mac_address(dp);
if (addr) {
ether_addr_copy(hw->mac.perm_addr, addr);
return;
}
#endif /* CONFIG_OF */
#ifdef CONFIG_SPARC
ether_addr_copy(hw->mac.perm_addr, idprom->id_ethaddr);
#endif /* CONFIG_SPARC */
}
/**
* ixgbe_probe - Device Initialization Routine
* @pdev: PCI device information struct
@ -8108,7 +8206,6 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
SET_NETDEV_DEV(netdev, &pdev->dev);
adapter = netdev_priv(netdev);
pci_set_drvdata(pdev, adapter);
adapter->netdev = netdev;
adapter->pdev = pdev;
@ -8295,6 +8392,8 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_sw_init;
}
ixgbe_get_platform_mac_addr(adapter);
memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
if (!is_valid_ether_addr(netdev->dev_addr)) {
@ -8386,6 +8485,8 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto err_register;
pci_set_drvdata(pdev, adapter);
/* power down the optics for 82599 SFP+ fiber */
if (hw->mac.ops.disable_tx_laser)
hw->mac.ops.disable_tx_laser(hw);
@ -8465,9 +8566,14 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
static void ixgbe_remove(struct pci_dev *pdev)
{
struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
struct net_device *netdev;
bool disable_dev;
/* if !adapter then we already cleaned up in probe */
if (!adapter)
return;
netdev = adapter->netdev;
ixgbe_dbg_adapter_exit(adapter);
set_bit(__IXGBE_REMOVING, &adapter->state);
@ -8614,8 +8720,7 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
* VFLR. Just clean up the AER in that case.
*/
if (vfdev) {
e_dev_err("Issuing VFLR to VF %d\n", vf);
pci_write_config_dword(vfdev, 0xA8, 0x00008000);
ixgbe_issue_vf_flr(adapter, vfdev);
/* Free device reference count */
pci_dev_put(vfdev);
}


@ -49,6 +49,188 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id);
static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw);
static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw);
/**
* ixgbe_out_i2c_byte_ack - Send I2C byte with ack
* @hw: pointer to the hardware structure
* @byte: byte to send
*
* Returns an error code on error.
**/
static s32 ixgbe_out_i2c_byte_ack(struct ixgbe_hw *hw, u8 byte)
{
s32 status;
status = ixgbe_clock_out_i2c_byte(hw, byte);
if (status)
return status;
return ixgbe_get_i2c_ack(hw);
}
/**
* ixgbe_in_i2c_byte_ack - Receive an I2C byte and send ack
* @hw: pointer to the hardware structure
* @byte: pointer to a u8 to receive the byte
*
* Returns an error code on error.
**/
static s32 ixgbe_in_i2c_byte_ack(struct ixgbe_hw *hw, u8 *byte)
{
s32 status;
status = ixgbe_clock_in_i2c_byte(hw, byte);
if (status)
return status;
/* ACK */
return ixgbe_clock_out_i2c_bit(hw, false);
}
/**
* ixgbe_ones_comp_byte_add - Perform one's complement addition
* @add1: addend 1
* @add2: addend 2
*
* Returns one's complement 8-bit sum.
**/
static u8 ixgbe_ones_comp_byte_add(u8 add1, u8 add2)
{
u16 sum = add1 + add2;
sum = (sum & 0xFF) + (sum >> 8);
return sum & 0xFF;
}
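
/* Worked example: ixgbe_ones_comp_byte_add(0xFF, 0x02)
 *	sum = 0x101; (0x101 & 0xFF) + (0x101 >> 8) = 0x01 + 0x01 = 0x02
 */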
/**
* ixgbe_read_i2c_combined_generic - Perform I2C read combined operation
* @hw: pointer to the hardware structure
* @addr: I2C bus address to read from
* @reg: I2C device register to read from
* @val: pointer to location to receive read value
*
* Returns an error code on error.
**/
s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
u16 reg, u16 *val)
{
u32 swfw_mask = hw->phy.phy_semaphore_mask;
int max_retry = 10;
int retry = 0;
u8 csum_byte;
u8 high_bits;
u8 low_bits;
u8 reg_high;
u8 csum;
reg_high = ((reg >> 7) & 0xFE) | 1; /* Indicate read combined */
csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF);
csum = ~csum;
do {
if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
return IXGBE_ERR_SWFW_SYNC;
ixgbe_i2c_start(hw);
/* Device Address and write indication */
if (ixgbe_out_i2c_byte_ack(hw, addr))
goto fail;
/* Write bits 14:8 */
if (ixgbe_out_i2c_byte_ack(hw, reg_high))
goto fail;
/* Write bits 7:0 */
if (ixgbe_out_i2c_byte_ack(hw, reg & 0xFF))
goto fail;
/* Write csum */
if (ixgbe_out_i2c_byte_ack(hw, csum))
goto fail;
/* Re-start condition */
ixgbe_i2c_start(hw);
/* Device Address and read indication */
if (ixgbe_out_i2c_byte_ack(hw, addr | 1))
goto fail;
/* Get upper bits */
if (ixgbe_in_i2c_byte_ack(hw, &high_bits))
goto fail;
/* Get low bits */
if (ixgbe_in_i2c_byte_ack(hw, &low_bits))
goto fail;
/* Get csum */
if (ixgbe_clock_in_i2c_byte(hw, &csum_byte))
goto fail;
/* NACK */
if (ixgbe_clock_out_i2c_bit(hw, false))
goto fail;
ixgbe_i2c_stop(hw);
hw->mac.ops.release_swfw_sync(hw, swfw_mask);
*val = (high_bits << 8) | low_bits;
return 0;
fail:
ixgbe_i2c_bus_clear(hw);
hw->mac.ops.release_swfw_sync(hw, swfw_mask);
retry++;
if (retry < max_retry)
hw_dbg(hw, "I2C byte read combined error - Retry.\n");
else
hw_dbg(hw, "I2C byte read combined error.\n");
} while (retry < max_retry);
return IXGBE_ERR_I2C;
}
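
/* The bus transaction the loop above implements, byte by byte:
 *	START, addr|W, reg_high (reg[14:8] plus combined-read flag),
 *	reg[7:0], csum, repeated START, addr|R, data[15:8], data[7:0],
 *	csum, NACK, STOP
 */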
/**
* ixgbe_write_i2c_combined_generic - Perform I2C write combined operation
* @hw: pointer to the hardware structure
* @addr: I2C bus address to write to
* @reg: I2C device register to write to
* @val: value to write
*
* Returns an error code on error.
**/
s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw,
u8 addr, u16 reg, u16 val)
{
int max_retry = 1;
int retry = 0;
u8 reg_high;
u8 csum;
reg_high = (reg >> 7) & 0xFE; /* Indicate write combined */
csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF);
csum = ixgbe_ones_comp_byte_add(csum, val >> 8);
csum = ixgbe_ones_comp_byte_add(csum, val & 0xFF);
csum = ~csum;
do {
ixgbe_i2c_start(hw);
/* Device Address and write indication */
if (ixgbe_out_i2c_byte_ack(hw, addr))
goto fail;
/* Write bits 14:8 */
if (ixgbe_out_i2c_byte_ack(hw, reg_high))
goto fail;
/* Write bits 7:0 */
if (ixgbe_out_i2c_byte_ack(hw, reg & 0xFF))
goto fail;
/* Write data 15:8 */
if (ixgbe_out_i2c_byte_ack(hw, val >> 8))
goto fail;
/* Write data 7:0 */
if (ixgbe_out_i2c_byte_ack(hw, val & 0xFF))
goto fail;
/* Write csum */
if (ixgbe_out_i2c_byte_ack(hw, csum))
goto fail;
ixgbe_i2c_stop(hw);
return 0;
fail:
ixgbe_i2c_bus_clear(hw);
retry++;
if (retry < max_retry)
hw_dbg(hw, "I2C byte write combined error - Retry.\n");
else
hw_dbg(hw, "I2C byte write combined error.\n");
} while (retry < max_retry);
return IXGBE_ERR_I2C;
}
/**
* ixgbe_identify_phy_generic - Get physical layer module
* @hw: pointer to hardware structure
@ -60,6 +242,15 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
u32 phy_addr;
u16 ext_ability = 0;
if (!hw->phy.phy_semaphore_mask) {
hw->phy.lan_id = IXGBE_READ_REG(hw, IXGBE_STATUS) &
IXGBE_STATUS_LAN_ID_1;
if (hw->phy.lan_id)
hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM;
else
hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM;
}
if (hw->phy.type == ixgbe_phy_unknown) {
for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) {
hw->phy.mdio.prtad = phy_addr;
@ -315,12 +506,7 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 *phy_data)
{
s32 status;
u16 gssr;
if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
gssr = IXGBE_GSSR_PHY1_SM;
else
gssr = IXGBE_GSSR_PHY0_SM;
u32 gssr = hw->phy.phy_semaphore_mask;
if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == 0) {
status = ixgbe_read_phy_reg_mdi(hw, reg_addr, device_type,
@ -418,7 +604,7 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 phy_data)
{
s32 status;
u16 gssr;
u32 gssr;
if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
gssr = IXGBE_GSSR_PHY1_SM;
@ -1469,15 +1655,10 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
s32 status;
u32 max_retry = 10;
u32 retry = 0;
u16 swfw_mask = 0;
u32 swfw_mask = hw->phy.phy_semaphore_mask;
bool nack = true;
*data = 0;
if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
swfw_mask = IXGBE_GSSR_PHY1_SM;
else
swfw_mask = IXGBE_GSSR_PHY0_SM;
do {
if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
return IXGBE_ERR_SWFW_SYNC;
@ -1555,12 +1736,7 @@ s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
s32 status;
u32 max_retry = 1;
u32 retry = 0;
u16 swfw_mask = 0;
if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
swfw_mask = IXGBE_GSSR_PHY1_SM;
else
swfw_mask = IXGBE_GSSR_PHY0_SM;
u32 swfw_mask = hw->phy.phy_semaphore_mask;
if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
return IXGBE_ERR_SWFW_SYNC;


@ -77,6 +77,11 @@
#define IXGBE_I2C_EEPROM_STATUS_PASS 0x1
#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2
#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3
#define IXGBE_CS4227 0xBE /* CS4227 address */
#define IXGBE_CS4227_SPARE24_LSB 0x12B0 /* Reg to program EDC */
#define IXGBE_CS4227_EDC_MODE_CX1 0x0002
#define IXGBE_CS4227_EDC_MODE_SR 0x0004
/* Flow control defines */
#define IXGBE_TAF_SYM_PAUSE 0x400
#define IXGBE_TAF_ASM_PAUSE 0x800
@ -110,7 +115,6 @@
/* SFP+ SFF-8472 Compliance code */
#define IXGBE_SFF_SFF_8472_UNSUP 0x00
s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw);
s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw);
s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw);
s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
@ -157,4 +161,8 @@ s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 *sff8472_data);
s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 eeprom_data);
s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
u16 reg, u16 *val);
s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
u16 reg, u16 val);
#endif /* _IXGBE_PHY_H_ */


@ -221,7 +221,8 @@ int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
if (adapter->ring_feature[RING_F_VMDQ].limit == 1) {
adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus());
rss = min_t(int, ixgbe_max_rss_indices(adapter),
num_online_cpus());
} else {
rss = min_t(int, IXGBE_MAX_L2A_QUEUES, num_online_cpus());
}


@ -74,6 +74,17 @@
#define IXGBE_DEV_ID_82599_QSFP_SF_QP 0x1558
#define IXGBE_DEV_ID_X540T1 0x1560
#define IXGBE_DEV_ID_X550T 0x1563
#define IXGBE_DEV_ID_X550EM_X_KX4 0x15AA
#define IXGBE_DEV_ID_X550EM_X_KR 0x15AB
#define IXGBE_DEV_ID_X550EM_X_SFP 0x15AC
#define IXGBE_DEV_ID_X550EM_X_10G_T 0x15AD
#define IXGBE_DEV_ID_X550EM_X_1G_T 0x15AE
#define IXGBE_DEV_ID_X550_VF_HV 0x1564
#define IXGBE_DEV_ID_X550_VF 0x1565
#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8
#define IXGBE_DEV_ID_X550EM_X_VF_HV 0x15A9
/* VF Device IDs */
#define IXGBE_DEV_ID_82599_VF 0x10ED
#define IXGBE_DEV_ID_X540_VF 0x1515
@ -297,6 +308,7 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_IMIRVP 0x05AC0
#define IXGBE_VMD_CTL 0x0581C
#define IXGBE_RETA(_i) (0x05C00 + ((_i) * 4)) /* 32 of these (0-31) */
#define IXGBE_ERETA(_i) (0x0EE80 + ((_i) * 4)) /* 96 of these (0-95) */
#define IXGBE_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* 10 of these (0-9) */
/* Registers for setting up RSS on X550 with SRIOV
@ -740,6 +752,24 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_LDPCECL 0x0E820
#define IXGBE_LDPCECH 0x0E821
/* MII clause 22/28 definitions */
#define IXGBE_MDIO_PHY_LOW_POWER_MODE 0x0800
#define IXGBE_MDIO_XENPAK_LASI_STATUS 0x9005 /* XENPAK LASI Status register */
#define IXGBE_XENPAK_LASI_LINK_STATUS_ALARM 0x1 /* Link Status Alarm change */
#define IXGBE_MDIO_AUTO_NEG_LINK_STATUS 0x4 /* Indicates if link is up */
#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK 0x7 /* Speed/Duplex Mask */
#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10M_HALF 0x0 /* 10Mb/s Half Duplex */
#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10M_FULL 0x1 /* 10Mb/s Full Duplex */
#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_100M_HALF 0x2 /* 100Mb/s H Duplex */
#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_100M_FULL 0x3 /* 100Mb/s F Duplex */
#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_HALF 0x4 /* 1Gb/s Half Duplex */
#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL 0x5 /* 1Gb/s Full Duplex */
#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_HALF 0x6 /* 10Gb/s Half Duplex */
#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL 0x7 /* 10Gb/s Full Duplex */
/* Management */
#define IXGBE_MAVTV(_i) (0x05010 + ((_i) * 4)) /* 8 of these (0-7) */
#define IXGBE_MFUTP(_i) (0x05030 + ((_i) * 4)) /* 8 of these (0-7) */
@ -1141,6 +1171,13 @@ struct ixgbe_thermal_sensor_data {
/* MDIO definitions */
#define IXGBE_MDIO_PMA_PMD_DEV_TYPE 0x1
#define IXGBE_MDIO_PCS_DEV_TYPE 0x3
#define IXGBE_MDIO_PHY_XS_DEV_TYPE 0x4
#define IXGBE_MDIO_AUTO_NEG_DEV_TYPE 0x7
#define IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE 0x1E /* Device 30 */
#define IXGBE_TWINAX_DEV 1
#define IXGBE_MDIO_COMMAND_TIMEOUT 100 /* PHY Timeout for 1 GB mode */
#define IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL 0x0 /* VS1 Control Reg */
@ -1150,9 +1187,23 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_MDIO_VENDOR_SPECIFIC_1_10G_SPEED 0x0018
#define IXGBE_MDIO_VENDOR_SPECIFIC_1_1G_SPEED 0x0010
#define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR 0xC30A /* PHY_XS SDA/SCL Addr Reg */
#define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */
#define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT 0xC30C /* PHY_XS SDA/SCL Status Reg */
#define IXGBE_MDIO_AUTO_NEG_CONTROL 0x0 /* AUTO_NEG Control Reg */
#define IXGBE_MDIO_AUTO_NEG_STATUS 0x1 /* AUTO_NEG Status Reg */
#define IXGBE_MDIO_AUTO_NEG_VENDOR_STAT 0xC800 /* AUTO_NEG Vendor Status Reg */
#define IXGBE_MDIO_AUTO_NEG_ADVT 0x10 /* AUTO_NEG Advt Reg */
#define IXGBE_MDIO_AUTO_NEG_LP 0x13 /* AUTO_NEG LP Status Reg */
#define IXGBE_MDIO_AUTO_NEG_EEE_ADVT 0x3C /* AUTO_NEG EEE Advt Reg */
#define IXGBE_MDIO_TX_VENDOR_ALARMS_3 0xCC02 /* Vendor Alarms 3 Reg */
#define IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK 0x3 /* PHY Reset Complete Mask */
#define IXGBE_MDIO_GLOBAL_RES_PR_10 0xC479 /* Global Resv Provisioning 10 Reg */
#define IXGBE_MDIO_POWER_UP_STALL 0x8000 /* Power Up Stall */
#define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR 0xC30A /* PHY_XS SDA/SCL Addr Reg */
#define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */
#define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT 0xC30C /* PHY_XS SDA/SCL Stat Reg */
#define IXGBE_MDIO_PMD_STD_TX_DISABLE_CNTR 0x9 /* Standard Tx Dis Reg */
#define IXGBE_MDIO_PMD_GLOBAL_TX_DISABLE 0x0001 /* PMD Global Tx Dis */
/* MII clause 22/28 definitions */
#define IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG 0xC400 /* 1G Provisioning 1 */
@ -1696,12 +1747,14 @@ enum {
#define IXGBE_SWFW_REGSMP 0x80000000 /* Register Semaphore bit 31 */
/* SW_FW_SYNC/GSSR definitions */
#define IXGBE_GSSR_EEP_SM 0x0001
#define IXGBE_GSSR_PHY0_SM 0x0002
#define IXGBE_GSSR_PHY1_SM 0x0004
#define IXGBE_GSSR_MAC_CSR_SM 0x0008
#define IXGBE_GSSR_FLASH_SM 0x0010
#define IXGBE_GSSR_SW_MNG_SM 0x0400
#define IXGBE_GSSR_EEP_SM 0x0001
#define IXGBE_GSSR_PHY0_SM 0x0002
#define IXGBE_GSSR_PHY1_SM 0x0004
#define IXGBE_GSSR_MAC_CSR_SM 0x0008
#define IXGBE_GSSR_FLASH_SM 0x0010
#define IXGBE_GSSR_SW_MNG_SM 0x0400
#define IXGBE_GSSR_SHARED_I2C_SM 0x1806 /* Wait for both phys & I2Cs */
#define IXGBE_GSSR_I2C_MASK 0x1800
/* FW Status register bitmask */
#define IXGBE_FWSTS_FWRI 0x00000200 /* Firmware Reset Indication */
@ -1735,27 +1788,32 @@ enum {
#define IXGBE_PBANUM_LENGTH 11
/* Checksum and EEPROM pointers */
#define IXGBE_PBANUM_PTR_GUARD 0xFAFA
#define IXGBE_EEPROM_CHECKSUM 0x3F
#define IXGBE_EEPROM_SUM 0xBABA
#define IXGBE_PCIE_ANALOG_PTR 0x03
#define IXGBE_ATLAS0_CONFIG_PTR 0x04
#define IXGBE_PHY_PTR 0x04
#define IXGBE_ATLAS1_CONFIG_PTR 0x05
#define IXGBE_OPTION_ROM_PTR 0x05
#define IXGBE_PCIE_GENERAL_PTR 0x06
#define IXGBE_PCIE_CONFIG0_PTR 0x07
#define IXGBE_PCIE_CONFIG1_PTR 0x08
#define IXGBE_CORE0_PTR 0x09
#define IXGBE_CORE1_PTR 0x0A
#define IXGBE_MAC0_PTR 0x0B
#define IXGBE_MAC1_PTR 0x0C
#define IXGBE_CSR0_CONFIG_PTR 0x0D
#define IXGBE_CSR1_CONFIG_PTR 0x0E
#define IXGBE_FW_PTR 0x0F
#define IXGBE_PBANUM0_PTR 0x15
#define IXGBE_PBANUM1_PTR 0x16
#define IXGBE_FREE_SPACE_PTR 0X3E
#define IXGBE_PBANUM_PTR_GUARD 0xFAFA
#define IXGBE_EEPROM_CHECKSUM 0x3F
#define IXGBE_EEPROM_SUM 0xBABA
#define IXGBE_PCIE_ANALOG_PTR 0x03
#define IXGBE_ATLAS0_CONFIG_PTR 0x04
#define IXGBE_PHY_PTR 0x04
#define IXGBE_ATLAS1_CONFIG_PTR 0x05
#define IXGBE_OPTION_ROM_PTR 0x05
#define IXGBE_PCIE_GENERAL_PTR 0x06
#define IXGBE_PCIE_CONFIG0_PTR 0x07
#define IXGBE_PCIE_CONFIG1_PTR 0x08
#define IXGBE_CORE0_PTR 0x09
#define IXGBE_CORE1_PTR 0x0A
#define IXGBE_MAC0_PTR 0x0B
#define IXGBE_MAC1_PTR 0x0C
#define IXGBE_CSR0_CONFIG_PTR 0x0D
#define IXGBE_CSR1_CONFIG_PTR 0x0E
#define IXGBE_PCIE_ANALOG_PTR_X550 0x02
#define IXGBE_SHADOW_RAM_SIZE_X550 0x4000
#define IXGBE_IXGBE_PCIE_GENERAL_SIZE 0x24
#define IXGBE_PCIE_CONFIG_SIZE 0x08
#define IXGBE_EEPROM_LAST_WORD 0x41
#define IXGBE_FW_PTR 0x0F
#define IXGBE_PBANUM0_PTR 0x15
#define IXGBE_PBANUM1_PTR 0x16
#define IXGBE_FREE_SPACE_PTR 0X3E
/* External Thermal Sensor Config */
#define IXGBE_ETS_CFG 0x26
@ -2016,6 +2074,7 @@ enum {
#define IXGBE_MRQC_RSS_FIELD_IPV4_UDP 0x00400000
#define IXGBE_MRQC_RSS_FIELD_IPV6_UDP 0x00800000
#define IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP 0x01000000
#define IXGBE_MRQC_MULTIPLE_RSS 0x00002000
#define IXGBE_MRQC_L3L4TXSWEN 0x00008000
#define IXGBE_FWSM_TS_ENABLED 0x1
@ -2312,18 +2371,32 @@ enum ixgbe_fdir_pballoc_type {
#define IXGBE_FDIR_DROP_QUEUE 127
/* Manageability Host Interface defines */
#define IXGBE_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Num of bytes in range */
#define IXGBE_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Num of dwords in range */
#define IXGBE_HI_COMMAND_TIMEOUT 500 /* Process HI command limit */
#define IXGBE_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Num of bytes in range */
#define IXGBE_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Num of dwords in range */
#define IXGBE_HI_COMMAND_TIMEOUT 500 /* Process HI command limit */
#define IXGBE_HI_FLASH_ERASE_TIMEOUT 1000 /* Process Erase command limit */
#define IXGBE_HI_FLASH_UPDATE_TIMEOUT 5000 /* Process Update command limit */
#define IXGBE_HI_FLASH_APPLY_TIMEOUT 0 /* Process Apply command limit */
/* CEM Support */
#define FW_CEM_HDR_LEN 0x4
#define FW_CEM_CMD_DRIVER_INFO 0xDD
#define FW_CEM_CMD_DRIVER_INFO_LEN 0x5
#define FW_CEM_CMD_RESERVED 0x0
#define FW_CEM_UNUSED_VER 0x0
#define FW_CEM_MAX_RETRIES 3
#define FW_CEM_RESP_STATUS_SUCCESS 0x1
#define FW_CEM_HDR_LEN 0x4
#define FW_CEM_CMD_DRIVER_INFO 0xDD
#define FW_CEM_CMD_DRIVER_INFO_LEN 0x5
#define FW_CEM_CMD_RESERVED 0x0
#define FW_CEM_UNUSED_VER 0x0
#define FW_CEM_MAX_RETRIES 3
#define FW_CEM_RESP_STATUS_SUCCESS 0x1
#define FW_READ_SHADOW_RAM_CMD 0x31
#define FW_READ_SHADOW_RAM_LEN 0x6
#define FW_WRITE_SHADOW_RAM_CMD 0x33
#define FW_WRITE_SHADOW_RAM_LEN 0xA /* 8 plus 1 WORD to write */
#define FW_SHADOW_RAM_DUMP_CMD 0x36
#define FW_SHADOW_RAM_DUMP_LEN 0
#define FW_DEFAULT_CHECKSUM 0xFF /* checksum always 0xFF */
#define FW_NVM_DATA_OFFSET 3
#define FW_MAX_READ_BUFFER_SIZE 1024
#define FW_DISABLE_RXEN_CMD 0xDE
#define FW_DISABLE_RXEN_LEN 0x1
/* Host Interface Command Structures */
struct ixgbe_hic_hdr {
@ -2336,6 +2409,25 @@ struct ixgbe_hic_hdr {
u8 checksum;
};
struct ixgbe_hic_hdr2_req {
u8 cmd;
u8 buf_lenh;
u8 buf_lenl;
u8 checksum;
};
struct ixgbe_hic_hdr2_rsp {
u8 cmd;
u8 buf_lenl;
u8 buf_lenh_status; /* 7-5: high bits of buf_len, 4-0: status */
u8 checksum;
};
union ixgbe_hic_hdr2 {
struct ixgbe_hic_hdr2_req req;
struct ixgbe_hic_hdr2_rsp rsp;
};
struct ixgbe_hic_drv_info {
struct ixgbe_hic_hdr hdr;
u8 port_num;
@ -2347,6 +2439,32 @@ struct ixgbe_hic_drv_info {
u16 pad2; /* end spacing to ensure length is mult. of dword2 */
};
/* These need to be dword aligned */
struct ixgbe_hic_read_shadow_ram {
union ixgbe_hic_hdr2 hdr;
u32 address;
u16 length;
u16 pad2;
u16 data;
u16 pad3;
};
struct ixgbe_hic_write_shadow_ram {
union ixgbe_hic_hdr2 hdr;
u32 address;
u16 length;
u16 pad2;
u16 data;
u16 pad3;
};
struct ixgbe_hic_disable_rxen {
struct ixgbe_hic_hdr hdr;
u8 port_number;
u8 pad2;
u16 pad3;
};
/* Transmit Descriptor - Advanced */
union ixgbe_adv_tx_desc {
struct {
@ -2623,6 +2741,9 @@ enum ixgbe_phy_type {
ixgbe_phy_none,
ixgbe_phy_tn,
ixgbe_phy_aq,
ixgbe_phy_x550em_kr,
ixgbe_phy_x550em_kx4,
ixgbe_phy_x550em_ext_t,
ixgbe_phy_cu_unknown,
ixgbe_phy_qt,
ixgbe_phy_xaui,
@ -2866,7 +2987,7 @@ struct ixgbe_eeprom_operations {
s32 (*write_buffer)(struct ixgbe_hw *, u16, u16, u16 *);
s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
s32 (*update_checksum)(struct ixgbe_hw *);
u16 (*calc_checksum)(struct ixgbe_hw *);
s32 (*calc_checksum)(struct ixgbe_hw *);
};
struct ixgbe_mac_operations {
@ -2888,8 +3009,8 @@ struct ixgbe_mac_operations {
s32 (*disable_rx_buff)(struct ixgbe_hw *);
s32 (*enable_rx_buff)(struct ixgbe_hw *);
s32 (*enable_rx_dma)(struct ixgbe_hw *, u32);
s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u16);
void (*release_swfw_sync)(struct ixgbe_hw *, u16);
s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u32);
void (*release_swfw_sync)(struct ixgbe_hw *, u32);
s32 (*prot_autoc_read)(struct ixgbe_hw *, bool *, u32 *);
s32 (*prot_autoc_write)(struct ixgbe_hw *, u32, bool);
@ -2935,6 +3056,11 @@ struct ixgbe_mac_operations {
s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
s32 (*get_thermal_sensor_data)(struct ixgbe_hw *);
s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw);
/* DMA Coalescing */
s32 (*dmac_config)(struct ixgbe_hw *hw);
s32 (*dmac_update_tcs)(struct ixgbe_hw *hw);
s32 (*dmac_config_tcs)(struct ixgbe_hw *hw);
};
struct ixgbe_phy_operations {
@ -2947,6 +3073,7 @@ struct ixgbe_phy_operations {
s32 (*read_reg_mdi)(struct ixgbe_hw *, u32, u32, u16 *);
s32 (*write_reg_mdi)(struct ixgbe_hw *, u32, u32, u16);
s32 (*setup_link)(struct ixgbe_hw *);
s32 (*setup_internal_link)(struct ixgbe_hw *);
s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool);
s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *);
s32 (*get_firmware_version)(struct ixgbe_hw *, u16 *);
@ -2955,6 +3082,8 @@ struct ixgbe_phy_operations {
s32 (*read_i2c_sff8472)(struct ixgbe_hw *, u8 , u8 *);
s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *);
s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
s32 (*read_i2c_combined)(struct ixgbe_hw *, u8 addr, u16 reg, u16 *val);
s32 (*write_i2c_combined)(struct ixgbe_hw *, u8 addr, u16 reg, u16 val);
s32 (*check_overtemp)(struct ixgbe_hw *);
};
@ -3007,6 +3136,8 @@ struct ixgbe_phy_info {
bool sfp_setup_needed;
u32 revision;
enum ixgbe_media_type media_type;
u8 lan_id;
u32 phy_semaphore_mask;
bool reset_disable;
ixgbe_autoneg_advertised autoneg_advertised;
enum ixgbe_smart_speed smart_speed;
@ -3113,4 +3244,71 @@ struct ixgbe_info {
#define IXGBE_ERR_HOST_INTERFACE_COMMAND -33
#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF
#define IXGBE_KRM_PORT_CAR_GEN_CTRL(P) ((P == 0) ? (0x4010) : (0x8010))
#define IXGBE_KRM_LINK_CTRL_1(P) ((P == 0) ? (0x420C) : (0x820C))
#define IXGBE_KRM_DSP_TXFFE_STATE_4(P) ((P == 0) ? (0x4634) : (0x8634))
#define IXGBE_KRM_DSP_TXFFE_STATE_5(P) ((P == 0) ? (0x4638) : (0x8638))
#define IXGBE_KRM_RX_TRN_LINKUP_CTRL(P) ((P == 0) ? (0x4B00) : (0x8B00))
#define IXGBE_KRM_PMD_DFX_BURNIN(P) ((P == 0) ? (0x4E00) : (0x8E00))
#define IXGBE_KRM_TX_COEFF_CTRL_1(P) ((P == 0) ? (0x5520) : (0x9520))
#define IXGBE_KRM_RX_ANA_CTL(P) ((P == 0) ? (0x5A00) : (0x9A00))
#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B (1 << 9)
#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS (1 << 11)
#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK (0x7 << 8)
#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G (2 << 8)
#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G (4 << 8)
#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ (1 << 14)
#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC (1 << 15)
#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX (1 << 16)
#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR (1 << 18)
#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX (1 << 24)
#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR (1 << 26)
#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE (1 << 29)
#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART (1 << 31)
#define IXGBE_KRM_DSP_TXFFE_STATE_C0_EN (1 << 6)
#define IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN (1 << 15)
#define IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN (1 << 16)
#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL (1 << 4)
#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS (1 << 2)
#define IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK (0x3 << 16)
#define IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN (1 << 1)
#define IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN (1 << 2)
#define IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN (1 << 3)
#define IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN (1 << 31)
#define IXGBE_KX4_LINK_CNTL_1 0x4C
#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX (1 << 16)
#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4 (1 << 17)
#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX (1 << 24)
#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX4 (1 << 25)
#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_ENABLE (1 << 29)
#define IXGBE_KX4_LINK_CNTL_1_TETH_FORCE_LINK_UP (1 << 30)
#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART (1 << 31)
#define IXGBE_SB_IOSF_INDIRECT_CTRL 0x00011144
#define IXGBE_SB_IOSF_INDIRECT_DATA 0x00011148
#define IXGBE_SB_IOSF_CTRL_ADDR_SHIFT 0
#define IXGBE_SB_IOSF_CTRL_ADDR_MASK 0xFF
#define IXGBE_SB_IOSF_CTRL_RESP_STAT_SHIFT 18
#define IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK \
(0x3 << IXGBE_SB_IOSF_CTRL_RESP_STAT_SHIFT)
#define IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT 20
#define IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK \
(0xFF << IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT)
#define IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT 28
#define IXGBE_SB_IOSF_CTRL_TARGET_SELECT_MASK 0x7
#define IXGBE_SB_IOSF_CTRL_BUSY_SHIFT 31
#define IXGBE_SB_IOSF_CTRL_BUSY (1 << IXGBE_SB_IOSF_CTRL_BUSY_SHIFT)
#define IXGBE_SB_IOSF_TARGET_KR_PHY 0
#define IXGBE_SB_IOSF_TARGET_KX4_UNIPHY 1
#define IXGBE_SB_IOSF_TARGET_KX4_PCS0 2
#define IXGBE_SB_IOSF_TARGET_KX4_PCS1 3
#endif /* _IXGBE_TYPE_H_ */


@ -32,6 +32,7 @@
#include "ixgbe.h"
#include "ixgbe_phy.h"
#include "ixgbe_x540.h"
#define IXGBE_X540_MAX_TX_QUEUES 128
#define IXGBE_X540_MAX_RX_QUEUES 128
@ -42,17 +43,15 @@
static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw);
static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw);
static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask);
static void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask);
static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw);
static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw);
static enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw)
enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw)
{
return ixgbe_media_type_copper;
}
static s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw)
s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
@ -76,9 +75,8 @@ static s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw)
* @speed: new link speed
* @autoneg_wait_to_complete: true when waiting for completion is needed
**/
static s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete)
s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed,
bool autoneg_wait_to_complete)
{
return hw->phy.ops.setup_link_speed(hw, speed,
autoneg_wait_to_complete);
@ -92,7 +90,7 @@ static s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw,
* and clears all interrupts, performs a PHY reset, and performs a link (MAC)
* reset.
**/
static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
{
s32 status;
u32 ctrl, i;
@ -179,7 +177,7 @@ static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
* and the generation-specific start_hw function.
* Then performs revision-specific operations, if any.
**/
static s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw)
s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw)
{
s32 ret_val;
@ -197,7 +195,7 @@ static s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw)
* Initializes the EEPROM parameters ixgbe_eeprom_info within the
* ixgbe_hw struct in order to set up EEPROM access.
**/
static s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
{
struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
u32 eec;
@ -316,7 +314,7 @@ static s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw,
*
* @hw: pointer to hardware structure
**/
static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
static s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
{
u16 i;
u16 j;
@ -324,6 +322,8 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
u16 length = 0;
u16 pointer = 0;
u16 word = 0;
u16 checksum_last_word = IXGBE_EEPROM_CHECKSUM;
u16 ptr_start = IXGBE_PCIE_ANALOG_PTR;
/*
* Do not use hw->eeprom.ops.read because we do not want to take
@ -332,10 +332,10 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
*/
/* Include 0x0-0x3F in the checksum */
for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
if (ixgbe_read_eerd_generic(hw, i, &word) != 0) {
for (i = 0; i < checksum_last_word; i++) {
if (ixgbe_read_eerd_generic(hw, i, &word)) {
hw_dbg(hw, "EEPROM read failed\n");
break;
return IXGBE_ERR_EEPROM;
}
checksum += word;
}
@ -344,11 +344,11 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
* Include all data from pointers 0x3, 0x6-0xE. This excludes the
* FW, PHY module, and PCIe Expansion/Option ROM pointers.
*/
for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
for (i = ptr_start; i < IXGBE_FW_PTR; i++) {
if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR)
continue;
if (ixgbe_read_eerd_generic(hw, i, &pointer) != 0) {
if (ixgbe_read_eerd_generic(hw, i, &pointer)) {
hw_dbg(hw, "EEPROM read failed\n");
break;
}
@ -358,8 +358,9 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
pointer >= hw->eeprom.word_size)
continue;
if (ixgbe_read_eerd_generic(hw, pointer, &length) != 0) {
if (ixgbe_read_eerd_generic(hw, pointer, &length)) {
hw_dbg(hw, "EEPROM read failed\n");
return IXGBE_ERR_EEPROM;
break;
}
@ -368,10 +369,10 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
(pointer + length) >= hw->eeprom.word_size)
continue;
for (j = pointer+1; j <= pointer+length; j++) {
if (ixgbe_read_eerd_generic(hw, j, &word) != 0) {
for (j = pointer + 1; j <= pointer + length; j++) {
if (ixgbe_read_eerd_generic(hw, j, &word)) {
hw_dbg(hw, "EEPROM read failed\n");
break;
return IXGBE_ERR_EEPROM;
}
checksum += word;
}
@ -379,7 +380,7 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
checksum = (u16)IXGBE_EEPROM_SUM - checksum;
return checksum;
return (s32)checksum;
}
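
The checksum helper's return type changes from u16 to s32 so a failed EEPROM read can surface as a negative error code instead of silently corrupting the checksum; on success the 16-bit checksum rides in the low half of the return value. The calling convention, as a sketch (it mirrors the validate/update callers below):

/* Negative return: an ixgbe error code such as IXGBE_ERR_EEPROM.
 * Non-negative return: checksum in the low 16 bits.
 */
s32 status = hw->eeprom.ops.calc_checksum(hw);
if (status < 0)
	return status;
checksum = (u16)(status & 0xffff);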
/**
@ -410,23 +411,34 @@ static s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw,
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
return IXGBE_ERR_SWFW_SYNC;
checksum = hw->eeprom.ops.calc_checksum(hw);
status = hw->eeprom.ops.calc_checksum(hw);
if (status < 0)
goto out;
checksum = (u16)(status & 0xffff);
/* Do not use hw->eeprom.ops.read because we do not want to take
* the synchronization semaphores twice here.
*/
status = ixgbe_read_eerd_generic(hw, IXGBE_EEPROM_CHECKSUM,
&read_checksum);
if (status)
goto out;
hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
/* Verify read checksum from EEPROM is the same as
* calculated checksum
*/
if (read_checksum != checksum) {
hw_dbg(hw, "Invalid EEPROM checksum");
status = IXGBE_ERR_EEPROM_CHECKSUM;
}
/* If the user cares, return the calculated checksum */
if (checksum_val)
*checksum_val = checksum;
/* Verify read and calculated checksums are the same */
if (read_checksum != checksum)
return IXGBE_ERR_EEPROM_CHECKSUM;
out:
hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
return status;
}
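
Note the error-handling shape here: the EEP_SM semaphore release now sits behind a single out: label, so the lock is dropped on every path, including the new early exits when calc_checksum or the EERD read fails. Distilled, with hypothetical helper names:

if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
	return IXGBE_ERR_SWFW_SYNC;

status = first_locked_step(hw);		/* hypothetical helper */
if (status)
	goto out;

status = second_locked_step(hw);	/* hypothetical helper */

out:
	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
	return status;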
@ -457,15 +469,22 @@ static s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw)
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
return IXGBE_ERR_SWFW_SYNC;
checksum = hw->eeprom.ops.calc_checksum(hw);
status = hw->eeprom.ops.calc_checksum(hw);
if (status < 0)
goto out;
checksum = (u16)(status & 0xffff);
/* Do not use hw->eeprom.ops.write because we do not want to
* take the synchronization semaphores twice here.
*/
status = ixgbe_write_eewr_generic(hw, IXGBE_EEPROM_CHECKSUM, checksum);
if (!status)
status = ixgbe_update_flash_X540(hw);
if (status)
goto out;
status = ixgbe_update_flash_X540(hw);
out:
hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
return status;
}
@ -544,7 +563,7 @@ static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw)
* Acquires the SWFW semaphore through the SW_FW_SYNC register for
* the specified function (CSR, PHY0, PHY1, NVM, Flash)
**/
static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
{
u32 swfw_sync;
u32 swmask = mask;
@ -612,7 +631,7 @@ static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
* Releases the SWFW semaphore through the SW_FW_SYNC register
* for the specified function (CSR, PHY0, PHY1, NVM, Flash)
**/
static void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
{
u32 swfw_sync;
u32 swmask = mask;
@ -699,7 +718,7 @@ static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw)
* Devices that implement the version 2 interface:
* X540
**/
static s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index)
s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index)
{
u32 macc_reg;
u32 ledctl_reg;
@ -735,7 +754,7 @@ static s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index)
* Devices that implement the version 2 interface:
* X540
**/
static s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index)
s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index)
{
u32 macc_reg;
u32 ledctl_reg;

View file

@ -0,0 +1,39 @@
/*******************************************************************************
*
* Intel 10 Gigabit PCI Express Linux driver
* Copyright(c) 1999 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* Linux NICS <linux.nics@intel.com>
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
*****************************************************************************/
#include "ixgbe_type.h"
s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw);
s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw);
s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw);
enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw);
s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask);
void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask);
s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw);

File diff suppressed because it is too large

View file

@ -31,6 +31,8 @@
/* Device IDs */
#define IXGBE_DEV_ID_82599_VF 0x10ED
#define IXGBE_DEV_ID_X540_VF 0x1515
#define IXGBE_DEV_ID_X550_VF 0x1565
#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8
#define IXGBE_VF_IRQ_CLEAR_MASK 7
#define IXGBE_VF_MAX_TX_QUEUES 8

View file

@ -432,10 +432,14 @@ enum ixbgevf_state_t {
enum ixgbevf_boards {
board_82599_vf,
board_X540_vf,
board_X550_vf,
board_X550EM_x_vf,
};
extern const struct ixgbevf_info ixgbevf_82599_vf_info;
extern const struct ixgbevf_info ixgbevf_X540_vf_info;
extern const struct ixgbevf_info ixgbevf_X550_vf_info;
extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_info;
extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops;
/* needed by ethtool.c */

View file

@ -66,6 +66,8 @@ static char ixgbevf_copyright[] =
static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
[board_82599_vf] = &ixgbevf_82599_vf_info,
[board_X540_vf] = &ixgbevf_X540_vf_info,
[board_X550_vf] = &ixgbevf_X550_vf_info,
[board_X550EM_x_vf] = &ixgbevf_X550EM_x_vf_info,
};
/* ixgbevf_pci_tbl - PCI Device ID Table
@ -79,6 +81,8 @@ static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
static const struct pci_device_id ixgbevf_pci_tbl[] = {
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF), board_X550_vf },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF), board_X550EM_x_vf },
/* required last entry */
{0, }
};
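
Each new ID reaches its board data through driver_data: the PCI table entry stores an enum ixgbevf_boards index, and probe uses it to pull the matching ixgbevf_info (the same lookup appears in the probe hunk below). A sketch:

/* Sketch: resolving IXGBE_DEV_ID_X550_VF (0x1565) to its per-MAC info.
 * ent->driver_data was set to board_X550_vf in ixgbevf_pci_tbl above.
 */
static const struct ixgbevf_info *board_info_sketch(const struct pci_device_id *ent)
{
	const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];

	/* ii->mac identifies the MAC (ixgbe_mac_X550_vf here); all VF
	 * variants share the same ixgbevf_mac_ops.
	 */
	return ii;
}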
@ -3529,7 +3533,7 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
break;
default:
if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
if (adapter->hw.mac.type != ixgbe_mac_82599_vf)
max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
break;
}
@ -3733,6 +3737,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct ixgbe_hw *hw = NULL;
const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
int err, pci_using_dac;
bool disable_dev = false;
err = pci_enable_device(pdev);
if (err)
@ -3767,7 +3772,6 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
SET_NETDEV_DEV(netdev, &pdev->dev);
pci_set_drvdata(pdev, netdev);
adapter = netdev_priv(netdev);
adapter->netdev = netdev;
@ -3856,16 +3860,28 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto err_register;
pci_set_drvdata(pdev, netdev);
netif_carrier_off(netdev);
ixgbevf_init_last_counter_stats(adapter);
/* print the MAC address */
hw_dbg(hw, "%pM\n", netdev->dev_addr);
/* print the VF info */
dev_info(&pdev->dev, "%pM\n", netdev->dev_addr);
dev_info(&pdev->dev, "MAC: %d\n", hw->mac.type);
hw_dbg(hw, "MAC: %d\n", hw->mac.type);
switch (hw->mac.type) {
case ixgbe_mac_X550_vf:
dev_info(&pdev->dev, "Intel(R) X550 Virtual Function\n");
break;
case ixgbe_mac_X540_vf:
dev_info(&pdev->dev, "Intel(R) X540 Virtual Function\n");
break;
case ixgbe_mac_82599_vf:
default:
dev_info(&pdev->dev, "Intel(R) 82599 Virtual Function\n");
break;
}
hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
return 0;
err_register:
@ -3874,12 +3890,13 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ixgbevf_reset_interrupt_capability(adapter);
iounmap(adapter->io_addr);
err_ioremap:
disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state);
free_netdev(netdev);
err_alloc_etherdev:
pci_release_regions(pdev);
err_pci_reg:
err_dma:
if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
if (!adapter || disable_dev)
pci_disable_device(pdev);
return err;
}
@ -3896,7 +3913,13 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
static void ixgbevf_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbevf_adapter *adapter;
bool disable_dev;
if (!netdev)
return;
adapter = netdev_priv(netdev);
set_bit(__IXGBEVF_REMOVING, &adapter->state);
@ -3916,9 +3939,10 @@ static void ixgbevf_remove(struct pci_dev *pdev)
hw_dbg(&adapter->hw, "Remove complete\n");
disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state);
free_netdev(netdev);
if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
if (disable_dev)
pci_disable_device(pdev);
}
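
Both the probe error path and remove now latch the __IXGBEVF_DISABLED test into a local before calling free_netdev(): the adapter lives inside the netdev's private area, so reading adapter->state afterwards would be a use-after-free. The ordering, distilled:

/* Sample the adapter state first; free_netdev() frees the embedded
 * adapter, so only the cached result may be used afterwards.
 */
bool disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state);

free_netdev(netdev);		/* adapter is invalid from here on */

if (disable_dev)
	pci_disable_device(pdev);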

View file

@ -617,3 +617,13 @@ const struct ixgbevf_info ixgbevf_X540_vf_info = {
.mac = ixgbe_mac_X540_vf,
.mac_ops = &ixgbevf_mac_ops,
};
const struct ixgbevf_info ixgbevf_X550_vf_info = {
.mac = ixgbe_mac_X550_vf,
.mac_ops = &ixgbevf_mac_ops,
};
const struct ixgbevf_info ixgbevf_X550EM_x_vf_info = {
.mac = ixgbe_mac_X550EM_x_vf,
.mac_ops = &ixgbevf_mac_ops,
};

View file

@ -74,6 +74,8 @@ enum ixgbe_mac_type {
ixgbe_mac_unknown = 0,
ixgbe_mac_82599_vf,
ixgbe_mac_X540_vf,
ixgbe_mac_X550_vf,
ixgbe_mac_X550EM_x_vf,
ixgbe_num_macs
};