Merge branch 'for-davem' of git://git.kernel.org/pub/scm/linux/kernel/git/bwh/sfc-next

Ben Hutchings says:

====================
More refactoring and cleanup, particularly around filter management.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

commit 45cc3a0c97
17 changed files with 2119 additions and 1768 deletions
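Editor's note: the series replaces the old type-based filter specs with match-flag-based specs and routes filter operations through per-NIC-type method pointers. A minimal sketch of the resulting driver-internal calling pattern, using only helpers visible in the diffs below (the surrounding context -- a valid `efx` pointer, queue number and addresses -- is assumed for illustration):

```c
/* Illustrative only: steer local TCP port 80 traffic to RX queue 1. */
static int example_insert_port80_filter(struct efx_nic *efx)
{
	struct efx_filter_spec spec;
	s32 rc;

	/* Zeroes the spec and sets priority, RX flag and target queue */
	efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 0, 1);

	/* Match local IPv4 host 192.0.2.1, TCP destination port 80 */
	efx_filter_set_ipv4_local(&spec, IPPROTO_TCP,
				  htonl(0xc0000201), htons(80));

	/* Delegates to efx->type->filter_insert(); returns a filter ID
	 * or a negative error such as -EEXIST */
	rc = efx_filter_insert_filter(efx, &spec, true);
	return rc < 0 ? rc : 0;
}
```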
@@ -1,5 +1,4 @@
sfc-y += efx.o nic.o farch.o falcon.o siena.o tx.o rx.o \
filter.o \
selftest.o ethtool.o qt202x_phy.o mdio_10g.o \
tenxpress.o txc43128_phy.o falcon_boards.o \
mcdi.o mcdi_port.o mcdi_mon.o ptp.o
@@ -17,7 +17,6 @@
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/in.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/topology.h>
#include <linux/gfp.h>

@@ -339,6 +338,7 @@ static void efx_init_eventq(struct efx_channel *channel)
channel->eventq_read_ptr = 0;
efx_nic_init_eventq(channel);
channel->eventq_init = true;
}
/* Enable event queue processing and NAPI */

@@ -367,10 +367,14 @@ static void efx_stop_eventq(struct efx_channel *channel)
static void efx_fini_eventq(struct efx_channel *channel)
{
if (!channel->eventq_init)
return;
netif_dbg(channel->efx, drv, channel->efx->net_dev,
"chan %d fini event queue\n", channel->channel);
efx_nic_fini_eventq(channel);
channel->eventq_init = false;
}
static void efx_remove_eventq(struct efx_channel *channel)

@@ -606,7 +610,7 @@ static void efx_start_datapath(struct efx_nic *efx)
/* RX filters also have scatter-enabled flags */
if (efx->rx_scatter != old_rx_scatter)
efx_filter_update_rx_scatter(efx);
efx->type->filter_update_rx_scatter(efx);
/* We must keep at least one descriptor in a TX ring empty.
* We could avoid this when the queue size does not exactly

@@ -871,10 +875,9 @@ void efx_link_status_changed(struct efx_nic *efx)
/* Status message for kernel log */
if (link_state->up)
netif_info(efx, link, efx->net_dev,
"link up at %uMbps %s-duplex (MTU %d)%s\n",
"link up at %uMbps %s-duplex (MTU %d)\n",
link_state->speed, link_state->fd ? "full" : "half",
efx->net_dev->mtu,
(efx->promiscuous ? " [PROMISC]" : ""));
efx->net_dev->mtu);
else
netif_info(efx, link, efx->net_dev, "link down\n");
}

@@ -923,10 +926,6 @@ int __efx_reconfigure_port(struct efx_nic *efx)
WARN_ON(!mutex_is_locked(&efx->mac_lock));
/* Serialise the promiscuous flag with efx_set_rx_mode. */
netif_addr_lock_bh(efx->net_dev);
netif_addr_unlock_bh(efx->net_dev);
/* Disable PHY transmit in mac level loopbacks */
phy_mode = efx->phy_mode;
if (LOOPBACK_INTERNAL(efx))

@@ -1084,6 +1083,7 @@ static int efx_init_io(struct efx_nic *efx)
{
struct pci_dev *pci_dev = efx->pci_dev;
dma_addr_t dma_mask = efx->type->max_dma_mask;
unsigned int mem_map_size = efx->type->mem_map_size(efx);
int rc;
netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");

@@ -1136,20 +1136,18 @@ static int efx_init_io(struct efx_nic *efx)
rc = -EIO;
goto fail3;
}
efx->membase = ioremap_nocache(efx->membase_phys,
efx->type->mem_map_size);
efx->membase = ioremap_nocache(efx->membase_phys, mem_map_size);
if (!efx->membase) {
netif_err(efx, probe, efx->net_dev,
"could not map memory BAR at %llx+%x\n",
(unsigned long long)efx->membase_phys,
efx->type->mem_map_size);
(unsigned long long)efx->membase_phys, mem_map_size);
rc = -ENOMEM;
goto fail4;
}
netif_dbg(efx, probe, efx->net_dev,
"memory BAR at %llx+%x (virtual %p)\n",
(unsigned long long)efx->membase_phys,
efx->type->mem_map_size, efx->membase);
(unsigned long long)efx->membase_phys, mem_map_size,
efx->membase);
return 0;

@@ -1228,8 +1226,6 @@ static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
*/
static int efx_probe_interrupts(struct efx_nic *efx)
{
unsigned int max_channels =
min(efx->type->phys_addr_channels, EFX_MAX_CHANNELS);
unsigned int extra_channels = 0;
unsigned int i, j;
int rc;

@@ -1246,7 +1242,7 @@ static int efx_probe_interrupts(struct efx_nic *efx)
if (separate_tx_channels)
n_channels *= 2;
n_channels += extra_channels;
n_channels = min(n_channels, max_channels);
n_channels = min(n_channels, efx->max_channels);
for (i = 0; i < n_channels; i++)
xentries[i].entry = i;

@@ -1497,6 +1493,44 @@ static void efx_remove_nic(struct efx_nic *efx)
efx->type->remove(efx);
}
static int efx_probe_filters(struct efx_nic *efx)
{
int rc;
spin_lock_init(&efx->filter_lock);
rc = efx->type->filter_table_probe(efx);
if (rc)
return rc;
#ifdef CONFIG_RFS_ACCEL
if (efx->type->offload_features & NETIF_F_NTUPLE) {
efx->rps_flow_id = kcalloc(efx->type->max_rx_ip_filters,
sizeof(*efx->rps_flow_id),
GFP_KERNEL);
if (!efx->rps_flow_id) {
efx->type->filter_table_remove(efx);
return -ENOMEM;
}
}
#endif
return 0;
}
static void efx_remove_filters(struct efx_nic *efx)
{
#ifdef CONFIG_RFS_ACCEL
kfree(efx->rps_flow_id);
#endif
efx->type->filter_table_remove(efx);
}
static void efx_restore_filters(struct efx_nic *efx)
{
efx->type->filter_table_restore(efx);
}
/**************************************************************************
*
* NIC startup/shutdown

@@ -1987,30 +2021,6 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data)
static void efx_set_rx_mode(struct net_device *net_dev)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct netdev_hw_addr *ha;
union efx_multicast_hash *mc_hash = &efx->multicast_hash;
u32 crc;
int bit;
efx->promiscuous = !!(net_dev->flags & IFF_PROMISC);
/* Build multicast hash table */
if (efx->promiscuous || (net_dev->flags & IFF_ALLMULTI)) {
memset(mc_hash, 0xff, sizeof(*mc_hash));
} else {
memset(mc_hash, 0x00, sizeof(*mc_hash));
netdev_for_each_mc_addr(ha, net_dev) {
crc = ether_crc_le(ETH_ALEN, ha->addr);
bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
__set_bit_le(bit, mc_hash);
}
/* Broadcast packets go through the multicast hash filter.
* ether_crc_le() of the broadcast address is 0xbe2612ff
* so we always add bit 0xff to the mask.
*/
__set_bit_le(0xff, mc_hash);
}
if (efx->port_enabled)
queue_work(efx->workqueue, &efx->mac_work);
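Editor's note: the broadcast comment in efx_set_rx_mode() above leans on a small arithmetic fact; a stand-alone sketch of the same hash-bit computation (EFX_MCAST_HASH_ENTRIES is the driver's 256-entry hash size; the helper name is illustrative):

```c
/* Illustrative only: map a destination MAC to its multicast hash bit.
 * For ff:ff:ff:ff:ff:ff, ether_crc_le() yields 0xbe2612ff, so
 * crc & 0xff == 0xff -- which is why bit 0xff is always set so that
 * broadcast frames pass the multicast hash filter. */
static unsigned int example_mcast_hash_bit(const u8 *addr)
{
	u32 crc = ether_crc_le(ETH_ALEN, addr);

	return crc & (EFX_MCAST_HASH_ENTRIES - 1); /* low 8 bits of the CRC */
}
```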
@@ -2489,8 +2499,6 @@ static int efx_init_struct(struct efx_nic *efx,
efx->msi_context[i].index = i;
}
EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);
/* Higher numbered interrupt modes are less capable! */
efx->interrupt_mode = max(efx->type->max_interrupt_mode,
interrupt_mode);
@@ -68,27 +68,92 @@ extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
#define EFX_TXQ_MIN_ENT(efx) (2 * efx_tx_max_skb_descs(efx))
/* Filters */
extern int efx_probe_filters(struct efx_nic *efx);
extern void efx_restore_filters(struct efx_nic *efx);
extern void efx_remove_filters(struct efx_nic *efx);
extern void efx_filter_update_rx_scatter(struct efx_nic *efx);
extern s32 efx_filter_insert_filter(struct efx_nic *efx,
struct efx_filter_spec *spec,
bool replace);
extern int efx_filter_remove_id_safe(struct efx_nic *efx,
enum efx_filter_priority priority,
u32 filter_id);
extern int efx_filter_get_filter_safe(struct efx_nic *efx,
enum efx_filter_priority priority,
u32 filter_id, struct efx_filter_spec *);
extern void efx_filter_clear_rx(struct efx_nic *efx,
enum efx_filter_priority priority);
extern u32 efx_filter_count_rx_used(struct efx_nic *efx,
enum efx_filter_priority priority);
extern u32 efx_filter_get_rx_id_limit(struct efx_nic *efx);
extern s32 efx_filter_get_rx_ids(struct efx_nic *efx,
enum efx_filter_priority priority,
u32 *buf, u32 size);
/**
* efx_filter_insert_filter - add or replace a filter
* @efx: NIC in which to insert the filter
* @spec: Specification for the filter
* @replace_equal: Flag for whether the specified filter may replace an
* existing filter with equal priority
*
* On success, return the filter ID.
* On failure, return a negative error code.
*
* If an existing filter has equal match values to the new filter
* spec, then the new filter might replace it, depending on the
* relative priorities. If the existing filter has lower priority, or
* if @replace_equal is set and it has equal priority, then it is
* replaced. Otherwise the function fails, returning -%EPERM if
* the existing filter has higher priority or -%EEXIST if it has
* equal priority.
*/
static inline s32 efx_filter_insert_filter(struct efx_nic *efx,
struct efx_filter_spec *spec,
bool replace_equal)
{
return efx->type->filter_insert(efx, spec, replace_equal);
}
/**
* efx_filter_remove_id_safe - remove a filter by ID, carefully
* @efx: NIC from which to remove the filter
* @priority: Priority of filter, as passed to @efx_filter_insert_filter
* @filter_id: ID of filter, as returned by @efx_filter_insert_filter
*
* This function will range-check @filter_id, so it is safe to call
* with a value passed from userland.
*/
static inline int efx_filter_remove_id_safe(struct efx_nic *efx,
enum efx_filter_priority priority,
u32 filter_id)
{
return efx->type->filter_remove_safe(efx, priority, filter_id);
}
/**
* efx_filter_get_filter_safe - retrieve a filter by ID, carefully
* @efx: NIC from which to remove the filter
* @priority: Priority of filter, as passed to @efx_filter_insert_filter
* @filter_id: ID of filter, as returned by @efx_filter_insert_filter
* @spec: Buffer in which to store filter specification
*
* This function will range-check @filter_id, so it is safe to call
* with a value passed from userland.
*/
static inline int
efx_filter_get_filter_safe(struct efx_nic *efx,
enum efx_filter_priority priority,
u32 filter_id, struct efx_filter_spec *spec)
{
return efx->type->filter_get_safe(efx, priority, filter_id, spec);
}
/**
* efx_farch_filter_clear_rx - remove RX filters by priority
* @efx: NIC from which to remove the filters
* @priority: Maximum priority to remove
*/
static inline void efx_filter_clear_rx(struct efx_nic *efx,
enum efx_filter_priority priority)
{
return efx->type->filter_clear_rx(efx, priority);
}
static inline u32 efx_filter_count_rx_used(struct efx_nic *efx,
enum efx_filter_priority priority)
{
return efx->type->filter_count_rx_used(efx, priority);
}
static inline u32 efx_filter_get_rx_id_limit(struct efx_nic *efx)
{
return efx->type->filter_get_rx_id_limit(efx);
}
static inline s32 efx_filter_get_rx_ids(struct efx_nic *efx,
enum efx_filter_priority priority,
u32 *buf, u32 size)
{
return efx->type->filter_get_rx_ids(efx, priority, buf, size);
}
#ifdef CONFIG_RFS_ACCEL
extern int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id);
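Editor's note: the kerneldoc above spells out the insert/replace rules; a hedged sketch of the calling pattern for the *_safe helpers, which range-check the filter ID returned by insert (`efx` and `mac` are assumed for illustration):

```c
/* Illustrative only: insert, read back and remove a filter by ID. */
struct efx_filter_spec spec, readback;
s32 id;

efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 0, 0);
efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, mac);

id = efx_filter_insert_filter(efx, &spec, false);
if (id >= 0) {
	efx_filter_get_filter_safe(efx, EFX_FILTER_PRI_MANUAL, id, &readback);
	efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_MANUAL, id);
} else if (id == -EEXIST) {
	/* an equal-priority filter with the same match already exists
	 * and replace_equal was false */
}
```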
@@ -799,11 +799,12 @@ static int efx_ethtool_reset(struct net_device *net_dev, u32 *flags)
return efx_reset(efx, rc);
}
/* MAC address mask including only MC flag */
static const u8 mac_addr_mc_mask[ETH_ALEN] = { 0x01, 0, 0, 0, 0, 0 };
/* MAC address mask including only I/G bit */
static const u8 mac_addr_ig_mask[ETH_ALEN] = { 0x01, 0, 0, 0, 0, 0 };
#define IP4_ADDR_FULL_MASK ((__force __be32)~0)
#define PORT_FULL_MASK ((__force __be16)~0)
#define ETHER_TYPE_FULL_MASK ((__force __be16)~0)
static int efx_ethtool_get_class_rule(struct efx_nic *efx,
struct ethtool_rx_flow_spec *rule)

@@ -813,8 +814,6 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx,
struct ethhdr *mac_entry = &rule->h_u.ether_spec;
struct ethhdr *mac_mask = &rule->m_u.ether_spec;
struct efx_filter_spec spec;
u16 vid;
u8 proto;
int rc;
rc = efx_filter_get_filter_safe(efx, EFX_FILTER_PRI_MANUAL,

@@ -822,44 +821,72 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx,
if (rc)
return rc;
if (spec.dmaq_id == 0xfff)
if (spec.dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
rule->ring_cookie = RX_CLS_FLOW_DISC;
else
rule->ring_cookie = spec.dmaq_id;
if (spec.type == EFX_FILTER_MC_DEF || spec.type == EFX_FILTER_UC_DEF) {
rule->flow_type = ETHER_FLOW;
memcpy(mac_mask->h_dest, mac_addr_mc_mask, ETH_ALEN);
if (spec.type == EFX_FILTER_MC_DEF)
memcpy(mac_entry->h_dest, mac_addr_mc_mask, ETH_ALEN);
return 0;
}
rc = efx_filter_get_eth_local(&spec, &vid, mac_entry->h_dest);
if (rc == 0) {
rule->flow_type = ETHER_FLOW;
memset(mac_mask->h_dest, ~0, ETH_ALEN);
if (vid != EFX_FILTER_VID_UNSPEC) {
rule->flow_type |= FLOW_EXT;
rule->h_ext.vlan_tci = htons(vid);
rule->m_ext.vlan_tci = htons(0xfff);
if ((spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) &&
spec.ether_type == htons(ETH_P_IP) &&
(spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) &&
(spec.ip_proto == IPPROTO_TCP || spec.ip_proto == IPPROTO_UDP) &&
!(spec.match_flags &
~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID |
EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST |
EFX_FILTER_MATCH_IP_PROTO |
EFX_FILTER_MATCH_LOC_PORT | EFX_FILTER_MATCH_REM_PORT))) {
rule->flow_type = ((spec.ip_proto == IPPROTO_TCP) ?
TCP_V4_FLOW : UDP_V4_FLOW);
if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) {
ip_entry->ip4dst = spec.loc_host[0];
ip_mask->ip4dst = IP4_ADDR_FULL_MASK;
}
return 0;
if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) {
ip_entry->ip4src = spec.rem_host[0];
ip_mask->ip4src = IP4_ADDR_FULL_MASK;
}
if (spec.match_flags & EFX_FILTER_MATCH_LOC_PORT) {
ip_entry->pdst = spec.loc_port;
ip_mask->pdst = PORT_FULL_MASK;
}
if (spec.match_flags & EFX_FILTER_MATCH_REM_PORT) {
ip_entry->psrc = spec.rem_port;
ip_mask->psrc = PORT_FULL_MASK;
}
} else if (!(spec.match_flags &
~(EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG |
EFX_FILTER_MATCH_REM_MAC | EFX_FILTER_MATCH_ETHER_TYPE |
EFX_FILTER_MATCH_OUTER_VID))) {
rule->flow_type = ETHER_FLOW;
if (spec.match_flags &
(EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG)) {
memcpy(mac_entry->h_dest, spec.loc_mac, ETH_ALEN);
if (spec.match_flags & EFX_FILTER_MATCH_LOC_MAC)
memset(mac_mask->h_dest, ~0, ETH_ALEN);
else
memcpy(mac_mask->h_dest, mac_addr_ig_mask,
ETH_ALEN);
}
if (spec.match_flags & EFX_FILTER_MATCH_REM_MAC) {
memcpy(mac_entry->h_source, spec.rem_mac, ETH_ALEN);
memset(mac_mask->h_source, ~0, ETH_ALEN);
}
if (spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) {
mac_entry->h_proto = spec.ether_type;
mac_mask->h_proto = ETHER_TYPE_FULL_MASK;
}
} else {
/* The above should handle all filters that we insert */
WARN_ON(1);
return -EINVAL;
}
rc = efx_filter_get_ipv4_local(&spec, &proto,
&ip_entry->ip4dst, &ip_entry->pdst);
if (rc != 0) {
rc = efx_filter_get_ipv4_full(
&spec, &proto, &ip_entry->ip4dst, &ip_entry->pdst,
&ip_entry->ip4src, &ip_entry->psrc);
EFX_WARN_ON_PARANOID(rc);
ip_mask->ip4src = IP4_ADDR_FULL_MASK;
ip_mask->psrc = PORT_FULL_MASK;
if (spec.match_flags & EFX_FILTER_MATCH_OUTER_VID) {
rule->flow_type |= FLOW_EXT;
rule->h_ext.vlan_tci = spec.outer_vid;
rule->m_ext.vlan_tci = htons(0xfff);
}
rule->flow_type = (proto == IPPROTO_TCP) ? TCP_V4_FLOW : UDP_V4_FLOW;
ip_mask->ip4dst = IP4_ADDR_FULL_MASK;
ip_mask->pdst = PORT_FULL_MASK;
return rc;
}

@@ -967,82 +994,80 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx,
efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL,
efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
(rule->ring_cookie == RX_CLS_FLOW_DISC) ?
0xfff : rule->ring_cookie);
EFX_FILTER_RX_DMAQ_ID_DROP : rule->ring_cookie);
switch (rule->flow_type) {
switch (rule->flow_type & ~FLOW_EXT) {
case TCP_V4_FLOW:
case UDP_V4_FLOW: {
u8 proto = (rule->flow_type == TCP_V4_FLOW ?
IPPROTO_TCP : IPPROTO_UDP);
/* Must match all of destination, */
if (!(ip_mask->ip4dst == IP4_ADDR_FULL_MASK &&
ip_mask->pdst == PORT_FULL_MASK))
case UDP_V4_FLOW:
spec.match_flags = (EFX_FILTER_MATCH_ETHER_TYPE |
EFX_FILTER_MATCH_IP_PROTO);
spec.ether_type = htons(ETH_P_IP);
spec.ip_proto = ((rule->flow_type & ~FLOW_EXT) == TCP_V4_FLOW ?
IPPROTO_TCP : IPPROTO_UDP);
if (ip_mask->ip4dst) {
if (ip_mask->ip4dst != IP4_ADDR_FULL_MASK)
return -EINVAL;
spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST;
spec.loc_host[0] = ip_entry->ip4dst;
}
if (ip_mask->ip4src) {
if (ip_mask->ip4src != IP4_ADDR_FULL_MASK)
return -EINVAL;
spec.match_flags |= EFX_FILTER_MATCH_REM_HOST;
spec.rem_host[0] = ip_entry->ip4src;
}
if (ip_mask->pdst) {
if (ip_mask->pdst != PORT_FULL_MASK)
return -EINVAL;
spec.match_flags |= EFX_FILTER_MATCH_LOC_PORT;
spec.loc_port = ip_entry->pdst;
}
if (ip_mask->psrc) {
if (ip_mask->psrc != PORT_FULL_MASK)
return -EINVAL;
spec.match_flags |= EFX_FILTER_MATCH_REM_PORT;
spec.rem_port = ip_entry->psrc;
}
if (ip_mask->tos)
return -EINVAL;
/* all or none of source, */
if ((ip_mask->ip4src || ip_mask->psrc) &&
!(ip_mask->ip4src == IP4_ADDR_FULL_MASK &&
ip_mask->psrc == PORT_FULL_MASK))
return -EINVAL;
/* and nothing else */
if (ip_mask->tos || rule->m_ext.vlan_tci)
return -EINVAL;
if (ip_mask->ip4src)
rc = efx_filter_set_ipv4_full(&spec, proto,
ip_entry->ip4dst,
ip_entry->pdst,
ip_entry->ip4src,
ip_entry->psrc);
else
rc = efx_filter_set_ipv4_local(&spec, proto,
ip_entry->ip4dst,
ip_entry->pdst);
if (rc)
return rc;
break;
}
case ETHER_FLOW | FLOW_EXT:
case ETHER_FLOW: {
u16 vlan_tag_mask = (rule->flow_type & FLOW_EXT ?
ntohs(rule->m_ext.vlan_tci) : 0);
/* Must not match on source address or Ethertype */
if (!is_zero_ether_addr(mac_mask->h_source) ||
mac_mask->h_proto)
return -EINVAL;
/* Is it a default UC or MC filter? */
if (ether_addr_equal(mac_mask->h_dest, mac_addr_mc_mask) &&
vlan_tag_mask == 0) {
if (is_multicast_ether_addr(mac_entry->h_dest))
rc = efx_filter_set_mc_def(&spec);
case ETHER_FLOW:
if (!is_zero_ether_addr(mac_mask->h_dest)) {
if (ether_addr_equal(mac_mask->h_dest,
mac_addr_ig_mask))
spec.match_flags |= EFX_FILTER_MATCH_LOC_MAC_IG;
else if (is_broadcast_ether_addr(mac_mask->h_dest))
spec.match_flags |= EFX_FILTER_MATCH_LOC_MAC;
else
rc = efx_filter_set_uc_def(&spec);
return -EINVAL;
memcpy(spec.loc_mac, mac_entry->h_dest, ETH_ALEN);
}
/* Otherwise, it must match all of destination and all
* or none of VID.
*/
else if (is_broadcast_ether_addr(mac_mask->h_dest) &&
(vlan_tag_mask == 0xfff || vlan_tag_mask == 0)) {
rc = efx_filter_set_eth_local(
&spec,
vlan_tag_mask ?
ntohs(rule->h_ext.vlan_tci) : EFX_FILTER_VID_UNSPEC,
mac_entry->h_dest);
} else {
rc = -EINVAL;
if (!is_zero_ether_addr(mac_mask->h_source)) {
if (!is_broadcast_ether_addr(mac_mask->h_source))
return -EINVAL;
spec.match_flags |= EFX_FILTER_MATCH_REM_MAC;
memcpy(spec.rem_mac, mac_entry->h_source, ETH_ALEN);
}
if (mac_mask->h_proto) {
if (mac_mask->h_proto != ETHER_TYPE_FULL_MASK)
return -EINVAL;
spec.match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
spec.ether_type = mac_entry->h_proto;
}
if (rc)
return rc;
break;
}
default:
return -EINVAL;
}
if ((rule->flow_type & FLOW_EXT) && rule->m_ext.vlan_tci) {
if (rule->m_ext.vlan_tci != htons(0xfff))
return -EINVAL;
spec.match_flags |= EFX_FILTER_MATCH_OUTER_VID;
spec.outer_vid = rule->h_ext.vlan_tci;
}
rc = efx_filter_insert_filter(efx, &spec, true);
if (rc < 0)
return rc;
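Editor's note: these class-rule hooks back the standard ethtool ntuple interface, so the conversion above is what runs for commands such as `ethtool -N ethX flow-type tcp4 dst-ip 192.0.2.1 dst-port 80 action 1` (steer to RX queue 1) or the same command with `action -1` (drop, which maps RX_CLS_FLOW_DISC onto EFX_FILTER_RX_DMAQ_ID_DROP); the device name and addresses here are illustrative.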
@@ -434,7 +434,7 @@ static int falcon_spi_wait(struct efx_nic *efx)
}
}
int falcon_spi_cmd(struct efx_nic *efx, const struct efx_spi_device *spi,
int falcon_spi_cmd(struct efx_nic *efx, const struct falcon_spi_device *spi,
unsigned int command, int address,
const void *in, void *out, size_t len)
{

@@ -491,22 +491,22 @@ int falcon_spi_cmd(struct efx_nic *efx, const struct efx_spi_device *spi,
}
static size_t
falcon_spi_write_limit(const struct efx_spi_device *spi, size_t start)
falcon_spi_write_limit(const struct falcon_spi_device *spi, size_t start)
{
return min(FALCON_SPI_MAX_LEN,
(spi->block_size - (start & (spi->block_size - 1))));
}
static inline u8
efx_spi_munge_command(const struct efx_spi_device *spi,
const u8 command, const unsigned int address)
falcon_spi_munge_command(const struct falcon_spi_device *spi,
const u8 command, const unsigned int address)
{
return command | (((address >> 8) & spi->munge_address) << 3);
}
/* Wait up to 10 ms for buffered write completion */
int
falcon_spi_wait_write(struct efx_nic *efx, const struct efx_spi_device *spi)
falcon_spi_wait_write(struct efx_nic *efx, const struct falcon_spi_device *spi)
{
unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 100);
u8 status;
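Editor's note: the address "munging" above folds address bit 8 into bit 3 of the command byte for small serial EEPROMs; a worked example of the arithmetic (the device parameters are illustrative):

```c
/* Illustrative only: a 9-bit-addressed EEPROM sets spi->munge_address = 1,
 * so a read at offset 0x1a3 becomes
 *   SPI_READ | (((0x1a3 >> 8) & 1) << 3) == SPI_READ | 0x08,
 * while larger devices use munge_address = 0 and leave the command alone. */
u8 command = falcon_spi_munge_command(spi, SPI_READ, 0x1a3);
```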
@@ -530,7 +530,7 @@ falcon_spi_wait_write(struct efx_nic *efx, const struct efx_spi_device *spi)
}
}
int falcon_spi_read(struct efx_nic *efx, const struct efx_spi_device *spi,
int falcon_spi_read(struct efx_nic *efx, const struct falcon_spi_device *spi,
loff_t start, size_t len, size_t *retlen, u8 *buffer)
{
size_t block_len, pos = 0;

@@ -540,7 +540,7 @@ int falcon_spi_read(struct efx_nic *efx, const struct efx_spi_device *spi,
while (pos < len) {
block_len = min(len - pos, FALCON_SPI_MAX_LEN);
command = efx_spi_munge_command(spi, SPI_READ, start + pos);
command = falcon_spi_munge_command(spi, SPI_READ, start + pos);
rc = falcon_spi_cmd(efx, spi, command, start + pos, NULL,
buffer + pos, block_len);
if (rc)

@@ -561,7 +561,7 @@ int falcon_spi_read(struct efx_nic *efx, const struct efx_spi_device *spi,
}
int
falcon_spi_write(struct efx_nic *efx, const struct efx_spi_device *spi,
falcon_spi_write(struct efx_nic *efx, const struct falcon_spi_device *spi,
loff_t start, size_t len, size_t *retlen, const u8 *buffer)
{
u8 verify_buffer[FALCON_SPI_MAX_LEN];

@@ -576,7 +576,7 @@ falcon_spi_write(struct efx_nic *efx, const struct efx_spi_device *spi,
block_len = min(len - pos,
falcon_spi_write_limit(spi, start + pos));
command = efx_spi_munge_command(spi, SPI_WRITE, start + pos);
command = falcon_spi_munge_command(spi, SPI_WRITE, start + pos);
rc = falcon_spi_cmd(efx, spi, command, start + pos,
buffer + pos, NULL, block_len);
if (rc)

@@ -586,7 +586,7 @@ falcon_spi_write(struct efx_nic *efx, const struct efx_spi_device *spi,
if (rc)
break;
command = efx_spi_munge_command(spi, SPI_READ, start + pos);
command = falcon_spi_munge_command(spi, SPI_READ, start + pos);
rc = falcon_spi_cmd(efx, spi, command, start + pos,
NULL, verify_buffer, block_len);
if (memcmp(verify_buffer, buffer + pos, block_len)) {

@@ -686,7 +686,7 @@ static void falcon_ack_status_intr(struct efx_nic *efx)
return;
/* We expect xgmii faults if the wireside link is down */
if (!EFX_WORKAROUND_5147(efx) || !efx->link_state.up)
if (!efx->link_state.up)
return;
/* We can only use this interrupt to signal the negative edge of

@@ -764,7 +764,7 @@ static void falcon_reconfigure_xmac_core(struct efx_nic *efx)
FRF_AB_XM_RXEN, 1,
FRF_AB_XM_AUTO_DEPAD, 0,
FRF_AB_XM_ACPT_ALL_MCAST, 1,
FRF_AB_XM_ACPT_ALL_UCAST, efx->promiscuous,
FRF_AB_XM_ACPT_ALL_UCAST, !efx->unicast_filter,
FRF_AB_XM_PASS_CRC_ERR, 1);
efx_writeo(efx, &reg, FR_AB_XM_RX_CFG);

@@ -795,29 +795,22 @@ static void falcon_reconfigure_xgxs_core(struct efx_nic *efx)
bool xgxs_loopback = (efx->loopback_mode == LOOPBACK_XGXS);
bool xaui_loopback = (efx->loopback_mode == LOOPBACK_XAUI);
bool xgmii_loopback = (efx->loopback_mode == LOOPBACK_XGMII);
bool old_xgmii_loopback, old_xgxs_loopback, old_xaui_loopback;
/* XGXS block is flaky and will need to be reset if moving
* into our out of XGMII, XGXS or XAUI loopbacks. */
if (EFX_WORKAROUND_5147(efx)) {
bool old_xgmii_loopback, old_xgxs_loopback, old_xaui_loopback;
bool reset_xgxs;
efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
old_xgxs_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN);
old_xgmii_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN);
efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
old_xgxs_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN);
old_xgmii_loopback =
EFX_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN);
efx_reado(efx, &reg, FR_AB_XX_SD_CTL);
old_xaui_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_LPBKA);
efx_reado(efx, &reg, FR_AB_XX_SD_CTL);
old_xaui_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_LPBKA);
/* The PHY driver may have turned XAUI off */
reset_xgxs = ((xgxs_loopback != old_xgxs_loopback) ||
(xaui_loopback != old_xaui_loopback) ||
(xgmii_loopback != old_xgmii_loopback));
if (reset_xgxs)
falcon_reset_xaui(efx);
}
/* The PHY driver may have turned XAUI off */
if ((xgxs_loopback != old_xgxs_loopback) ||
(xaui_loopback != old_xaui_loopback) ||
(xgmii_loopback != old_xgmii_loopback))
falcon_reset_xaui(efx);
efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_FORCE_SIG,

@@ -871,6 +864,8 @@ static int falcon_reconfigure_xmac(struct efx_nic *efx)
{
struct falcon_nic_data *nic_data = efx->nic_data;
efx_farch_filter_sync_rx_mode(efx);
falcon_reconfigure_xgxs_core(efx);
falcon_reconfigure_xmac_core(efx);

@@ -946,8 +941,8 @@ static void falcon_poll_xmac(struct efx_nic *efx)
{
struct falcon_nic_data *nic_data = efx->nic_data;
if (!EFX_WORKAROUND_5147(efx) || !efx->link_state.up ||
!nic_data->xmac_poll_required)
/* We expect xgmii faults if the wireside link is down */
if (!efx->link_state.up || !nic_data->xmac_poll_required)
return;
nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1);
@@ -1088,7 +1083,7 @@ static void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
EFX_POPULATE_OWORD_5(reg,
FRF_AB_MAC_XOFF_VAL, 0xffff /* max pause time */,
FRF_AB_MAC_BCAD_ACPT, 1,
FRF_AB_MAC_UC_PROM, efx->promiscuous,
FRF_AB_MAC_UC_PROM, !efx->unicast_filter,
FRF_AB_MAC_LINK_STATUS, 1, /* always set */
FRF_AB_MAC_SPEED, link_speed);
/* On B0, MAC backpressure can be disabled and packets get

@@ -1486,15 +1481,15 @@ falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
{
struct falcon_nic_data *nic_data = efx->nic_data;
struct falcon_nvconfig *nvconfig;
struct efx_spi_device *spi;
struct falcon_spi_device *spi;
void *region;
int rc, magic_num, struct_ver;
__le16 *word, *limit;
u32 csum;
if (efx_spi_present(&nic_data->spi_flash))
if (falcon_spi_present(&nic_data->spi_flash))
spi = &nic_data->spi_flash;
else if (efx_spi_present(&nic_data->spi_eeprom))
else if (falcon_spi_present(&nic_data->spi_eeprom))
spi = &nic_data->spi_eeprom;
else
return -EINVAL;

@@ -1509,7 +1504,7 @@ falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
mutex_unlock(&nic_data->spi_lock);
if (rc) {
netif_err(efx, hw, efx->net_dev, "Failed to read %s\n",
efx_spi_present(&nic_data->spi_flash) ?
falcon_spi_present(&nic_data->spi_flash) ?
"flash" : "EEPROM");
rc = -EIO;
goto out;

@@ -1854,7 +1849,7 @@ static int falcon_reset_sram(struct efx_nic *efx)
}
static void falcon_spi_device_init(struct efx_nic *efx,
struct efx_spi_device *spi_device,
struct falcon_spi_device *spi_device,
unsigned int device_id, u32 device_type)
{
if (device_type != 0) {

@@ -1970,6 +1965,20 @@ static void falcon_probe_spi_devices(struct efx_nic *efx)
large_eeprom_type);
}
static unsigned int falcon_a1_mem_map_size(struct efx_nic *efx)
{
return 0x20000;
}
static unsigned int falcon_b0_mem_map_size(struct efx_nic *efx)
{
/* Map everything up to and including the RSS indirection table.
* The PCI core takes care of mapping the MSI-X tables.
*/
return FR_BZ_RX_INDIRECTION_TBL +
FR_BZ_RX_INDIRECTION_TBL_STEP * FR_BZ_RX_INDIRECTION_TBL_ROWS;
}
static int falcon_probe_nic(struct efx_nic *efx)
{
struct falcon_nic_data *nic_data;

@@ -2060,6 +2069,8 @@ static int falcon_probe_nic(struct efx_nic *efx)
goto fail5;
}
efx->max_channels = (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ? 4 :
EFX_MAX_CHANNELS);
efx->timer_quantum_ns = 4968; /* 621 cycles */
/* Initialise I2C adapter */

@@ -2339,6 +2350,7 @@ static int falcon_set_wol(struct efx_nic *efx, u32 type)
*/
const struct efx_nic_type falcon_a1_nic_type = {
.mem_map_size = falcon_a1_mem_map_size,
.probe = falcon_probe_nic,
.remove = falcon_remove_nic,
.init = falcon_init_nic,

@@ -2390,8 +2402,22 @@ const struct efx_nic_type falcon_a1_nic_type = {
.ev_read_ack = efx_farch_ev_read_ack,
.ev_test_generate = efx_farch_ev_test_generate,
/* We don't expose the filter table on Falcon A1 as it is not
* mapped into function 0, but these implementations still
* work with a degenerate case of all tables set to size 0.
*/
.filter_table_probe = efx_farch_filter_table_probe,
.filter_table_restore = efx_farch_filter_table_restore,
.filter_table_remove = efx_farch_filter_table_remove,
.filter_insert = efx_farch_filter_insert,
.filter_remove_safe = efx_farch_filter_remove_safe,
.filter_get_safe = efx_farch_filter_get_safe,
.filter_clear_rx = efx_farch_filter_clear_rx,
.filter_count_rx_used = efx_farch_filter_count_rx_used,
.filter_get_rx_id_limit = efx_farch_filter_get_rx_id_limit,
.filter_get_rx_ids = efx_farch_filter_get_rx_ids,
.revision = EFX_REV_FALCON_A1,
.mem_map_size = 0x20000,
.txd_ptr_tbl_base = FR_AA_TX_DESC_PTR_TBL_KER,
.rxd_ptr_tbl_base = FR_AA_RX_DESC_PTR_TBL_KER,
.buf_tbl_base = FR_AA_BUF_FULL_TBL_KER,

@@ -2401,13 +2427,13 @@ const struct efx_nic_type falcon_a1_nic_type = {
.rx_buffer_padding = 0x24,
.can_rx_scatter = false,
.max_interrupt_mode = EFX_INT_MODE_MSI,
.phys_addr_channels = 4,
.timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH,
.offload_features = NETIF_F_IP_CSUM,
.mcdi_max_ver = -1,
};
const struct efx_nic_type falcon_b0_nic_type = {
.mem_map_size = falcon_b0_mem_map_size,
.probe = falcon_probe_nic,
.remove = falcon_remove_nic,
.init = falcon_init_nic,

@@ -2459,14 +2485,23 @@ const struct efx_nic_type falcon_b0_nic_type = {
.ev_process = efx_farch_ev_process,
.ev_read_ack = efx_farch_ev_read_ack,
.ev_test_generate = efx_farch_ev_test_generate,
.filter_table_probe = efx_farch_filter_table_probe,
.filter_table_restore = efx_farch_filter_table_restore,
.filter_table_remove = efx_farch_filter_table_remove,
.filter_update_rx_scatter = efx_farch_filter_update_rx_scatter,
.filter_insert = efx_farch_filter_insert,
.filter_remove_safe = efx_farch_filter_remove_safe,
.filter_get_safe = efx_farch_filter_get_safe,
.filter_clear_rx = efx_farch_filter_clear_rx,
.filter_count_rx_used = efx_farch_filter_count_rx_used,
.filter_get_rx_id_limit = efx_farch_filter_get_rx_id_limit,
.filter_get_rx_ids = efx_farch_filter_get_rx_ids,
#ifdef CONFIG_RFS_ACCEL
.filter_rfs_insert = efx_farch_filter_rfs_insert,
.filter_rfs_expire_one = efx_farch_filter_rfs_expire_one,
#endif
.revision = EFX_REV_FALCON_B0,
/* Map everything up to and including the RSS indirection
* table. Don't map MSI-X table, MSI-X PBA since Linux
* requires that they not be mapped. */
.mem_map_size = (FR_BZ_RX_INDIRECTION_TBL +
FR_BZ_RX_INDIRECTION_TBL_STEP *
FR_BZ_RX_INDIRECTION_TBL_ROWS),
.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
.rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
.buf_tbl_base = FR_BZ_BUF_FULL_TBL,

@@ -2477,11 +2512,9 @@ const struct efx_nic_type falcon_b0_nic_type = {
.rx_buffer_padding = 0,
.can_rx_scatter = true,
.max_interrupt_mode = EFX_INT_MODE_MSIX,
.phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
* interrupt handler only supports 32
* channels */
.timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH,
.offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH | NETIF_F_NTUPLE,
.mcdi_max_ver = -1,
.max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS,
};
(Two file diffs suppressed because they are too large.)
@@ -11,32 +11,49 @@
#define EFX_FILTER_H
#include <linux/types.h>
#include <linux/if_ether.h>
#include <asm/byteorder.h>
/**
* enum efx_filter_type - type of hardware filter
* @EFX_FILTER_TCP_FULL: Matching TCP/IPv4 4-tuple
* @EFX_FILTER_TCP_WILD: Matching TCP/IPv4 destination (host, port)
* @EFX_FILTER_UDP_FULL: Matching UDP/IPv4 4-tuple
* @EFX_FILTER_UDP_WILD: Matching UDP/IPv4 destination (host, port)
* @EFX_FILTER_MAC_FULL: Matching Ethernet destination MAC address, VID
* @EFX_FILTER_MAC_WILD: Matching Ethernet destination MAC address
* @EFX_FILTER_UC_DEF: Matching all otherwise unmatched unicast
* @EFX_FILTER_MC_DEF: Matching all otherwise unmatched multicast
* @EFX_FILTER_UNSPEC: Match type is unspecified
* enum efx_filter_match_flags - Flags for hardware filter match type
* @EFX_FILTER_MATCH_REM_HOST: Match by remote IP host address
* @EFX_FILTER_MATCH_LOC_HOST: Match by local IP host address
* @EFX_FILTER_MATCH_REM_MAC: Match by remote MAC address
* @EFX_FILTER_MATCH_REM_PORT: Match by remote TCP/UDP port
* @EFX_FILTER_MATCH_LOC_MAC: Match by local MAC address
* @EFX_FILTER_MATCH_LOC_PORT: Match by local TCP/UDP port
* @EFX_FILTER_MATCH_ETHER_TYPE: Match by Ether-type
* @EFX_FILTER_MATCH_INNER_VID: Match by inner VLAN ID
* @EFX_FILTER_MATCH_OUTER_VID: Match by outer VLAN ID
* @EFX_FILTER_MATCH_IP_PROTO: Match by IP transport protocol
* @EFX_FILTER_MATCH_LOC_MAC_IG: Match by local MAC address I/G bit.
* Used for RX default unicast and multicast/broadcast filters.
*
* Falcon NICs only support the TCP/IPv4 and UDP/IPv4 filter types.
* Only some combinations are supported, depending on NIC type:
*
* - Falcon supports RX filters matching by {TCP,UDP}/IPv4 4-tuple or
* local 2-tuple (only implemented for Falcon B0)
*
* - Siena supports RX and TX filters matching by {TCP,UDP}/IPv4 4-tuple
* or local 2-tuple, or local MAC with or without outer VID, and RX
* default filters
*
* - Huntington supports filter matching controlled by firmware, potentially
* using {TCP,UDP}/IPv{4,6} 4-tuple or local 2-tuple, local MAC or I/G bit,
* with or without outer and inner VID
*/
enum efx_filter_type {
EFX_FILTER_TCP_FULL = 0,
EFX_FILTER_TCP_WILD,
EFX_FILTER_UDP_FULL,
EFX_FILTER_UDP_WILD,
EFX_FILTER_MAC_FULL = 4,
EFX_FILTER_MAC_WILD,
EFX_FILTER_UC_DEF = 8,
EFX_FILTER_MC_DEF,
EFX_FILTER_TYPE_COUNT, /* number of specific types */
EFX_FILTER_UNSPEC = 0xf,
enum efx_filter_match_flags {
EFX_FILTER_MATCH_REM_HOST = 0x0001,
EFX_FILTER_MATCH_LOC_HOST = 0x0002,
EFX_FILTER_MATCH_REM_MAC = 0x0004,
EFX_FILTER_MATCH_REM_PORT = 0x0008,
EFX_FILTER_MATCH_LOC_MAC = 0x0010,
EFX_FILTER_MATCH_LOC_PORT = 0x0020,
EFX_FILTER_MATCH_ETHER_TYPE = 0x0040,
EFX_FILTER_MATCH_INNER_VID = 0x0080,
EFX_FILTER_MATCH_OUTER_VID = 0x0100,
EFX_FILTER_MATCH_IP_PROTO = 0x0200,
EFX_FILTER_MATCH_LOC_MAC_IG = 0x0400,
};
/**

@@ -61,37 +78,75 @@ enum efx_filter_priority {
* according to the indirection table.
* @EFX_FILTER_FLAG_RX_SCATTER: Enable DMA scatter on the receiving
* queue.
* @EFX_FILTER_FLAG_RX_STACK: Indicates a filter inserted for the
* network stack. The filter must have a priority of
* %EFX_FILTER_PRI_REQUIRED. It can be steered by a replacement
* request with priority %EFX_FILTER_PRI_MANUAL, and a removal
* request with priority %EFX_FILTER_PRI_MANUAL will reset the
* steering (but not remove the filter).
* @EFX_FILTER_FLAG_RX: Filter is for RX
* @EFX_FILTER_FLAG_TX: Filter is for TX
*/
enum efx_filter_flags {
EFX_FILTER_FLAG_RX_RSS = 0x01,
EFX_FILTER_FLAG_RX_SCATTER = 0x02,
EFX_FILTER_FLAG_RX_STACK = 0x04,
EFX_FILTER_FLAG_RX = 0x08,
EFX_FILTER_FLAG_TX = 0x10,
};
/**
* struct efx_filter_spec - specification for a hardware filter
* @type: Type of match to be performed, from &enum efx_filter_type
* @match_flags: Match type flags, from &enum efx_filter_match_flags
* @priority: Priority of the filter, from &enum efx_filter_priority
* @flags: Miscellaneous flags, from &enum efx_filter_flags
* @dmaq_id: Source/target queue index
* @data: Match data (type-dependent)
* @rss_context: RSS context to use, if %EFX_FILTER_FLAG_RX_RSS is set
* @dmaq_id: Source/target queue index, or %EFX_FILTER_RX_DMAQ_ID_DROP for
* an RX drop filter
* @outer_vid: Outer VLAN ID to match, if %EFX_FILTER_MATCH_OUTER_VID is set
* @inner_vid: Inner VLAN ID to match, if %EFX_FILTER_MATCH_INNER_VID is set
* @loc_mac: Local MAC address to match, if %EFX_FILTER_MATCH_LOC_MAC or
* %EFX_FILTER_MATCH_LOC_MAC_IG is set
* @rem_mac: Remote MAC address to match, if %EFX_FILTER_MATCH_REM_MAC is set
* @ether_type: Ether-type to match, if %EFX_FILTER_MATCH_ETHER_TYPE is set
* @ip_proto: IP transport protocol to match, if %EFX_FILTER_MATCH_IP_PROTO
* is set
* @loc_host: Local IP host to match, if %EFX_FILTER_MATCH_LOC_HOST is set
* @rem_host: Remote IP host to match, if %EFX_FILTER_MATCH_REM_HOST is set
* @loc_port: Local TCP/UDP port to match, if %EFX_FILTER_MATCH_LOC_PORT is set
* @rem_port: Remote TCP/UDP port to match, if %EFX_FILTER_MATCH_REM_PORT is set
*
* Use the efx_filter_set_*() functions to initialise the @type and
* @data fields.
* The efx_filter_init_rx() or efx_filter_init_tx() function *must* be
* used to initialise the structure. The efx_filter_set_*() functions
* may then be used to set @rss_context, @match_flags and related
* fields.
*
* The @priority field is used by software to determine whether a new
* filter may replace an old one. The hardware priority of a filter
* depends on the filter type.
* depends on which fields are matched.
*/
struct efx_filter_spec {
u8 type:4;
u8 priority:4;
u8 flags;
u16 dmaq_id;
u32 data[3];
u32 match_flags:12;
u32 priority:2;
u32 flags:6;
u32 dmaq_id:12;
u32 rss_context;
__be16 outer_vid __aligned(4); /* allow jhash2() of match values */
__be16 inner_vid;
u8 loc_mac[ETH_ALEN];
u8 rem_mac[ETH_ALEN];
__be16 ether_type;
u8 ip_proto;
__be32 loc_host[4];
__be32 rem_host[4];
__be16 loc_port;
__be16 rem_port;
/* total 64 bytes */
};
enum {
EFX_FILTER_RSS_CONTEXT_DEFAULT = 0xffffffff,
EFX_FILTER_RX_DMAQ_ID_DROP = 0xfff
};
static inline void efx_filter_init_rx(struct efx_filter_spec *spec,

@@ -99,39 +154,116 @@ static inline void efx_filter_init_rx(struct efx_filter_spec *spec,
enum efx_filter_flags flags,
unsigned rxq_id)
{
spec->type = EFX_FILTER_UNSPEC;
memset(spec, 0, sizeof(*spec));
spec->priority = priority;
spec->flags = EFX_FILTER_FLAG_RX | flags;
spec->rss_context = EFX_FILTER_RSS_CONTEXT_DEFAULT;
spec->dmaq_id = rxq_id;
}
static inline void efx_filter_init_tx(struct efx_filter_spec *spec,
unsigned txq_id)
{
spec->type = EFX_FILTER_UNSPEC;
memset(spec, 0, sizeof(*spec));
spec->priority = EFX_FILTER_PRI_REQUIRED;
spec->flags = EFX_FILTER_FLAG_TX;
spec->dmaq_id = txq_id;
}
extern int efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto,
__be32 host, __be16 port);
extern int efx_filter_get_ipv4_local(const struct efx_filter_spec *spec,
u8 *proto, __be32 *host, __be16 *port);
extern int efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto,
__be32 host, __be16 port,
__be32 rhost, __be16 rport);
extern int efx_filter_get_ipv4_full(const struct efx_filter_spec *spec,
u8 *proto, __be32 *host, __be16 *port,
__be32 *rhost, __be16 *rport);
extern int efx_filter_set_eth_local(struct efx_filter_spec *spec,
u16 vid, const u8 *addr);
extern int efx_filter_get_eth_local(const struct efx_filter_spec *spec,
u16 *vid, u8 *addr);
extern int efx_filter_set_uc_def(struct efx_filter_spec *spec);
extern int efx_filter_set_mc_def(struct efx_filter_spec *spec);
/**
* efx_filter_set_ipv4_local - specify IPv4 host, transport protocol and port
* @spec: Specification to initialise
* @proto: Transport layer protocol number
* @host: Local host address (network byte order)
* @port: Local port (network byte order)
*/
static inline int
efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto,
__be32 host, __be16 port)
{
spec->match_flags |=
EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT;
spec->ether_type = htons(ETH_P_IP);
spec->ip_proto = proto;
spec->loc_host[0] = host;
spec->loc_port = port;
return 0;
}
/**
* efx_filter_set_ipv4_full - specify IPv4 hosts, transport protocol and ports
* @spec: Specification to initialise
* @proto: Transport layer protocol number
* @lhost: Local host address (network byte order)
* @lport: Local port (network byte order)
* @rhost: Remote host address (network byte order)
* @rport: Remote port (network byte order)
*/
static inline int
efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto,
__be32 lhost, __be16 lport,
__be32 rhost, __be16 rport)
{
spec->match_flags |=
EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
spec->ether_type = htons(ETH_P_IP);
spec->ip_proto = proto;
spec->loc_host[0] = lhost;
spec->loc_port = lport;
spec->rem_host[0] = rhost;
spec->rem_port = rport;
return 0;
}
enum {
EFX_FILTER_VID_UNSPEC = 0xffff,
};
/**
* efx_filter_set_eth_local - specify local Ethernet address and/or VID
* @spec: Specification to initialise
* @vid: Outer VLAN ID to match, or %EFX_FILTER_VID_UNSPEC
* @addr: Local Ethernet MAC address, or %NULL
*/
static inline int efx_filter_set_eth_local(struct efx_filter_spec *spec,
u16 vid, const u8 *addr)
{
if (vid == EFX_FILTER_VID_UNSPEC && addr == NULL)
return -EINVAL;
if (vid != EFX_FILTER_VID_UNSPEC) {
spec->match_flags |= EFX_FILTER_MATCH_OUTER_VID;
spec->outer_vid = htons(vid);
}
if (addr != NULL) {
spec->match_flags |= EFX_FILTER_MATCH_LOC_MAC;
memcpy(spec->loc_mac, addr, ETH_ALEN);
}
return 0;
}
/**
* efx_filter_set_uc_def - specify matching otherwise-unmatched unicast
* @spec: Specification to initialise
*/
static inline int efx_filter_set_uc_def(struct efx_filter_spec *spec)
{
spec->match_flags |= EFX_FILTER_MATCH_LOC_MAC_IG;
return 0;
}
/**
* efx_filter_set_mc_def - specify matching otherwise-unmatched multicast
* @spec: Specification to initialise
*/
static inline int efx_filter_set_mc_def(struct efx_filter_spec *spec)
{
spec->match_flags |= EFX_FILTER_MATCH_LOC_MAC_IG;
spec->loc_mac[0] = 1;
return 0;
}
#endif /* EFX_FILTER_H */
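Editor's note: as the struct documentation above says, a spec is now built by initialising it and then OR-ing in match flags; the two fragments below build the same RX filter, once via the efx_filter_set_eth_local() helper and once with raw fields (the MAC and VLAN values are illustrative):

```c
static const u8 mac[ETH_ALEN] = { 0x00, 0x0f, 0x53, 0x01, 0x02, 0x03 };
struct efx_filter_spec a, b;

/* Helper-based form */
efx_filter_init_rx(&a, EFX_FILTER_PRI_MANUAL, 0, 0);
efx_filter_set_eth_local(&a, 10, mac);

/* Equivalent raw match_flags form */
efx_filter_init_rx(&b, EFX_FILTER_PRI_MANUAL, 0, 0);
b.match_flags |= EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_OUTER_VID;
b.outer_vid = htons(10);
memcpy(b.loc_mac, mac, ETH_ALEN);
```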
@@ -128,6 +128,60 @@ extern void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
EFX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0, _value)
#define MCDI_DWORD(_buf, _field) \
EFX_DWORD_FIELD(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0)
#define MCDI_POPULATE_DWORD_1(_buf, _field, _name1, _value1) \
EFX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), \
MC_CMD_ ## _name1, _value1)
#define MCDI_POPULATE_DWORD_2(_buf, _field, _name1, _value1, \
_name2, _value2) \
EFX_POPULATE_DWORD_2(*_MCDI_DWORD(_buf, _field), \
MC_CMD_ ## _name1, _value1, \
MC_CMD_ ## _name2, _value2)
#define MCDI_POPULATE_DWORD_3(_buf, _field, _name1, _value1, \
_name2, _value2, _name3, _value3) \
EFX_POPULATE_DWORD_3(*_MCDI_DWORD(_buf, _field), \
MC_CMD_ ## _name1, _value1, \
MC_CMD_ ## _name2, _value2, \
MC_CMD_ ## _name3, _value3)
#define MCDI_POPULATE_DWORD_4(_buf, _field, _name1, _value1, \
_name2, _value2, _name3, _value3, \
_name4, _value4) \
EFX_POPULATE_DWORD_4(*_MCDI_DWORD(_buf, _field), \
MC_CMD_ ## _name1, _value1, \
MC_CMD_ ## _name2, _value2, \
MC_CMD_ ## _name3, _value3, \
MC_CMD_ ## _name4, _value4)
#define MCDI_POPULATE_DWORD_5(_buf, _field, _name1, _value1, \
_name2, _value2, _name3, _value3, \
_name4, _value4, _name5, _value5) \
EFX_POPULATE_DWORD_5(*_MCDI_DWORD(_buf, _field), \
MC_CMD_ ## _name1, _value1, \
MC_CMD_ ## _name2, _value2, \
MC_CMD_ ## _name3, _value3, \
MC_CMD_ ## _name4, _value4, \
MC_CMD_ ## _name5, _value5)
#define MCDI_POPULATE_DWORD_6(_buf, _field, _name1, _value1, \
_name2, _value2, _name3, _value3, \
_name4, _value4, _name5, _value5, \
_name6, _value6) \
EFX_POPULATE_DWORD_6(*_MCDI_DWORD(_buf, _field), \
MC_CMD_ ## _name1, _value1, \
MC_CMD_ ## _name2, _value2, \
MC_CMD_ ## _name3, _value3, \
MC_CMD_ ## _name4, _value4, \
MC_CMD_ ## _name5, _value5, \
MC_CMD_ ## _name6, _value6)
#define MCDI_POPULATE_DWORD_7(_buf, _field, _name1, _value1, \
_name2, _value2, _name3, _value3, \
_name4, _value4, _name5, _value5, \
_name6, _value6, _name7, _value7) \
EFX_POPULATE_DWORD_7(*_MCDI_DWORD(_buf, _field), \
MC_CMD_ ## _name1, _value1, \
MC_CMD_ ## _name2, _value2, \
MC_CMD_ ## _name3, _value3, \
MC_CMD_ ## _name4, _value4, \
MC_CMD_ ## _name5, _value5, \
MC_CMD_ ## _name6, _value6, \
MC_CMD_ ## _name7, _value7)
#define MCDI_SET_QWORD(_buf, _field, _value) \
do { \
EFX_POPULATE_DWORD_1(_MCDI_DWORD(_buf, _field)[0], \
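Editor's note: the new MCDI_POPULATE_DWORD_* helpers simply prepend the MC_CMD_ prefix before delegating to EFX_POPULATE_DWORD_*; a sketch of how one call in this series expands (field names taken from the mcdi.c hunk below):

```c
/* Illustrative expansion: the call used later for SET_MAC ... */
MCDI_POPULATE_DWORD_1(cmdbytes, SET_MAC_IN_REJECT,
		      SET_MAC_IN_REJECT_UNCST, efx->unicast_filter);
/* ... token-pastes to roughly: */
EFX_POPULATE_DWORD_1(*_MCDI_DWORD(cmdbytes, SET_MAC_IN_REJECT),
		     MC_CMD_SET_MAC_IN_REJECT_UNCST, efx->unicast_filter);
```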
@@ -861,7 +861,7 @@ void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev)
int efx_mcdi_set_mac(struct efx_nic *efx)
{
u32 reject, fcntl;
u32 fcntl;
MCDI_DECLARE_BUF(cmdbytes, MC_CMD_SET_MAC_IN_LEN);
BUILD_BUG_ON(MC_CMD_SET_MAC_OUT_LEN != 0);

@@ -873,12 +873,9 @@ int efx_mcdi_set_mac(struct efx_nic *efx)
EFX_MAX_FRAME_LEN(efx->net_dev->mtu));
MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_DRAIN, 0);
/* The MCDI command provides for controlling accept/reject
* of broadcast packets too, but the driver doesn't currently
* expose this. */
reject = (efx->promiscuous) ? 0 :
(1 << MC_CMD_SET_MAC_IN_REJECT_UNCST_LBN);
MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_REJECT, reject);
/* Set simple MAC filter for Siena */
MCDI_POPULATE_DWORD_1(cmdbytes, SET_MAC_IN_REJECT,
SET_MAC_IN_REJECT_UNCST, efx->unicast_filter);
switch (efx->wanted_fc) {
case EFX_FC_RX | EFX_FC_TX:

@@ -926,21 +923,19 @@ static int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN);
int rc;
efx_dword_t *cmd_ptr;
int period = enable ? 1000 : 0;
BUILD_BUG_ON(MC_CMD_MAC_STATS_OUT_DMA_LEN != 0);
MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, dma_addr);
cmd_ptr = (efx_dword_t *)MCDI_PTR(inbuf, MAC_STATS_IN_CMD);
EFX_POPULATE_DWORD_7(*cmd_ptr,
MC_CMD_MAC_STATS_IN_DMA, !!enable,
MC_CMD_MAC_STATS_IN_CLEAR, clear,
MC_CMD_MAC_STATS_IN_PERIODIC_CHANGE, 1,
MC_CMD_MAC_STATS_IN_PERIODIC_ENABLE, !!enable,
MC_CMD_MAC_STATS_IN_PERIODIC_CLEAR, 0,
MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT, 1,
MC_CMD_MAC_STATS_IN_PERIOD_MS, period);
MCDI_POPULATE_DWORD_7(inbuf, MAC_STATS_IN_CMD,
MAC_STATS_IN_DMA, !!enable,
MAC_STATS_IN_CLEAR, clear,
MAC_STATS_IN_PERIODIC_CHANGE, 1,
MAC_STATS_IN_PERIODIC_ENABLE, !!enable,
MAC_STATS_IN_PERIODIC_CLEAR, 0,
MAC_STATS_IN_PERIODIC_NOEVENT, 1,
MAC_STATS_IN_PERIOD_MS, period);
MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
rc = efx_mcdi_rpc(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
|
|||
#include "mcdi.h"
|
||||
#include "mcdi_pcol.h"
|
||||
|
||||
#define EFX_SPI_VERIFY_BUF_LEN 16
|
||||
#define FALCON_SPI_VERIFY_BUF_LEN 16
|
||||
|
||||
struct efx_mtd_partition {
|
||||
struct list_head node;
|
||||
struct mtd_info mtd;
|
||||
union {
|
||||
struct {
|
||||
|
@ -32,8 +33,12 @@ struct efx_mtd_partition {
|
|||
u8 nvram_type;
|
||||
u16 fw_subtype;
|
||||
} mcdi;
|
||||
size_t offset;
|
||||
struct {
|
||||
const struct falcon_spi_device *spi;
|
||||
size_t offset;
|
||||
} falcon;
|
||||
};
|
||||
const char *dev_type_name;
|
||||
const char *type_name;
|
||||
char name[IFNAMSIZ + 20];
|
||||
};
|
||||
|
@ -47,21 +52,6 @@ struct efx_mtd_ops {
|
|||
int (*sync)(struct mtd_info *mtd);
|
||||
};
|
||||
|
||||
struct efx_mtd {
|
||||
struct list_head node;
|
||||
struct efx_nic *efx;
|
||||
const struct efx_spi_device *spi;
|
||||
const char *name;
|
||||
const struct efx_mtd_ops *ops;
|
||||
size_t n_parts;
|
||||
struct efx_mtd_partition part[0];
|
||||
};
|
||||
|
||||
#define efx_for_each_partition(part, efx_mtd) \
|
||||
for ((part) = &(efx_mtd)->part[0]; \
|
||||
(part) != &(efx_mtd)->part[(efx_mtd)->n_parts]; \
|
||||
(part)++)
|
||||
|
||||
#define to_efx_mtd_partition(mtd) \
|
||||
container_of(mtd, struct efx_mtd_partition, mtd)
|
||||
|
||||
|
@ -71,11 +61,10 @@ static int siena_mtd_probe(struct efx_nic *efx);
|
|||
/* SPI utilities */
|
||||
|
||||
static int
|
||||
efx_spi_slow_wait(struct efx_mtd_partition *part, bool uninterruptible)
|
||||
falcon_spi_slow_wait(struct efx_mtd_partition *part, bool uninterruptible)
|
||||
{
|
||||
struct efx_mtd *efx_mtd = part->mtd.priv;
|
||||
const struct efx_spi_device *spi = efx_mtd->spi;
|
||||
struct efx_nic *efx = efx_mtd->efx;
|
||||
const struct falcon_spi_device *spi = part->falcon.spi;
|
||||
struct efx_nic *efx = part->mtd.priv;
|
||||
u8 status;
|
||||
int rc, i;
|
||||
|
||||
|
@ -93,12 +82,13 @@ efx_spi_slow_wait(struct efx_mtd_partition *part, bool uninterruptible)
|
|||
if (signal_pending(current))
|
||||
return -EINTR;
|
||||
}
|
||||
pr_err("%s: timed out waiting for %s\n", part->name, efx_mtd->name);
|
||||
pr_err("%s: timed out waiting for %s\n",
|
||||
part->name, part->dev_type_name);
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
|
||||
static int
|
||||
efx_spi_unlock(struct efx_nic *efx, const struct efx_spi_device *spi)
|
||||
falcon_spi_unlock(struct efx_nic *efx, const struct falcon_spi_device *spi)
|
||||
{
|
||||
const u8 unlock_mask = (SPI_STATUS_BP2 | SPI_STATUS_BP1 |
|
||||
SPI_STATUS_BP0);
|
||||
|
@ -133,14 +123,13 @@ efx_spi_unlock(struct efx_nic *efx, const struct efx_spi_device *spi)
|
|||
}
|
||||
|
||||
static int
|
||||
efx_spi_erase(struct efx_mtd_partition *part, loff_t start, size_t len)
|
||||
falcon_spi_erase(struct efx_mtd_partition *part, loff_t start, size_t len)
|
||||
{
|
||||
struct efx_mtd *efx_mtd = part->mtd.priv;
|
||||
const struct efx_spi_device *spi = efx_mtd->spi;
|
||||
struct efx_nic *efx = efx_mtd->efx;
|
||||
const struct falcon_spi_device *spi = part->falcon.spi;
|
||||
struct efx_nic *efx = part->mtd.priv;
|
||||
unsigned pos, block_len;
|
||||
u8 empty[EFX_SPI_VERIFY_BUF_LEN];
|
||||
u8 buffer[EFX_SPI_VERIFY_BUF_LEN];
|
||||
u8 empty[FALCON_SPI_VERIFY_BUF_LEN];
|
||||
u8 buffer[FALCON_SPI_VERIFY_BUF_LEN];
|
||||
int rc;
|
||||
|
||||
if (len != spi->erase_size)
|
||||
|
@ -149,7 +138,7 @@ efx_spi_erase(struct efx_mtd_partition *part, loff_t start, size_t len)
|
|||
if (spi->erase_command == 0)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
rc = efx_spi_unlock(efx, spi);
|
||||
rc = falcon_spi_unlock(efx, spi);
|
||||
if (rc)
|
||||
return rc;
|
||||
rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
|
||||
|
@ -159,7 +148,7 @@ efx_spi_erase(struct efx_mtd_partition *part, loff_t start, size_t len)
|
|||
NULL, 0);
|
||||
if (rc)
|
||||
return rc;
|
||||
rc = efx_spi_slow_wait(part, false);
|
||||
rc = falcon_spi_slow_wait(part, false);
|
||||
|
||||
/* Verify the entire region has been wiped */
|
||||
memset(empty, 0xff, sizeof(empty));
|
||||
|
@ -185,10 +174,10 @@ efx_spi_erase(struct efx_mtd_partition *part, loff_t start, size_t len)
|
|||
|
||||
static int efx_mtd_erase(struct mtd_info *mtd, struct erase_info *erase)
|
||||
{
|
||||
struct efx_mtd *efx_mtd = mtd->priv;
|
||||
struct efx_nic *efx = mtd->priv;
|
||||
int rc;
|
||||
|
||||
rc = efx_mtd->ops->erase(mtd, erase->addr, erase->len);
|
||||
rc = efx->mtd_ops->erase(mtd, erase->addr, erase->len);
|
||||
if (rc == 0) {
|
||||
erase->state = MTD_ERASE_DONE;
|
||||
} else {
|
||||
|
@ -202,13 +191,13 @@ static int efx_mtd_erase(struct mtd_info *mtd, struct erase_info *erase)
|
|||
static void efx_mtd_sync(struct mtd_info *mtd)
|
||||
{
|
||||
struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
|
||||
struct efx_mtd *efx_mtd = mtd->priv;
|
||||
struct efx_nic *efx = mtd->priv;
|
||||
int rc;
|
||||
|
||||
rc = efx_mtd->ops->sync(mtd);
|
||||
rc = efx->mtd_ops->sync(mtd);
|
||||
if (rc)
|
||||
pr_err("%s: %s sync failed (%d)\n",
|
||||
part->name, efx_mtd->name, rc);
|
||||
part->name, part->dev_type_name, rc);
|
||||
}
|
||||
|
||||
static void efx_mtd_remove_partition(struct efx_mtd_partition *part)
|
||||
|
@ -222,86 +211,84 @@ static void efx_mtd_remove_partition(struct efx_mtd_partition *part)
|
|||
ssleep(1);
|
||||
}
|
||||
WARN_ON(rc);
|
||||
list_del(&part->node);
|
||||
}
|
||||
|
||||
static void efx_mtd_remove_device(struct efx_mtd *efx_mtd)
|
||||
static void efx_mtd_rename_partition(struct efx_mtd_partition *part)
|
||||
{
|
||||
struct efx_mtd_partition *part;
|
||||
struct efx_nic *efx = part->mtd.priv;
|
||||
|
||||
efx_for_each_partition(part, efx_mtd)
|
||||
efx_mtd_remove_partition(part);
|
||||
list_del(&efx_mtd->node);
|
||||
kfree(efx_mtd);
|
||||
if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
|
||||
snprintf(part->name, sizeof(part->name), "%s %s:%02x",
|
||||
efx->name, part->type_name, part->mcdi.fw_subtype);
|
||||
else
|
||||
snprintf(part->name, sizeof(part->name), "%s %s",
|
||||
efx->name, part->type_name);
|
||||
}
|
||||
|
||||
static void efx_mtd_rename_device(struct efx_mtd *efx_mtd)
|
||||
static int efx_mtd_add(struct efx_nic *efx,
|
||||
struct efx_mtd_partition *parts, size_t n_parts)
|
||||
{
|
||||
struct efx_mtd_partition *part;
|
||||
size_t i;
|
||||
|
||||
efx_for_each_partition(part, efx_mtd)
|
||||
if (efx_nic_rev(efx_mtd->efx) >= EFX_REV_SIENA_A0)
|
||||
snprintf(part->name, sizeof(part->name),
|
||||
"%s %s:%02x", efx_mtd->efx->name,
|
||||
part->type_name, part->mcdi.fw_subtype);
|
||||
else
|
||||
snprintf(part->name, sizeof(part->name),
|
||||
"%s %s", efx_mtd->efx->name,
|
||||
part->type_name);
|
||||
}
|
||||
for (i = 0; i < n_parts; i++) {
|
||||
part = &parts[i];
|
||||
|
||||
static int efx_mtd_probe_device(struct efx_nic *efx, struct efx_mtd *efx_mtd)
|
||||
{
|
||||
struct efx_mtd_partition *part;
|
||||
|
||||
efx_mtd->efx = efx;
|
||||
|
||||
efx_mtd_rename_device(efx_mtd);
|
||||
|
||||
efx_for_each_partition(part, efx_mtd) {
|
||||
part->mtd.writesize = 1;
|
||||
|
||||
part->mtd.owner = THIS_MODULE;
|
||||
part->mtd.priv = efx_mtd;
|
||||
part->mtd.priv = efx;
|
||||
part->mtd.name = part->name;
|
||||
part->mtd._erase = efx_mtd_erase;
|
||||
part->mtd._read = efx_mtd->ops->read;
|
||||
part->mtd._write = efx_mtd->ops->write;
|
||||
part->mtd._read = efx->mtd_ops->read;
|
||||
part->mtd._write = efx->mtd_ops->write;
|
||||
part->mtd._sync = efx_mtd_sync;
|
||||
|
||||
efx_mtd_rename_partition(part);
|
||||
|
||||
if (mtd_device_register(&part->mtd, NULL, 0))
|
||||
goto fail;
|
||||
|
||||
/* Add to list in order - efx_mtd_remove() depends on this */
|
||||
list_add_tail(&part->node, &efx->mtd_list);
|
||||
}
|
||||
|
||||
list_add(&efx_mtd->node, &efx->mtd_list);
|
||||
return 0;
|
||||
|
||||
fail:
|
||||
while (part != &efx_mtd->part[0]) {
|
||||
--part;
|
||||
efx_mtd_remove_partition(part);
|
||||
}
|
||||
while (i--)
|
||||
efx_mtd_remove_partition(&parts[i]);
|
||||
/* Failure is unlikely here, but probably means we're out of memory */
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
void efx_mtd_remove(struct efx_nic *efx)
|
||||
{
|
||||
struct efx_mtd *efx_mtd, *next;
|
||||
struct efx_mtd_partition *parts, *part, *next;
|
||||
|
||||
WARN_ON(efx_dev_registered(efx));
|
||||
|
||||
list_for_each_entry_safe(efx_mtd, next, &efx->mtd_list, node)
|
||||
efx_mtd_remove_device(efx_mtd);
|
||||
if (list_empty(&efx->mtd_list))
|
||||
return;
|
||||
|
||||
parts = list_first_entry(&efx->mtd_list, struct efx_mtd_partition,
|
||||
node);
|
||||
|
||||
list_for_each_entry_safe(part, next, &efx->mtd_list, node)
|
||||
efx_mtd_remove_partition(part);
|
||||
|
||||
kfree(parts);
|
||||
}
|
||||
|
||||
void efx_mtd_rename(struct efx_nic *efx)
|
||||
{
|
||||
struct efx_mtd *efx_mtd;
|
||||
struct efx_mtd_partition *part;
|
||||
|
||||
ASSERT_RTNL();
|
||||
|
||||
list_for_each_entry(efx_mtd, &efx->mtd_list, node)
|
||||
efx_mtd_rename_device(efx_mtd);
|
||||
list_for_each_entry(part, &efx->mtd_list, node)
|
||||
efx_mtd_rename_partition(part);
|
||||
}
|
||||
|
||||
int efx_mtd_probe(struct efx_nic *efx)
|
||||
|
@ -318,17 +305,15 @@ static int falcon_mtd_read(struct mtd_info *mtd, loff_t start,
|
|||
size_t len, size_t *retlen, u8 *buffer)
|
||||
{
|
||||
struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
|
||||
struct efx_mtd *efx_mtd = mtd->priv;
|
||||
const struct efx_spi_device *spi = efx_mtd->spi;
|
||||
struct efx_nic *efx = efx_mtd->efx;
|
||||
struct efx_nic *efx = mtd->priv;
|
||||
struct falcon_nic_data *nic_data = efx->nic_data;
|
||||
int rc;
|
||||
|
||||
rc = mutex_lock_interruptible(&nic_data->spi_lock);
|
||||
if (rc)
|
||||
return rc;
|
||||
rc = falcon_spi_read(efx, spi, part->offset + start, len,
|
||||
retlen, buffer);
|
||||
rc = falcon_spi_read(efx, part->falcon.spi, part->falcon.offset + start,
|
||||
len, retlen, buffer);
|
||||
mutex_unlock(&nic_data->spi_lock);
|
||||
return rc;
|
||||
}
|
||||
|
@ -336,15 +321,14 @@ static int falcon_mtd_read(struct mtd_info *mtd, loff_t start,
|
|||
static int falcon_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
|
||||
{
|
||||
struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
|
||||
struct efx_mtd *efx_mtd = mtd->priv;
|
||||
struct efx_nic *efx = efx_mtd->efx;
|
||||
struct efx_nic *efx = mtd->priv;
|
||||
struct falcon_nic_data *nic_data = efx->nic_data;
|
||||
int rc;
|
||||
|
||||
rc = mutex_lock_interruptible(&nic_data->spi_lock);
|
||||
if (rc)
|
||||
return rc;
|
||||
rc = efx_spi_erase(part, part->offset + start, len);
|
||||
rc = falcon_spi_erase(part, part->falcon.offset + start, len);
|
||||
mutex_unlock(&nic_data->spi_lock);
|
||||
return rc;
|
||||
}
|
||||
|
@ -353,17 +337,15 @@ static int falcon_mtd_write(struct mtd_info *mtd, loff_t start,
|
|||
size_t len, size_t *retlen, const u8 *buffer)
|
||||
{
|
||||
struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
|
||||
struct efx_mtd *efx_mtd = mtd->priv;
|
||||
const struct efx_spi_device *spi = efx_mtd->spi;
|
||||
struct efx_nic *efx = efx_mtd->efx;
|
||||
struct efx_nic *efx = mtd->priv;
|
||||
struct falcon_nic_data *nic_data = efx->nic_data;
|
||||
int rc;
|
||||
|
||||
rc = mutex_lock_interruptible(&nic_data->spi_lock);
|
||||
if (rc)
|
||||
return rc;
|
||||
rc = falcon_spi_write(efx, spi, part->offset + start, len,
|
||||
retlen, buffer);
|
||||
rc = falcon_spi_write(efx, part->falcon.spi,
|
||||
part->falcon.offset + start, len, retlen, buffer);
|
||||
mutex_unlock(&nic_data->spi_lock);
|
||||
return rc;
|
||||
}
|
||||
|
@ -371,13 +353,12 @@ static int falcon_mtd_write(struct mtd_info *mtd, loff_t start,
|
|||
static int falcon_mtd_sync(struct mtd_info *mtd)
|
||||
{
|
||||
struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
|
||||
struct efx_mtd *efx_mtd = mtd->priv;
|
||||
struct efx_nic *efx = efx_mtd->efx;
|
||||
struct efx_nic *efx = mtd->priv;
|
||||
struct falcon_nic_data *nic_data = efx->nic_data;
|
||||
int rc;
|
||||
|
||||
mutex_lock(&nic_data->spi_lock);
|
||||
rc = efx_spi_slow_wait(part, true);
|
||||
rc = falcon_spi_slow_wait(part, true);
|
||||
mutex_unlock(&nic_data->spi_lock);
|
||||
return rc;
|
||||
}
|
||||
|
@ -392,66 +373,50 @@ static const struct efx_mtd_ops falcon_mtd_ops = {
|
|||
static int falcon_mtd_probe(struct efx_nic *efx)
|
||||
{
|
||||
struct falcon_nic_data *nic_data = efx->nic_data;
|
||||
struct efx_spi_device *spi;
|
||||
struct efx_mtd *efx_mtd;
|
||||
struct efx_mtd_partition *parts;
|
||||
struct falcon_spi_device *spi;
|
||||
size_t n_parts;
|
||||
int rc = -ENODEV;
|
||||
|
||||
ASSERT_RTNL();
|
||||
|
||||
efx->mtd_ops = &falcon_mtd_ops;
|
||||
|
||||
/* Allocate space for maximum number of partitions */
|
||||
parts = kcalloc(2, sizeof(*parts), GFP_KERNEL);
|
||||
n_parts = 0;
|
||||
|
||||
spi = &nic_data->spi_flash;
|
||||
if (efx_spi_present(spi) && spi->size > FALCON_FLASH_BOOTCODE_START) {
|
||||
efx_mtd = kzalloc(sizeof(*efx_mtd) + sizeof(efx_mtd->part[0]),
|
||||
GFP_KERNEL);
|
||||
if (!efx_mtd)
|
||||
return -ENOMEM;
|
||||
|
||||
efx_mtd->spi = spi;
|
||||
efx_mtd->name = "flash";
|
||||
efx_mtd->ops = &falcon_mtd_ops;
|
||||
|
||||
efx_mtd->n_parts = 1;
|
||||
efx_mtd->part[0].mtd.type = MTD_NORFLASH;
|
||||
efx_mtd->part[0].mtd.flags = MTD_CAP_NORFLASH;
|
||||
efx_mtd->part[0].mtd.size = spi->size - FALCON_FLASH_BOOTCODE_START;
|
||||
efx_mtd->part[0].mtd.erasesize = spi->erase_size;
|
||||
efx_mtd->part[0].offset = FALCON_FLASH_BOOTCODE_START;
|
||||
efx_mtd->part[0].type_name = "sfc_flash_bootrom";
|
||||
|
||||
rc = efx_mtd_probe_device(efx, efx_mtd);
|
||||
if (rc) {
|
||||
kfree(efx_mtd);
|
||||
return rc;
|
||||
}
|
||||
if (falcon_spi_present(spi) && spi->size > FALCON_FLASH_BOOTCODE_START) {
|
||||
parts[n_parts].falcon.spi = spi;
|
||||
parts[n_parts].falcon.offset = FALCON_FLASH_BOOTCODE_START;
|
||||
parts[n_parts].dev_type_name = "flash";
|
||||
parts[n_parts].type_name = "sfc_flash_bootrom";
|
||||
parts[n_parts].mtd.type = MTD_NORFLASH;
|
||||
parts[n_parts].mtd.flags = MTD_CAP_NORFLASH;
|
||||
parts[n_parts].mtd.size = spi->size - FALCON_FLASH_BOOTCODE_START;
|
||||
parts[n_parts].mtd.erasesize = spi->erase_size;
|
||||
n_parts++;
|
||||
}
|
||||
|
||||
spi = &nic_data->spi_eeprom;
|
||||
if (efx_spi_present(spi) && spi->size > EFX_EEPROM_BOOTCONFIG_START) {
|
||||
efx_mtd = kzalloc(sizeof(*efx_mtd) + sizeof(efx_mtd->part[0]),
|
||||
GFP_KERNEL);
|
||||
if (!efx_mtd)
|
||||
return -ENOMEM;
|
||||
|
||||
efx_mtd->spi = spi;
|
||||
efx_mtd->name = "EEPROM";
|
||||
efx_mtd->ops = &falcon_mtd_ops;
|
||||
|
||||
efx_mtd->n_parts = 1;
|
||||
efx_mtd->part[0].mtd.type = MTD_RAM;
|
||||
efx_mtd->part[0].mtd.flags = MTD_CAP_RAM;
|
||||
efx_mtd->part[0].mtd.size =
|
||||
min(spi->size, EFX_EEPROM_BOOTCONFIG_END) -
|
||||
EFX_EEPROM_BOOTCONFIG_START;
|
||||
efx_mtd->part[0].mtd.erasesize = spi->erase_size;
|
||||
efx_mtd->part[0].offset = EFX_EEPROM_BOOTCONFIG_START;
|
||||
efx_mtd->part[0].type_name = "sfc_bootconfig";
|
||||
|
||||
rc = efx_mtd_probe_device(efx, efx_mtd);
|
||||
if (rc) {
|
||||
kfree(efx_mtd);
|
||||
return rc;
|
||||
}
|
||||
if (falcon_spi_present(spi) && spi->size > FALCON_EEPROM_BOOTCONFIG_START) {
|
||||
parts[n_parts].falcon.spi = spi;
|
||||
parts[n_parts].falcon.offset = FALCON_EEPROM_BOOTCONFIG_START;
|
||||
parts[n_parts].dev_type_name = "EEPROM";
|
||||
parts[n_parts].type_name = "sfc_bootconfig";
|
||||
parts[n_parts].mtd.type = MTD_RAM;
|
||||
parts[n_parts].mtd.flags = MTD_CAP_RAM;
|
||||
parts[n_parts].mtd.size =
|
||||
min(spi->size, FALCON_EEPROM_BOOTCONFIG_END) -
|
||||
FALCON_EEPROM_BOOTCONFIG_START;
|
||||
parts[n_parts].mtd.erasesize = spi->erase_size;
|
||||
n_parts++;
|
||||
}
|
||||
|
||||
rc = efx_mtd_add(efx, parts, n_parts);
|
||||
if (rc)
|
||||
kfree(parts);
|
||||
return rc;
|
||||
}
|
||||
|
||||
|
@ -461,8 +426,7 @@ static int siena_mtd_read(struct mtd_info *mtd, loff_t start,
|
|||
size_t len, size_t *retlen, u8 *buffer)
|
||||
{
|
||||
struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
|
||||
struct efx_mtd *efx_mtd = mtd->priv;
|
||||
struct efx_nic *efx = efx_mtd->efx;
|
||||
struct efx_nic *efx = mtd->priv;
|
||||
loff_t offset = start;
|
||||
loff_t end = min_t(loff_t, start + len, mtd->size);
|
||||
size_t chunk;
|
||||
|
@ -485,8 +449,7 @@ static int siena_mtd_read(struct mtd_info *mtd, loff_t start,
|
|||
static int siena_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
|
||||
{
|
||||
struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
|
||||
struct efx_mtd *efx_mtd = mtd->priv;
|
||||
struct efx_nic *efx = efx_mtd->efx;
|
||||
struct efx_nic *efx = mtd->priv;
|
||||
loff_t offset = start & ~((loff_t)(mtd->erasesize - 1));
|
||||
loff_t end = min_t(loff_t, start + len, mtd->size);
|
||||
size_t chunk = part->mtd.erasesize;
|
||||
|
@ -517,8 +480,7 @@ static int siena_mtd_write(struct mtd_info *mtd, loff_t start,
|
|||
size_t len, size_t *retlen, const u8 *buffer)
|
||||
{
|
||||
struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
|
||||
struct efx_mtd *efx_mtd = mtd->priv;
|
||||
struct efx_nic *efx = efx_mtd->efx;
|
||||
struct efx_nic *efx = mtd->priv;
|
||||
loff_t offset = start;
|
||||
loff_t end = min_t(loff_t, start + len, mtd->size);
|
||||
size_t chunk;
|
||||
|
@ -548,8 +510,7 @@ static int siena_mtd_write(struct mtd_info *mtd, loff_t start,
|
|||
static int siena_mtd_sync(struct mtd_info *mtd)
|
||||
{
|
||||
struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
|
||||
struct efx_mtd *efx_mtd = mtd->priv;
|
||||
struct efx_nic *efx = efx_mtd->efx;
|
||||
struct efx_nic *efx = mtd->priv;
|
||||
int rc = 0;
|
||||
|
||||
if (part->mcdi.updating) {
|
||||
|
@ -589,11 +550,9 @@ static const struct siena_nvram_type_info siena_nvram_types[] = {
|
|||
};
|
||||
|
||||
static int siena_mtd_probe_partition(struct efx_nic *efx,
|
||||
struct efx_mtd *efx_mtd,
|
||||
unsigned int part_id,
|
||||
struct efx_mtd_partition *part,
|
||||
unsigned int type)
|
||||
{
|
||||
struct efx_mtd_partition *part = &efx_mtd->part[part_id];
|
||||
const struct siena_nvram_type_info *info;
|
||||
size_t size, erase_size;
|
||||
bool protected;
|
||||
|
@ -615,6 +574,7 @@ static int siena_mtd_probe_partition(struct efx_nic *efx,
|
|||
return -ENODEV; /* hide it */
|
||||
|
||||
part->mcdi.nvram_type = type;
|
||||
part->dev_type_name = "Siena NVRAM manager";
|
||||
part->type_name = info->name;
|
||||
|
||||
part->mtd.type = MTD_NORFLASH;
|
||||
|
@ -626,55 +586,54 @@ static int siena_mtd_probe_partition(struct efx_nic *efx,
|
|||
}
|
||||
|
||||
static int siena_mtd_get_fw_subtypes(struct efx_nic *efx,
|
||||
struct efx_mtd *efx_mtd)
|
||||
struct efx_mtd_partition *parts,
|
||||
size_t n_parts)
|
||||
{
|
||||
struct efx_mtd_partition *part;
|
||||
uint16_t fw_subtype_list[
|
||||
MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM];
|
||||
size_t i;
|
||||
int rc;
|
||||
|
||||
rc = efx_mcdi_get_board_cfg(efx, NULL, fw_subtype_list, NULL);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
efx_for_each_partition(part, efx_mtd)
|
||||
part->mcdi.fw_subtype = fw_subtype_list[part->mcdi.nvram_type];
|
||||
for (i = 0; i < n_parts; i++)
|
||||
parts[i].mcdi.fw_subtype =
|
||||
fw_subtype_list[parts[i].mcdi.nvram_type];
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int siena_mtd_probe(struct efx_nic *efx)
|
||||
{
|
||||
struct efx_mtd *efx_mtd;
|
||||
int rc = -ENODEV;
|
||||
struct efx_mtd_partition *parts;
|
||||
u32 nvram_types;
|
||||
unsigned int type;
|
||||
size_t n_parts;
|
||||
int rc;
|
||||
|
||||
ASSERT_RTNL();
|
||||
|
||||
efx->mtd_ops = &siena_mtd_ops;
|
||||
|
||||
rc = efx_mcdi_nvram_types(efx, &nvram_types);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
efx_mtd = kzalloc(sizeof(*efx_mtd) +
|
||||
hweight32(nvram_types) * sizeof(efx_mtd->part[0]),
|
||||
GFP_KERNEL);
|
||||
if (!efx_mtd)
|
||||
parts = kcalloc(hweight32(nvram_types), sizeof(*parts), GFP_KERNEL);
|
||||
if (!parts)
|
||||
return -ENOMEM;
|
||||
|
||||
efx_mtd->name = "Siena NVRAM manager";
|
||||
|
||||
efx_mtd->ops = &siena_mtd_ops;
|
||||
|
||||
type = 0;
|
||||
efx_mtd->n_parts = 0;
|
||||
n_parts = 0;
|
||||
|
||||
while (nvram_types != 0) {
|
||||
if (nvram_types & 1) {
|
||||
rc = siena_mtd_probe_partition(efx, efx_mtd,
|
||||
efx_mtd->n_parts, type);
|
||||
rc = siena_mtd_probe_partition(efx, &parts[n_parts],
|
||||
type);
|
||||
if (rc == 0)
|
||||
efx_mtd->n_parts++;
|
||||
n_parts++;
|
||||
else if (rc != -ENODEV)
|
||||
goto fail;
|
||||
}
|
||||
|
@ -682,14 +641,14 @@ static int siena_mtd_probe(struct efx_nic *efx)
|
|||
nvram_types >>= 1;
|
||||
}
|
||||
|
||||
rc = siena_mtd_get_fw_subtypes(efx, efx_mtd);
|
||||
rc = siena_mtd_get_fw_subtypes(efx, parts, n_parts);
|
||||
if (rc)
|
||||
goto fail;
|
||||
|
||||
rc = efx_mtd_probe_device(efx, efx_mtd);
|
||||
rc = efx_mtd_add(efx, parts, n_parts);
|
||||
fail:
|
||||
if (rc)
|
||||
kfree(efx_mtd);
|
||||
kfree(parts);
|
||||
return rc;
|
||||
}
|
||||
|
||||
|
|
|
@@ -30,6 +30,7 @@
#include "enum.h"
#include "bitfield.h"
#include "filter.h"

/**************************************************************************
 *
@@ -356,6 +357,7 @@ enum efx_rx_alloc_method {
 * @efx: Associated Efx NIC
 * @channel: Channel instance number
 * @type: Channel type definition
 * @eventq_init: Event queue initialised flag
 * @enabled: Channel enabled indicator
 * @irq: IRQ number (MSI and MSI-X only)
 * @irq_moderation: IRQ moderation value (in hardware ticks)
@@ -387,6 +389,7 @@ struct efx_channel {
	struct efx_nic *efx;
	int channel;
	const struct efx_channel_type *type;
	bool eventq_init;
	bool enabled;
	int irq;
	unsigned int irq_moderation;
@@ -674,7 +677,6 @@ union efx_multicast_hash {
	efx_oword_t oword[EFX_MCAST_HASH_ENTRIES / sizeof(efx_oword_t) / 8];
};

struct efx_filter_state;
struct efx_vf;
struct vfdi_status;

@@ -751,8 +753,10 @@ struct vfdi_status;
 * @link_advertising: Autonegotiation advertising flags
 * @link_state: Current state of the link
 * @n_link_state_changes: Number of times the link has changed state
 * @promiscuous: Promiscuous flag. Protected by netif_tx_lock.
 * @multicast_hash: Multicast hash table
 * @unicast_filter: Flag for Falcon-arch simple unicast filter.
 *	Protected by @mac_lock.
 * @multicast_hash: Multicast hash table for Falcon-arch.
 *	Protected by @mac_lock.
 * @wanted_fc: Wanted flow control flags
 * @fc_disable: When non-zero flow control is disabled. Typically used to
 *	ensure that network back pressure doesn't delay dma queue flushes.
@@ -761,6 +765,11 @@ struct vfdi_status;
 * @loopback_mode: Loopback status
 * @loopback_modes: Supported loopback mode bitmask
 * @loopback_selftest: Offline self-test private state
 * @filter_lock: Filter table lock
 * @filter_state: Architecture-dependent filter table state
 * @rps_flow_id: Flow IDs of filters allocated for accelerated RFS,
 *	indexed by filter ID
 * @rps_expire_index: Next index to check for expiry in @rps_flow_id
 * @drain_pending: Count of RX and TX queues that haven't been flushed and drained.
 * @rxq_flush_pending: Count of number of receive queues that need to be flushed.
 *	Decremented when the efx_flush_rx_queue() is called.
@@ -832,6 +841,8 @@ struct efx_nic {
	unsigned rx_dc_base;
	unsigned sram_lim_qw;
	unsigned next_buffer_table;

	unsigned int max_channels;
	unsigned n_channels;
	unsigned n_rx_channels;
	unsigned rss_spread;
@@ -857,6 +868,7 @@ struct efx_nic {
	struct delayed_work selftest_work;

#ifdef CONFIG_SFC_MTD
	const struct efx_mtd_ops *mtd_ops;
	struct list_head mtd_list;
#endif

@@ -883,7 +895,7 @@ struct efx_nic {
	struct efx_link_state link_state;
	unsigned int n_link_state_changes;

	bool promiscuous;
	bool unicast_filter;
	union efx_multicast_hash multicast_hash;
	u8 wanted_fc;
	unsigned fc_disable;
@@ -894,7 +906,12 @@ struct efx_nic {

	void *loopback_selftest;

	struct efx_filter_state *filter_state;
	spinlock_t filter_lock;
	void *filter_state;
#ifdef CONFIG_RFS_ACCEL
	u32 *rps_flow_id;
	unsigned int rps_expire_index;
#endif

	atomic_t drain_pending;
	atomic_t rxq_flush_pending;
@@ -939,6 +956,7 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)

/**
 * struct efx_nic_type - Efx device type definition
 * @mem_map_size: Get memory BAR mapped size
 * @probe: Probe the controller
 * @remove: Free resources allocated by probe()
 * @init: Initialise the controller
@@ -1011,8 +1029,25 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
 * @ev_process: Process events for a queue, up to the given NAPI quota
 * @ev_read_ack: Acknowledge read events on a queue, rearming its IRQ
 * @ev_test_generate: Generate a test event
 * @filter_table_probe: Probe filter capabilities and set up filter software state
 * @filter_table_restore: Restore filters removed from hardware
 * @filter_table_remove: Remove filters from hardware and tear down software state
 * @filter_update_rx_scatter: Update filters after change to rx scatter setting
 * @filter_insert: add or replace a filter
 * @filter_remove_safe: remove a filter by ID, carefully
 * @filter_get_safe: retrieve a filter by ID, carefully
 * @filter_clear_rx: remove RX filters by priority
 * @filter_count_rx_used: Get the number of filters in use at a given priority
 * @filter_get_rx_id_limit: Get maximum value of a filter id, plus 1
 * @filter_get_rx_ids: Get list of RX filters at a given priority
 * @filter_rfs_insert: Add or replace a filter for RFS. This must be
 *	atomic. The hardware change may be asynchronous but should
 *	not be delayed for long. It may fail if this can't be done
 *	atomically.
 * @filter_rfs_expire_one: Consider expiring a filter inserted for RFS.
 *	This must check whether the specified table entry is used by RFS
 *	and that rps_may_expire_flow() returns true for it.
 * @revision: Hardware architecture revision
 * @mem_map_size: Memory BAR mapped size
 * @txd_ptr_tbl_base: TX descriptor ring base address
 * @rxd_ptr_tbl_base: RX descriptor ring base address
 * @buf_tbl_base: Buffer table base address
@@ -1024,14 +1059,13 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
 * @can_rx_scatter: NIC is able to scatter packet to multiple buffers
 * @max_interrupt_mode: Highest capability interrupt mode supported
 *	from &enum efx_init_mode.
 * @phys_addr_channels: Number of channels with physically addressed
 *	descriptors
 * @timer_period_max: Maximum period of interrupt timer (in ticks)
 * @offload_features: net_device feature flags for protocol offload
 *	features implemented in hardware
 * @mcdi_max_ver: Maximum MCDI version supported
 */
struct efx_nic_type {
	unsigned int (*mem_map_size)(struct efx_nic *efx);
	int (*probe)(struct efx_nic *efx);
	void (*remove)(struct efx_nic *efx);
	int (*init)(struct efx_nic *efx);
@@ -1090,9 +1124,34 @@ struct efx_nic_type {
	int (*ev_process)(struct efx_channel *channel, int quota);
	void (*ev_read_ack)(struct efx_channel *channel);
	void (*ev_test_generate)(struct efx_channel *channel);
	int (*filter_table_probe)(struct efx_nic *efx);
	void (*filter_table_restore)(struct efx_nic *efx);
	void (*filter_table_remove)(struct efx_nic *efx);
	void (*filter_update_rx_scatter)(struct efx_nic *efx);
	s32 (*filter_insert)(struct efx_nic *efx,
			     struct efx_filter_spec *spec, bool replace);
	int (*filter_remove_safe)(struct efx_nic *efx,
				  enum efx_filter_priority priority,
				  u32 filter_id);
	int (*filter_get_safe)(struct efx_nic *efx,
			       enum efx_filter_priority priority,
			       u32 filter_id, struct efx_filter_spec *);
	void (*filter_clear_rx)(struct efx_nic *efx,
				enum efx_filter_priority priority);
	u32 (*filter_count_rx_used)(struct efx_nic *efx,
				    enum efx_filter_priority priority);
	u32 (*filter_get_rx_id_limit)(struct efx_nic *efx);
	s32 (*filter_get_rx_ids)(struct efx_nic *efx,
				 enum efx_filter_priority priority,
				 u32 *buf, u32 size);
#ifdef CONFIG_RFS_ACCEL
	s32 (*filter_rfs_insert)(struct efx_nic *efx,
				 struct efx_filter_spec *spec);
	bool (*filter_rfs_expire_one)(struct efx_nic *efx, u32 flow_id,
				      unsigned int index);
#endif

	int revision;
	unsigned int mem_map_size;
	unsigned int txd_ptr_tbl_base;
	unsigned int rxd_ptr_tbl_base;
	unsigned int buf_tbl_base;
@@ -1103,10 +1162,10 @@ struct efx_nic_type {
	unsigned int rx_buffer_padding;
	bool can_rx_scatter;
	unsigned int max_interrupt_mode;
	unsigned int phys_addr_channels;
	unsigned int timer_period_max;
	netdev_features_t offload_features;
	int mcdi_max_ver;
	unsigned int max_rx_ip_filters;
};

/**************************************************************************
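The kernel-doc above documents the new per-NIC-type filter hooks. As a rough illustration only (not part of this patch; the wrapper names are assumptions), generic driver code would reach the hooks through efx->type, for example:

/* Illustrative sketch: generic wrappers dispatching through the
 * per-NIC-type filter operations documented above. The wrapper names
 * are hypothetical, not taken from this diff.
 */
static inline s32 example_filter_insert(struct efx_nic *efx,
					struct efx_filter_spec *spec,
					bool replace)
{
	return efx->type->filter_insert(efx, spec, replace);
}

static inline void example_filter_clear_rx(struct efx_nic *efx,
					   enum efx_filter_priority priority)
{
	efx->type->filter_clear_rx(efx, priority);
}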
@@ -184,8 +184,8 @@ struct falcon_nic_data {
	bool stats_pending;
	struct timer_list stats_timer;
	u32 *stats_dma_done;
	struct efx_spi_device spi_flash;
	struct efx_spi_device spi_eeprom;
	struct falcon_spi_device spi_flash;
	struct falcon_spi_device spi_eeprom;
	struct mutex spi_lock;
	struct mutex mdio_lock;
	bool xmac_poll_required;
@@ -404,6 +404,35 @@ extern int efx_farch_ev_process(struct efx_channel *channel, int quota);
extern void efx_farch_ev_read_ack(struct efx_channel *channel);
extern void efx_farch_ev_test_generate(struct efx_channel *channel);

/* Falcon/Siena filter operations */
extern int efx_farch_filter_table_probe(struct efx_nic *efx);
extern void efx_farch_filter_table_restore(struct efx_nic *efx);
extern void efx_farch_filter_table_remove(struct efx_nic *efx);
extern void efx_farch_filter_update_rx_scatter(struct efx_nic *efx);
extern s32 efx_farch_filter_insert(struct efx_nic *efx,
				   struct efx_filter_spec *spec, bool replace);
extern int efx_farch_filter_remove_safe(struct efx_nic *efx,
					enum efx_filter_priority priority,
					u32 filter_id);
extern int efx_farch_filter_get_safe(struct efx_nic *efx,
				     enum efx_filter_priority priority,
				     u32 filter_id, struct efx_filter_spec *);
extern void efx_farch_filter_clear_rx(struct efx_nic *efx,
				      enum efx_filter_priority priority);
extern u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
					  enum efx_filter_priority priority);
extern u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx);
extern s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx,
				       enum efx_filter_priority priority,
				       u32 *buf, u32 size);
#ifdef CONFIG_RFS_ACCEL
extern s32 efx_farch_filter_rfs_insert(struct efx_nic *efx,
				       struct efx_filter_spec *spec);
extern bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
					    unsigned int index);
#endif
extern void efx_farch_filter_sync_rx_mode(struct efx_nic *efx);

extern bool efx_nic_event_present(struct efx_channel *channel);

/* Some statistics are computed as A - B where A and B each increase
@@ -21,6 +21,7 @@
#include <net/checksum.h>
#include "net_driver.h"
#include "efx.h"
#include "filter.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"
@@ -802,3 +803,96 @@ module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
		 "RX descriptor ring refill threshold (%)");

#ifdef CONFIG_RFS_ACCEL

int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
		   u16 rxq_index, u32 flow_id)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_channel *channel;
	struct efx_filter_spec spec;
	const struct iphdr *ip;
	const __be16 *ports;
	int nhoff;
	int rc;

	nhoff = skb_network_offset(skb);

	if (skb->protocol == htons(ETH_P_8021Q)) {
		EFX_BUG_ON_PARANOID(skb_headlen(skb) <
				    nhoff + sizeof(struct vlan_hdr));
		if (((const struct vlan_hdr *)skb->data + nhoff)->
		    h_vlan_encapsulated_proto != htons(ETH_P_IP))
			return -EPROTONOSUPPORT;

		/* This is IP over 802.1q VLAN.  We can't filter on the
		 * IP 5-tuple and the vlan together, so just strip the
		 * vlan header and filter on the IP part.
		 */
		nhoff += sizeof(struct vlan_hdr);
	} else if (skb->protocol != htons(ETH_P_IP)) {
		return -EPROTONOSUPPORT;
	}

	/* RFS must validate the IP header length before calling us */
	EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
	ip = (const struct iphdr *)(skb->data + nhoff);
	if (ip_is_fragment(ip))
		return -EPROTONOSUPPORT;
	EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
	ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);

	efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT,
			   efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
			   rxq_index);
	rc = efx_filter_set_ipv4_full(&spec, ip->protocol,
				      ip->daddr, ports[1], ip->saddr, ports[0]);
	if (rc)
		return rc;

	rc = efx->type->filter_rfs_insert(efx, &spec);
	if (rc < 0)
		return rc;

	/* Remember this so we can check whether to expire the filter later */
	efx->rps_flow_id[rc] = flow_id;
	channel = efx_get_channel(efx, skb_get_rx_queue(skb));
	++channel->rfs_filters_added;

	netif_info(efx, rx_status, efx->net_dev,
		   "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
		   (ip->protocol == IPPROTO_TCP) ? "TCP" : "UDP",
		   &ip->saddr, ntohs(ports[0]), &ip->daddr, ntohs(ports[1]),
		   rxq_index, flow_id, rc);

	return rc;
}

bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota)
{
	bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index);
	unsigned int index, size;
	u32 flow_id;

	if (!spin_trylock_bh(&efx->filter_lock))
		return false;

	expire_one = efx->type->filter_rfs_expire_one;
	index = efx->rps_expire_index;
	size = efx->type->max_rx_ip_filters;
	while (quota--) {
		flow_id = efx->rps_flow_id[index];
		if (expire_one(efx, flow_id, index))
			netif_info(efx, rx_status, efx->net_dev,
				   "expired filter %d [flow %u]\n",
				   index, flow_id);
		if (++index == size)
			index = 0;
	}
	efx->rps_expire_index = index;

	spin_unlock_bh(&efx->filter_lock);
	return true;
}

#endif /* CONFIG_RFS_ACCEL */
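For context, efx_filter_rfs() above is the driver's accelerated-RFS callback; a hedged sketch of how it would be wired into the stack (the ops table below is illustrative only and is not shown in this hunk):

/* Sketch under the assumption that efx_filter_rfs() is installed as the
 * standard ndo_rx_flow_steer hook; this ops table is an example, not the
 * driver's actual table.
 */
static const struct net_device_ops example_efx_netdev_ops = {
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= efx_filter_rfs,
#endif
};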
@@ -187,6 +187,12 @@ static void siena_dimension_resources(struct efx_nic *efx)
	efx_farch_dimension_resources(efx, FR_CZ_BUF_FULL_TBL_ROWS / 2);
}

static unsigned int siena_mem_map_size(struct efx_nic *efx)
{
	return FR_CZ_MC_TREG_SMEM +
		FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS;
}

static int siena_probe_nic(struct efx_nic *efx)
{
	struct siena_nic_data *nic_data;
@@ -207,6 +213,8 @@ static int siena_probe_nic(struct efx_nic *efx)
		goto fail1;
	}

	efx->max_channels = EFX_MAX_CHANNELS;

	efx_reado(efx, &reg, FR_AZ_CS_DEBUG);
	efx->port_num = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1;

@@ -495,6 +503,8 @@ static int siena_mac_reconfigure(struct efx_nic *efx)
		    MC_CMD_SET_MCAST_HASH_IN_HASH0_OFST +
		    sizeof(efx->multicast_hash));

	efx_farch_filter_sync_rx_mode(efx);

	WARN_ON(!mutex_is_locked(&efx->mac_lock));

	rc = efx_mcdi_set_mac(efx);
@@ -670,6 +680,7 @@ static int siena_mcdi_poll_reboot(struct efx_nic *efx)
 */

const struct efx_nic_type siena_a0_nic_type = {
	.mem_map_size = siena_mem_map_size,
	.probe = siena_probe_nic,
	.remove = siena_remove_nic,
	.init = siena_init_nic,
@@ -727,10 +738,23 @@ const struct efx_nic_type siena_a0_nic_type = {
	.ev_process = efx_farch_ev_process,
	.ev_read_ack = efx_farch_ev_read_ack,
	.ev_test_generate = efx_farch_ev_test_generate,
	.filter_table_probe = efx_farch_filter_table_probe,
	.filter_table_restore = efx_farch_filter_table_restore,
	.filter_table_remove = efx_farch_filter_table_remove,
	.filter_update_rx_scatter = efx_farch_filter_update_rx_scatter,
	.filter_insert = efx_farch_filter_insert,
	.filter_remove_safe = efx_farch_filter_remove_safe,
	.filter_get_safe = efx_farch_filter_get_safe,
	.filter_clear_rx = efx_farch_filter_clear_rx,
	.filter_count_rx_used = efx_farch_filter_count_rx_used,
	.filter_get_rx_id_limit = efx_farch_filter_get_rx_id_limit,
	.filter_get_rx_ids = efx_farch_filter_get_rx_ids,
#ifdef CONFIG_RFS_ACCEL
	.filter_rfs_insert = efx_farch_filter_rfs_insert,
	.filter_rfs_expire_one = efx_farch_filter_rfs_expire_one,
#endif

	.revision = EFX_REV_SIENA_A0,
	.mem_map_size = (FR_CZ_MC_TREG_SMEM +
			 FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS),
	.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
	.rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
	.buf_tbl_base = FR_BZ_BUF_FULL_TBL,
@@ -741,11 +765,9 @@ const struct efx_nic_type siena_a0_nic_type = {
	.rx_buffer_padding = 0,
	.can_rx_scatter = true,
	.max_interrupt_mode = EFX_INT_MODE_MSIX,
	.phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
				   * interrupt handler only supports 32
				   * channels */
	.timer_period_max = 1 << FRF_CZ_TC_TIMER_VAL_WIDTH,
	.offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			     NETIF_F_RXHASH | NETIF_F_NTUPLE),
	.mcdi_max_ver = 1,
	.max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS,
};
@@ -35,7 +35,7 @@
#define SPI_STATUS_NRDY 0x01	/* Device busy flag */

/**
 * struct efx_spi_device - an Efx SPI (Serial Peripheral Interface) device
 * struct falcon_spi_device - a Falcon SPI (Serial Peripheral Interface) device
 * @device_id: Controller's id for the device
 * @size: Size (in bytes)
 * @addr_len: Number of address bytes in read/write commands
@@ -51,7 +51,7 @@
 * @block_size: Write block size (in bytes).
 *	Write commands are limited to blocks with this size and alignment.
 */
struct efx_spi_device {
struct falcon_spi_device {
	int device_id;
	unsigned int size;
	unsigned int addr_len;
@@ -61,21 +61,21 @@ struct efx_spi_device {
	unsigned int block_size;
};

static inline bool efx_spi_present(const struct efx_spi_device *spi)
static inline bool falcon_spi_present(const struct falcon_spi_device *spi)
{
	return spi->size != 0;
}

int falcon_spi_cmd(struct efx_nic *efx,
		   const struct efx_spi_device *spi, unsigned int command,
		   const struct falcon_spi_device *spi, unsigned int command,
		   int address, const void *in, void *out, size_t len);
int falcon_spi_wait_write(struct efx_nic *efx,
			  const struct efx_spi_device *spi);
			  const struct falcon_spi_device *spi);
int falcon_spi_read(struct efx_nic *efx,
		    const struct efx_spi_device *spi, loff_t start,
		    const struct falcon_spi_device *spi, loff_t start,
		    size_t len, size_t *retlen, u8 *buffer);
int falcon_spi_write(struct efx_nic *efx,
		     const struct efx_spi_device *spi, loff_t start,
		     const struct falcon_spi_device *spi, loff_t start,
		     size_t len, size_t *retlen, const u8 *buffer);

/*
@@ -93,7 +93,7 @@ int falcon_spi_write(struct efx_nic *efx,
 */
#define FALCON_NVCONFIG_END 0x400U
#define FALCON_FLASH_BOOTCODE_START 0x8000U
#define EFX_EEPROM_BOOTCONFIG_START 0x800U
#define EFX_EEPROM_BOOTCONFIG_END 0x1800U
#define FALCON_EEPROM_BOOTCONFIG_START 0x800U
#define FALCON_EEPROM_BOOTCONFIG_END 0x1800U

#endif /* EFX_SPI_H */
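As a minimal usage sketch of the renamed Falcon SPI API declared above (the helper name is hypothetical, and it assumes the caller already serialises access to the SPI bus):

/* Minimal sketch only: read the boot configuration area via the
 * falcon_spi_* declarations above; not taken from this patch.
 */
static int example_read_bootconfig(struct efx_nic *efx,
				   const struct falcon_spi_device *eeprom,
				   u8 *buf, size_t len, size_t *retlen)
{
	if (!falcon_spi_present(eeprom))
		return -ENODEV;
	return falcon_spi_read(efx, eeprom, FALCON_EEPROM_BOOTCONFIG_START,
			       len, retlen, buf);
}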
@@ -15,25 +15,15 @@
 * Bug numbers are from Solarflare's Bugzilla.
 */

#define EFX_WORKAROUND_ALWAYS(efx) 1
#define EFX_WORKAROUND_FALCON_A(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_A1)
#define EFX_WORKAROUND_FALCON_AB(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_B0)
#define EFX_WORKAROUND_SIENA(efx) (efx_nic_rev(efx) == EFX_REV_SIENA_A0)
#define EFX_WORKAROUND_10G(efx) 1

/* XAUI resets if link not detected */
#define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS
/* RX PCIe double split performance issue */
#define EFX_WORKAROUND_7575 EFX_WORKAROUND_ALWAYS
/* Bit-bashed I2C reads cause performance drop */
#define EFX_WORKAROUND_7884 EFX_WORKAROUND_10G
/* TX_EV_PKT_ERR can be caused by a dangling TX descriptor
 * or a PCIe error (bug 11028) */
#define EFX_WORKAROUND_10727 EFX_WORKAROUND_ALWAYS
/* Truncated IPv4 packets can confuse the TX packet parser */
#define EFX_WORKAROUND_15592 EFX_WORKAROUND_FALCON_AB
/* Legacy ISR read can return zero once */
#define EFX_WORKAROUND_15783 EFX_WORKAROUND_ALWAYS
/* Legacy interrupt storm when interrupt fifo fills */
#define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA
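The workaround macros above are predicates taking the NIC, so call sites gate hardware-specific code on them; a hypothetical example (not part of this patch):

static void example_check_workarounds(struct efx_nic *efx)
{
	/* Hypothetical call site: only Siena parts need the bug 17213 fix */
	if (EFX_WORKAROUND_17213(efx))
		netif_dbg(efx, drv, efx->net_dev,
			  "applying legacy interrupt workaround\n");
}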