sfc: Track RPS flow IDs per channel instead of per function
Otherwise we get confused when two flows on different channels get the
same flow ID.

Signed-off-by: Edward Cree <ecree@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent d69d169493
commit faf8dcc12c

3 changed files with 56 additions and 17 deletions
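Background on the bug: the flow ID that accelerated RFS hands to a driver is an index into a per-RX-queue steering table, so the same numeric ID on two different queues can name two different flows. A minimal userspace sketch of the resulting ambiguity under the old per-NIC table (toy sizes and names, not driver code):

#include <stdint.h>
#include <stdio.h>

#define MAX_FILTERS 8   /* toy stand-in for efx->type->max_rx_ip_filters */

int main(void)
{
        /* Old scheme: one flow-ID table for the whole NIC, indexed by
         * hardware filter ID. */
        uint32_t nic_flow_id[MAX_FILTERS] = { 0 };

        /* Queue 0's flow 5 and queue 1's flow 5 are different flows,
         * but the table cannot record which queue each entry belongs to. */
        nic_flow_id[2] = 5;     /* filter 2 steers queue 0's flow 5 */
        nic_flow_id[3] = 5;     /* filter 3 steers queue 1's flow 5 */

        /* An expiry scan that asks "is flow 5 still active?" without a
         * queue number may check the wrong flow and expire a live filter. */
        printf("filters 2 and 3 both claim flow %u - ambiguous\n",
               (unsigned)nic_flow_id[2]);
        return 0;
}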
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -1726,14 +1726,33 @@ static int efx_probe_filters(struct efx_nic *efx)
 
 #ifdef CONFIG_RFS_ACCEL
 	if (efx->type->offload_features & NETIF_F_NTUPLE) {
-		efx->rps_flow_id = kcalloc(efx->type->max_rx_ip_filters,
-					   sizeof(*efx->rps_flow_id),
-					   GFP_KERNEL);
-		if (!efx->rps_flow_id) {
+		struct efx_channel *channel;
+		int i, success = 1;
+
+		efx_for_each_channel(channel, efx) {
+			channel->rps_flow_id =
+				kcalloc(efx->type->max_rx_ip_filters,
+					sizeof(*channel->rps_flow_id),
+					GFP_KERNEL);
+			if (!channel->rps_flow_id)
+				success = 0;
+			else
+				for (i = 0;
+				     i < efx->type->max_rx_ip_filters;
+				     ++i)
+					channel->rps_flow_id[i] =
+						RPS_FLOW_ID_INVALID;
+		}
+
+		if (!success) {
+			efx_for_each_channel(channel, efx)
+				kfree(channel->rps_flow_id);
 			efx->type->filter_table_remove(efx);
 			rc = -ENOMEM;
 			goto out_unlock;
 		}
+
+		efx->rps_expire_index = efx->rps_expire_channel = 0;
 	}
 #endif
 out_unlock:
@@ -1744,7 +1763,10 @@ static int efx_probe_filters(struct efx_nic *efx)
 static void efx_remove_filters(struct efx_nic *efx)
 {
 #ifdef CONFIG_RFS_ACCEL
-	kfree(efx->rps_flow_id);
+	struct efx_channel *channel;
+
+	efx_for_each_channel(channel, efx)
+		kfree(channel->rps_flow_id);
 #endif
 	down_write(&efx->filter_sem);
 	efx->type->filter_table_remove(efx);
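The probe hunk above uses an allocate-everything-then-check pattern: every channel's table is allocated before any failure is acted on, and a single failure unwinds all of them. A standalone sketch of that shape (plain C; toy_channel and alloc_flow_tables are hypothetical names, and the sentinel seeding mirrors what the driver does):

#include <stdint.h>
#include <stdlib.h>

#define RPS_FLOW_ID_INVALID 0xFFFFFFFF  /* same sentinel the patch defines */

struct toy_channel {
        uint32_t *rps_flow_id;
};

/* Allocate one flow-ID table per channel; on any failure, free them
 * all so the caller sees an all-or-nothing result, as in
 * efx_probe_filters() above. */
static int alloc_flow_tables(struct toy_channel *ch, unsigned int n_channels,
                             unsigned int n_filters)
{
        unsigned int c, i;
        int success = 1;

        for (c = 0; c < n_channels; c++) {
                ch[c].rps_flow_id = calloc(n_filters, sizeof(uint32_t));
                if (!ch[c].rps_flow_id)
                        success = 0;
                else
                        for (i = 0; i < n_filters; i++)
                                ch[c].rps_flow_id[i] = RPS_FLOW_ID_INVALID;
        }
        if (!success) {
                for (c = 0; c < n_channels; c++)
                        free(ch[c].rps_flow_id); /* free(NULL) is a no-op */
                return -1;      /* stands in for -ENOMEM */
        }
        return 0;
}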
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -403,6 +403,8 @@ enum efx_sync_events_state {
  * @event_test_cpu: Last CPU to handle interrupt or test event for this channel
  * @irq_count: Number of IRQs since last adaptive moderation decision
  * @irq_mod_score: IRQ moderation score
+ * @rps_flow_id: Flow IDs of filters allocated for accelerated RFS,
+ *	indexed by filter ID
  * @n_rx_tobe_disc: Count of RX_TOBE_DISC errors
  * @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors
  * @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors
@@ -446,6 +448,8 @@ struct efx_channel {
 	unsigned int irq_mod_score;
 #ifdef CONFIG_RFS_ACCEL
 	unsigned int rfs_filters_added;
+#define RPS_FLOW_ID_INVALID 0xFFFFFFFF
+	u32 *rps_flow_id;
 #endif
 
 	unsigned n_rx_tobe_disc;
@@ -889,9 +893,9 @@ struct vfdi_status;
  * @filter_sem: Filter table rw_semaphore, for freeing the table
  * @filter_lock: Filter table lock, for mere content changes
  * @filter_state: Architecture-dependent filter table state
- * @rps_flow_id: Flow IDs of filters allocated for accelerated RFS,
- *	indexed by filter ID
- * @rps_expire_index: Next index to check for expiry in @rps_flow_id
+ * @rps_expire_channel: Next channel to check for expiry
+ * @rps_expire_index: Next index to check for expiry in
+ *	@rps_expire_channel's @rps_flow_id
  * @active_queues: Count of RX and TX queues that haven't been flushed and drained.
  * @rxq_flush_pending: Count of number of receive queues that need to be flushed.
  *	Decremented when the efx_flush_rx_queue() is called.
@@ -1035,7 +1039,7 @@ struct efx_nic {
 	spinlock_t filter_lock;
 	void *filter_state;
 #ifdef CONFIG_RFS_ACCEL
-	u32 *rps_flow_id;
+	unsigned int rps_expire_channel;
 	unsigned int rps_expire_index;
 #endif
 
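Taken together, the header hunks split the state: the flow-ID table moves into each channel, while the NIC keeps only a two-part expiry cursor. A condensed view of the resulting fields (unrelated members omitted, comments added here):

struct efx_channel {
        /* ... */
#ifdef CONFIG_RFS_ACCEL
        unsigned int rfs_filters_added;
#define RPS_FLOW_ID_INVALID 0xFFFFFFFF  /* slot currently holds no flow */
        u32 *rps_flow_id;       /* filter ID -> flow ID, this channel only */
#endif
        /* ... */
};

struct efx_nic {
        /* ... */
#ifdef CONFIG_RFS_ACCEL
        /* Round-robin expiry cursor: scanning resumes at
         * (rps_expire_channel, rps_expire_index) on the next quota. */
        unsigned int rps_expire_channel;
        unsigned int rps_expire_index;
#endif
        /* ... */
};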
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -845,6 +845,9 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
 	struct flow_keys fk;
 	int rc;
 
+	if (flow_id == RPS_FLOW_ID_INVALID)
+		return -EINVAL;
+
 	if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
 		return -EPROTONOSUPPORT;
 
@@ -879,8 +882,8 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
 		return rc;
 
 	/* Remember this so we can check whether to expire the filter later */
-	efx->rps_flow_id[rc] = flow_id;
-	channel = efx_get_channel(efx, skb_get_rx_queue(skb));
+	channel = efx_get_channel(efx, rxq_index);
+	channel->rps_flow_id[rc] = flow_id;
 	++channel->rfs_filters_added;
 
 	if (spec.ether_type == htons(ETH_P_IP))
@@ -902,24 +905,34 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
 bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota)
 {
 	bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index);
-	unsigned int index, size;
+	unsigned int channel_idx, index, size;
 	u32 flow_id;
 
 	if (!spin_trylock_bh(&efx->filter_lock))
 		return false;
 
 	expire_one = efx->type->filter_rfs_expire_one;
+	channel_idx = efx->rps_expire_channel;
 	index = efx->rps_expire_index;
 	size = efx->type->max_rx_ip_filters;
 	while (quota--) {
-		flow_id = efx->rps_flow_id[index];
-		if (expire_one(efx, flow_id, index))
+		struct efx_channel *channel = efx_get_channel(efx, channel_idx);
+		flow_id = channel->rps_flow_id[index];
+
+		if (flow_id != RPS_FLOW_ID_INVALID &&
+		    expire_one(efx, flow_id, index)) {
 			netif_info(efx, rx_status, efx->net_dev,
-				   "expired filter %d [flow %u]\n",
-				   index, flow_id);
-		if (++index == size)
+				   "expired filter %d [queue %u flow %u]\n",
+				   index, channel_idx, flow_id);
+			channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID;
+		}
+		if (++index == size) {
+			if (++channel_idx == efx->n_channels)
+				channel_idx = 0;
 			index = 0;
+		}
 	}
+	efx->rps_expire_channel = channel_idx;
 	efx->rps_expire_index = index;
 
 	spin_unlock_bh(&efx->filter_lock);
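Both halves of this file serve the kernel's accelerated-RFS contract: the stack hands the driver a (rxq_index, flow_id) pair through .ndo_rx_flow_steer(), which is why efx_filter_rfs() now records the ID on the channel for rxq_index, and an expiry scan should only drop a filter once rps_may_expire_flow() confirms the flow on that queue is idle. Keying the table by channel as well as filter index is what makes that question well-posed. A simplified sketch of an expire-one-style check built on that helper (toy_rfs_expire_one and remove_hw_filter are hypothetical; the real efx callback also consults its hardware filter table):

#include <linux/netdevice.h>

/* Ask the RPS core whether the steered flow is still active; if not,
 * the filter can go and the caller marks its rps_flow_id slot
 * RPS_FLOW_ID_INVALID. remove_hw_filter() is a hypothetical stand-in
 * for the driver's filter-table removal. */
static bool toy_rfs_expire_one(struct net_device *dev, u16 rxq_index,
                               u32 flow_id, u16 filter_id)
{
        if (!rps_may_expire_flow(dev, rxq_index, flow_id, filter_id))
                return false;   /* flow still live: keep the filter */

        /* remove_hw_filter(dev, filter_id); */
        return true;
}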