sfc: Cleanup RX event processing
Make efx_process_channel() and falcon_process_eventq() return the number of
packets received rather than updating the quota, consistent with new NAPI.

Since channels and RX queues are mapped one-to-one, remove return value from
falcon_handle_rx_event() and add a warning for events with the wrong RX queue
number.

Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
parent d307402534
commit 42cbe2d73c
3 changed files with 23 additions and 36 deletions
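For context, the "new NAPI" convention the commit message refers to: a driver's poll handler is given a budget, processes at most that many received packets, and returns the number it actually handled; only when it returns less than the budget does it complete NAPI polling and re-enable its interrupt. The sketch below illustrates that contract with a hypothetical "foo" driver -- foo_poll(), foo_process_rx(), foo_enable_interrupts() and struct foo_channel are made-up names, not part of the sfc driver -- and it calls napi_complete(), whereas the sfc code of this era uses the equivalent netif_rx_complete().

#include <linux/netdevice.h>

/* Hypothetical per-channel state, standing in for struct efx_channel. */
struct foo_channel {
	struct napi_struct napi;
	/* ... RX and event queue state ... */
};

/* Stand-ins for efx_process_channel() and the channel IRQ re-enable path. */
int foo_process_rx(struct foo_channel *channel, int budget);
void foo_enable_interrupts(struct foo_channel *channel);

static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_channel *channel =
		container_of(napi, struct foo_channel, napi);
	int work_done;

	/* Handle at most 'budget' received packets; the callee reports
	 * how many it actually processed. */
	work_done = foo_process_rx(channel, budget);

	if (work_done < budget) {
		/* Quota not exhausted: leave polled mode and re-arm the
		 * channel interrupt.  (napi_complete() here; this era of
		 * the sfc driver calls netif_rx_complete() instead.) */
		napi_complete(napi);
		foo_enable_interrupts(channel);
	}

	/* NAPI expects the number of packets processed, never the
	 * remaining quota. */
	return work_done;
}

That is the shape efx_poll() takes after this patch: efx_process_channel() returns the packet count directly, so the caller no longer has to derive it as budget minus the leftover quota.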
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -160,14 +160,16 @@ static void efx_fini_channels(struct efx_nic *efx);
  */
 static int efx_process_channel(struct efx_channel *channel, int rx_quota)
 {
-	int rxdmaqs;
-	struct efx_rx_queue *rx_queue;
+	struct efx_nic *efx = channel->efx;
+	int rx_packets;
 
-	if (unlikely(channel->efx->reset_pending != RESET_TYPE_NONE ||
+	if (unlikely(efx->reset_pending != RESET_TYPE_NONE ||
 		     !channel->enabled))
-		return rx_quota;
+		return 0;
 
-	rxdmaqs = falcon_process_eventq(channel, &rx_quota);
+	rx_packets = falcon_process_eventq(channel, rx_quota);
+	if (rx_packets == 0)
+		return 0;
 
 	/* Deliver last RX packet. */
 	if (channel->rx_pkt) {
@@ -179,16 +181,9 @@ static int efx_process_channel(struct efx_channel *channel, int rx_quota)
 	efx_flush_lro(channel);
 	efx_rx_strategy(channel);
 
-	/* Refill descriptor rings as necessary */
-	rx_queue = &channel->efx->rx_queue[0];
-	while (rxdmaqs) {
-		if (rxdmaqs & 0x01)
-			efx_fast_push_rx_descriptors(rx_queue);
-		rx_queue++;
-		rxdmaqs >>= 1;
-	}
+	efx_fast_push_rx_descriptors(&efx->rx_queue[channel->channel]);
 
-	return rx_quota;
+	return rx_packets;
 }
 
 /* Mark channel as finished processing
@@ -218,14 +213,12 @@ static int efx_poll(struct napi_struct *napi, int budget)
 	struct efx_channel *channel =
 		container_of(napi, struct efx_channel, napi_str);
 	struct net_device *napi_dev = channel->napi_dev;
-	int unused;
 	int rx_packets;
 
 	EFX_TRACE(channel->efx, "channel %d NAPI poll executing on CPU %d\n",
 		  channel->channel, raw_smp_processor_id());
 
-	unused = efx_process_channel(channel, budget);
-	rx_packets = (budget - unused);
+	rx_packets = efx_process_channel(channel, budget);
 
 	if (rx_packets < budget) {
 		/* There is no race here; although napi_disable() will
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -952,10 +952,10 @@ static void falcon_handle_rx_bad_index(struct efx_rx_queue *rx_queue,
  * Also "is multicast" and "matches multicast filter" flags can be used to
  * discard non-matching multicast packets.
  */
-static int falcon_handle_rx_event(struct efx_channel *channel,
-				  const efx_qword_t *event)
+static void falcon_handle_rx_event(struct efx_channel *channel,
+				   const efx_qword_t *event)
 {
-	unsigned int rx_ev_q_label, rx_ev_desc_ptr, rx_ev_byte_cnt;
+	unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
 	unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
 	unsigned expected_ptr;
 	bool rx_ev_pkt_ok, discard = false, checksummed;
@@ -968,16 +968,14 @@ static int falcon_handle_rx_event(struct efx_channel *channel,
 	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, RX_EV_HDR_TYPE);
 	WARN_ON(EFX_QWORD_FIELD(*event, RX_EV_JUMBO_CONT));
 	WARN_ON(EFX_QWORD_FIELD(*event, RX_EV_SOP) != 1);
+	WARN_ON(EFX_QWORD_FIELD(*event, RX_EV_Q_LABEL) != channel->channel);
 
-	rx_ev_q_label = EFX_QWORD_FIELD(*event, RX_EV_Q_LABEL);
-	rx_queue = &efx->rx_queue[rx_ev_q_label];
+	rx_queue = &efx->rx_queue[channel->channel];
 
 	rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, RX_EV_DESC_PTR);
 	expected_ptr = rx_queue->removed_count & FALCON_RXD_RING_MASK;
-	if (unlikely(rx_ev_desc_ptr != expected_ptr)) {
+	if (unlikely(rx_ev_desc_ptr != expected_ptr))
 		falcon_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);
-		return rx_ev_q_label;
-	}
 
 	if (likely(rx_ev_pkt_ok)) {
 		/* If packet is marked as OK and packet type is TCP/IPv4 or
@@ -1003,8 +1001,6 @@ static int falcon_handle_rx_event(struct efx_channel *channel,
 	/* Handle received packet */
 	efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt,
 		      checksummed, discard);
-
-	return rx_ev_q_label;
 }
 
 /* Global events are basically PHY events */
@@ -1109,13 +1105,12 @@ static void falcon_handle_driver_event(struct efx_channel *channel,
 	}
 }
 
-int falcon_process_eventq(struct efx_channel *channel, int *rx_quota)
+int falcon_process_eventq(struct efx_channel *channel, int rx_quota)
 {
 	unsigned int read_ptr;
 	efx_qword_t event, *p_event;
 	int ev_code;
-	int rxq;
-	int rxdmaqs = 0;
+	int rx_packets = 0;
 
 	read_ptr = channel->eventq_read_ptr;
 
@@ -1137,9 +1132,8 @@ int falcon_process_eventq(struct efx_channel *channel, int *rx_quota)
 
 		switch (ev_code) {
 		case RX_IP_EV_DECODE:
-			rxq = falcon_handle_rx_event(channel, &event);
-			rxdmaqs |= (1 << rxq);
-			(*rx_quota)--;
+			falcon_handle_rx_event(channel, &event);
+			++rx_packets;
 			break;
 		case TX_IP_EV_DECODE:
 			falcon_handle_tx_event(channel, &event);
@@ -1166,10 +1160,10 @@ int falcon_process_eventq(struct efx_channel *channel, int *rx_quota)
 		/* Increment read pointer */
 		read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK;
 
-	} while (*rx_quota);
+	} while (rx_packets < rx_quota);
 
 	channel->eventq_read_ptr = read_ptr;
-	return rxdmaqs;
+	return rx_packets;
 }
 
 void falcon_set_int_moderation(struct efx_channel *channel)
diff --git a/drivers/net/sfc/falcon.h b/drivers/net/sfc/falcon.h
--- a/drivers/net/sfc/falcon.h
+++ b/drivers/net/sfc/falcon.h
@@ -57,7 +57,7 @@ extern int falcon_probe_eventq(struct efx_channel *channel);
 extern int falcon_init_eventq(struct efx_channel *channel);
 extern void falcon_fini_eventq(struct efx_channel *channel);
 extern void falcon_remove_eventq(struct efx_channel *channel);
-extern int falcon_process_eventq(struct efx_channel *channel, int *rx_quota);
+extern int falcon_process_eventq(struct efx_channel *channel, int rx_quota);
 extern void falcon_eventq_read_ack(struct efx_channel *channel);
 
 /* Ports */