nfp: add a separate counter for packets with CHECKSUM_COMPLETE

We are currently counting packets with CHECKSUM_COMPLETE as
"hw_rx_csum_ok".  This is confusing.  Add a new counter.
To make sure the new counter fits in the same cacheline, move the
less used error counter to a different location.

Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Reviewed-by: Dirk van der Merwe <dirk.vandermerwe@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 0df57e604c (parent b714295abc)
Jakub Kicinski, 2018-04-02 13:31:20 -07:00, committed by David S. Miller
3 files changed, 13 insertions(+), 9 deletions(-)
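For readers less familiar with the kernel's RX checksum offload interface, the distinction the message draws is between the two "good checksum" outcomes: CHECKSUM_UNNECESSARY means the hardware already validated the checksum(s), while CHECKSUM_COMPLETE means the hardware hands the stack the raw checksum of the packet in skb->csum and lets the stack verify it. The following is an illustrative sketch only (not the driver's actual nfp_net_rx_csum(), whose real change is in the second file below) of how the two outcomes map to the two counters after this patch:

#include <linux/skbuff.h>
#include <linux/u64_stats_sync.h>
#include "nfp_net.h"		/* struct nfp_net_r_vector */

/* Illustrative sketch: count the two "good checksum" outcomes
 * separately instead of conflating them in hw_csum_rx_ok.
 */
static void rx_csum_stats_sketch(struct nfp_net_r_vector *r_vec,
				 struct sk_buff *skb)
{
	u64_stats_update_begin(&r_vec->rx_sync);
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		r_vec->hw_csum_rx_complete++;	/* new, separate counter */
	else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		r_vec->hw_csum_rx_ok++;		/* HW validated the csum */
	u64_stats_update_end(&r_vec->rx_sync);
}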

--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h

@@ -391,6 +391,7 @@ struct nfp_net_rx_ring {
  * @rx_drops: Number of packets dropped on RX due to lack of resources
  * @hw_csum_rx_ok: Counter of packets where the HW checksum was OK
  * @hw_csum_rx_inner_ok: Counter of packets where the inner HW checksum was OK
+ * @hw_csum_rx_complete: Counter of packets with CHECKSUM_COMPLETE reported
  * @hw_csum_rx_error: Counter of packets with bad checksums
  * @tx_sync: Seqlock for atomic updates of TX stats
  * @tx_pkts: Number of Transmitted packets
@@ -434,7 +435,7 @@ struct nfp_net_r_vector {
 	u64 rx_drops;
 	u64 hw_csum_rx_ok;
 	u64 hw_csum_rx_inner_ok;
-	u64 hw_csum_rx_error;
+	u64 hw_csum_rx_complete;
 
 	struct nfp_net_tx_ring *xdp_ring;
@@ -446,6 +447,7 @@ struct nfp_net_r_vector {
 	u64 tx_gather;
 	u64 tx_lso;
 
+	u64 hw_csum_rx_error;
 	u64 rx_replace_buf_alloc_fail;
 	u64 tx_errors;
 	u64 tx_busy;
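The cacheline point in the commit message is what drives the churn in the two hunks above: the counters bumped for every received packet should share one (typically 64-byte) dirty cacheline, so the rarely written hw_csum_rx_error is moved down to the colder group to make room for hw_csum_rx_complete. A standalone sketch of the arithmetic, with a hypothetical struct standing in for the hot RX half of struct nfp_net_r_vector:

/* Hypothetical stand-in for the hot RX counters: six u64 fields are
 * 48 bytes, so adding hw_csum_rx_complete still fits a 64-byte
 * cacheline once hw_csum_rx_error is relegated elsewhere.
 */
struct rx_hot_stats_sketch {
	unsigned long long rx_pkts;
	unsigned long long rx_bytes;
	unsigned long long rx_drops;
	unsigned long long hw_csum_rx_ok;
	unsigned long long hw_csum_rx_inner_ok;
	unsigned long long hw_csum_rx_complete;
};

_Static_assert(sizeof(struct rx_hot_stats_sketch) <= 64,
	       "hot RX counters should share one cacheline");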

--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c

@@ -1406,7 +1406,7 @@ static void nfp_net_rx_csum(struct nfp_net_dp *dp,
 		skb->ip_summed = meta->csum_type;
 		skb->csum = meta->csum;
 		u64_stats_update_begin(&r_vec->rx_sync);
-		r_vec->hw_csum_rx_ok++;
+		r_vec->hw_csum_rx_complete++;
 		u64_stats_update_end(&r_vec->rx_sync);
 		return;
 	}

--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c

@@ -179,7 +179,7 @@ static const struct nfp_et_stat nfp_mac_et_stats[] = {
 
 #define NN_ET_GLOBAL_STATS_LEN ARRAY_SIZE(nfp_net_et_stats)
 #define NN_ET_SWITCH_STATS_LEN 9
-#define NN_RVEC_GATHER_STATS 8
+#define NN_RVEC_GATHER_STATS 9
 #define NN_RVEC_PER_Q_STATS 3
 
 static void nfp_net_get_nspinfo(struct nfp_app *app, char *version)
@@ -468,6 +468,7 @@ static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data)
 	data = nfp_pr_et(data, "hw_rx_csum_ok");
 	data = nfp_pr_et(data, "hw_rx_csum_inner_ok");
+	data = nfp_pr_et(data, "hw_rx_csum_complete");
 	data = nfp_pr_et(data, "hw_rx_csum_err");
 	data = nfp_pr_et(data, "rx_replace_buf_alloc_fail");
 	data = nfp_pr_et(data, "hw_tx_csum");
@@ -493,18 +494,19 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
 			data[0] = nn->r_vecs[i].rx_pkts;
 			tmp[0] = nn->r_vecs[i].hw_csum_rx_ok;
 			tmp[1] = nn->r_vecs[i].hw_csum_rx_inner_ok;
-			tmp[2] = nn->r_vecs[i].hw_csum_rx_error;
-			tmp[3] = nn->r_vecs[i].rx_replace_buf_alloc_fail;
+			tmp[2] = nn->r_vecs[i].hw_csum_rx_complete;
+			tmp[3] = nn->r_vecs[i].hw_csum_rx_error;
+			tmp[4] = nn->r_vecs[i].rx_replace_buf_alloc_fail;
 		} while (u64_stats_fetch_retry(&nn->r_vecs[i].rx_sync, start));
 
 		do {
 			start = u64_stats_fetch_begin(&nn->r_vecs[i].tx_sync);
 			data[1] = nn->r_vecs[i].tx_pkts;
 			data[2] = nn->r_vecs[i].tx_busy;
-			tmp[4] = nn->r_vecs[i].hw_csum_tx;
-			tmp[5] = nn->r_vecs[i].hw_csum_tx_inner;
-			tmp[6] = nn->r_vecs[i].tx_gather;
-			tmp[7] = nn->r_vecs[i].tx_lso;
+			tmp[5] = nn->r_vecs[i].hw_csum_tx;
+			tmp[6] = nn->r_vecs[i].hw_csum_tx_inner;
+			tmp[7] = nn->r_vecs[i].tx_gather;
+			tmp[8] = nn->r_vecs[i].tx_lso;
 		} while (u64_stats_fetch_retry(&nn->r_vecs[i].tx_sync, start));
 
 		data += NN_RVEC_PER_Q_STATS;
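The do/while loops in this last hunk are the standard u64_stats seqcount reader pattern: the snapshot is retried until u64_stats_fetch_retry() reports that no writer interleaved, which is what makes these 64-bit counters safe to read without a lock even on 32-bit machines where the load is not atomic. A minimal sketch of the same pattern reading just the new counter (the function name is illustrative, not code from this patch):

#include <linux/u64_stats_sync.h>
#include "nfp_net.h"		/* struct nfp_net_r_vector */

/* Sketch: take a consistent snapshot of one per-vector counter,
 * retrying if a writer ran concurrently with the read.
 */
static u64 read_hw_csum_rx_complete(struct nfp_net_r_vector *r_vec)
{
	unsigned int start;
	u64 val;

	do {
		start = u64_stats_fetch_begin(&r_vec->rx_sync);
		val = r_vec->hw_csum_rx_complete;
	} while (u64_stats_fetch_retry(&r_vec->rx_sync, start));

	return val;
}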