net: add a core netdev->rx_dropped counter
In various situations, a device provides a packet to our stack and we
drop it before it enters the protocol stack:
- softnet backlog full (accounted in /proc/net/softnet_stat)
- bad vlan tag (not accounted)
- unknown/unregistered protocol (not accounted)
We can handle a per-device counter of such dropped frames at the core level,
and automatically add it to the device-provided stats (rx_dropped), so
that standard tools can be used (ifconfig, ip link, cat /proc/net/dev).
This is a generalization of commit 8990f468a (net: rx_dropped accounting), thus reverting it.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
a00eac0c45
commit
caf586e5f2
11 changed files with 26 additions and 34 deletions
|
@ -64,7 +64,6 @@ struct pcpu_lstats {
|
|||
u64 packets;
|
||||
u64 bytes;
|
||||
struct u64_stats_sync syncp;
|
||||
unsigned long drops;
|
||||
};
|
||||
|
||||
/*
|
||||
|
@ -90,8 +89,7 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb,
|
|||
lb_stats->bytes += len;
|
||||
lb_stats->packets++;
|
||||
u64_stats_update_end(&lb_stats->syncp);
|
||||
} else
|
||||
lb_stats->drops++;
|
||||
}
|
||||
|
||||
return NETDEV_TX_OK;
|
||||
}
|
||||
|
@ -101,7 +99,6 @@ static struct rtnl_link_stats64 *loopback_get_stats64(struct net_device *dev,
|
|||
{
|
||||
u64 bytes = 0;
|
||||
u64 packets = 0;
|
||||
u64 drops = 0;
|
||||
int i;
|
||||
|
||||
for_each_possible_cpu(i) {
|
||||
|
@ -115,14 +112,11 @@ static struct rtnl_link_stats64 *loopback_get_stats64(struct net_device *dev,
|
|||
tbytes = lb_stats->bytes;
|
||||
tpackets = lb_stats->packets;
|
||||
} while (u64_stats_fetch_retry(&lb_stats->syncp, start));
|
||||
drops += lb_stats->drops;
|
||||
bytes += tbytes;
|
||||
packets += tpackets;
|
||||
}
|
||||
stats->rx_packets = packets;
|
||||
stats->tx_packets = packets;
|
||||
stats->rx_dropped = drops;
|
||||
stats->rx_errors = drops;
|
||||
stats->rx_bytes = bytes;
|
||||
stats->tx_bytes = bytes;
|
||||
return stats;
|
||||
|
|
|
@ -884,6 +884,9 @@ struct net_device {
|
|||
int iflink;
|
||||
|
||||
struct net_device_stats stats;
|
||||
atomic_long_t rx_dropped; /* dropped packets by core network
|
||||
* Do not use this in drivers.
|
||||
*/
|
||||
|
||||
#ifdef CONFIG_WIRELESS_EXT
|
||||
/* List of functions to handle Wireless Extensions (instead of ioctl).
|
||||
|
|
|
@ -25,7 +25,6 @@ struct vlan_priority_tci_mapping {
|
|||
* @rx_multicast: number of received multicast packets
|
||||
* @syncp: synchronization point for 64bit counters
|
||||
* @rx_errors: number of errors
|
||||
* @rx_dropped: number of dropped packets
|
||||
*/
|
||||
struct vlan_rx_stats {
|
||||
u64 rx_packets;
|
||||
|
@ -33,7 +32,6 @@ struct vlan_rx_stats {
|
|||
u64 rx_multicast;
|
||||
struct u64_stats_sync syncp;
|
||||
unsigned long rx_errors;
|
||||
unsigned long rx_dropped;
|
||||
};
|
||||
|
||||
/**
|
||||
|
|
|
@ -33,6 +33,7 @@ int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
|
|||
return polling ? netif_receive_skb(skb) : netif_rx(skb);
|
||||
|
||||
drop:
|
||||
atomic_long_inc(&skb->dev->rx_dropped);
|
||||
dev_kfree_skb_any(skb);
|
||||
return NET_RX_DROP;
|
||||
}
|
||||
|
@ -123,6 +124,7 @@ vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp,
|
|||
return dev_gro_receive(napi, skb);
|
||||
|
||||
drop:
|
||||
atomic_long_inc(&skb->dev->rx_dropped);
|
||||
return GRO_DROP;
|
||||
}
|
||||
|
||||
|
|
|
@ -225,16 +225,15 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
|
|||
}
|
||||
}
|
||||
|
||||
if (unlikely(netif_rx(skb) == NET_RX_DROP)) {
|
||||
if (rx_stats)
|
||||
rx_stats->rx_dropped++;
|
||||
}
|
||||
netif_rx(skb);
|
||||
|
||||
rcu_read_unlock();
|
||||
return NET_RX_SUCCESS;
|
||||
|
||||
err_unlock:
|
||||
rcu_read_unlock();
|
||||
err_free:
|
||||
atomic_long_inc(&dev->rx_dropped);
|
||||
kfree_skb(skb);
|
||||
return NET_RX_DROP;
|
||||
}
|
||||
|
@ -846,15 +845,13 @@ static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, st
|
|||
accum.rx_packets += rxpackets;
|
||||
accum.rx_bytes += rxbytes;
|
||||
accum.rx_multicast += rxmulticast;
|
||||
/* rx_errors, rx_dropped are ulong, not protected by syncp */
|
||||
/* rx_errors is ulong, not protected by syncp */
|
||||
accum.rx_errors += p->rx_errors;
|
||||
accum.rx_dropped += p->rx_dropped;
|
||||
}
|
||||
stats->rx_packets = accum.rx_packets;
|
||||
stats->rx_bytes = accum.rx_bytes;
|
||||
stats->rx_errors = accum.rx_errors;
|
||||
stats->multicast = accum.rx_multicast;
|
||||
stats->rx_dropped = accum.rx_dropped;
|
||||
}
|
||||
return stats;
|
||||
}
|
||||
|
|
|
@ -1483,8 +1483,9 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
|
|||
skb_orphan(skb);
|
||||
nf_reset(skb);
|
||||
|
||||
if (!(dev->flags & IFF_UP) ||
|
||||
(skb->len > (dev->mtu + dev->hard_header_len))) {
|
||||
if (unlikely(!(dev->flags & IFF_UP) ||
|
||||
(skb->len > (dev->mtu + dev->hard_header_len)))) {
|
||||
atomic_long_inc(&dev->rx_dropped);
|
||||
kfree_skb(skb);
|
||||
return NET_RX_DROP;
|
||||
}
|
||||
|
@ -2548,6 +2549,7 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
|
|||
|
||||
local_irq_restore(flags);
|
||||
|
||||
atomic_long_inc(&skb->dev->rx_dropped);
|
||||
kfree_skb(skb);
|
||||
return NET_RX_DROP;
|
||||
}
|
||||
|
@ -2995,6 +2997,7 @@ static int __netif_receive_skb(struct sk_buff *skb)
|
|||
if (pt_prev) {
|
||||
ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
|
||||
} else {
|
||||
atomic_long_inc(&skb->dev->rx_dropped);
|
||||
kfree_skb(skb);
|
||||
/* Jamal, now you will not able to escape explaining
|
||||
* me how you were going to use this. :-)
|
||||
|
@ -5429,14 +5432,14 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
|
|||
|
||||
if (ops->ndo_get_stats64) {
|
||||
memset(storage, 0, sizeof(*storage));
|
||||
return ops->ndo_get_stats64(dev, storage);
|
||||
}
|
||||
if (ops->ndo_get_stats) {
|
||||
ops->ndo_get_stats64(dev, storage);
|
||||
} else if (ops->ndo_get_stats) {
|
||||
netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
|
||||
return storage;
|
||||
} else {
|
||||
netdev_stats_to_stats64(storage, &dev->stats);
|
||||
dev_txq_stats_fold(dev, storage);
|
||||
}
|
||||
netdev_stats_to_stats64(storage, &dev->stats);
|
||||
dev_txq_stats_fold(dev, storage);
|
||||
storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
|
||||
return storage;
|
||||
}
|
||||
EXPORT_SYMBOL(dev_get_stats);
|
||||
|
|
|
@ -679,8 +679,7 @@ static int ipgre_rcv(struct sk_buff *skb)
|
|||
skb_reset_network_header(skb);
|
||||
ipgre_ecn_decapsulate(iph, skb);
|
||||
|
||||
if (netif_rx(skb) == NET_RX_DROP)
|
||||
tunnel->dev->stats.rx_dropped++;
|
||||
netif_rx(skb);
|
||||
|
||||
rcu_read_unlock();
|
||||
return 0;
|
||||
|
|
|
@ -414,8 +414,7 @@ static int ipip_rcv(struct sk_buff *skb)
|
|||
|
||||
ipip_ecn_decapsulate(iph, skb);
|
||||
|
||||
if (netif_rx(skb) == NET_RX_DROP)
|
||||
tunnel->dev->stats.rx_dropped++;
|
||||
netif_rx(skb);
|
||||
|
||||
rcu_read_unlock();
|
||||
return 0;
|
||||
|
|
|
@ -768,8 +768,7 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
|
|||
|
||||
dscp_ecn_decapsulate(t, ipv6h, skb);
|
||||
|
||||
if (netif_rx(skb) == NET_RX_DROP)
|
||||
t->dev->stats.rx_dropped++;
|
||||
netif_rx(skb);
|
||||
|
||||
rcu_read_unlock();
|
||||
return 0;
|
||||
|
|
|
@ -666,8 +666,7 @@ static int pim6_rcv(struct sk_buff *skb)
|
|||
|
||||
skb_tunnel_rx(skb, reg_dev);
|
||||
|
||||
if (netif_rx(skb) == NET_RX_DROP)
|
||||
reg_dev->stats.rx_dropped++;
|
||||
netif_rx(skb);
|
||||
|
||||
dev_put(reg_dev);
|
||||
return 0;
|
||||
|
|
|
@ -600,8 +600,7 @@ static int ipip6_rcv(struct sk_buff *skb)
|
|||
|
||||
ipip6_ecn_decapsulate(iph, skb);
|
||||
|
||||
if (netif_rx(skb) == NET_RX_DROP)
|
||||
tunnel->dev->stats.rx_dropped++;
|
||||
netif_rx(skb);
|
||||
|
||||
rcu_read_unlock();
|
||||
return 0;
|
||||
|
|
Loading…
Reference in a new issue