udp: enable busy polling for all sockets
UDP busy polling is restricted to connected UDP sockets.
This is because sk_busy_loop() only takes care of one NAPI context.

There are cases where it could be extended:

1) Some hosts receive traffic on a single NIC, with one RX queue.

2) Some applications use SO_REUSEPORT and an associated BPF filter
   to split the incoming traffic onto one UDP socket per RX queue/thread/cpu.

3) Some UDP sockets are used to send/receive traffic for one flow, but
   they do not bother with connect().

This patch records the napi_id of the first received skb, giving more
reach to busy polling.

Tested:

lpaa23:~# echo 70 >/proc/sys/net/core/busy_read
lpaa24:~# echo 70 >/proc/sys/net/core/busy_read

lpaa23:~# for f in `seq 1 10`; do ./super_netperf 1 -H lpaa24 -t UDP_RR -l 5; done

Before patch :
27867 28870 37324 41060 41215
36764 36838 44455 41282 43843

After patch :
73920 73213 70147 74845 71697
68315 68028 75219 70082 73707

Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Willem de Bruijn <willemb@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit e68b6e50fa
parent fcd2b0da73
3 changed files with 23 additions and 9 deletions
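As context for case 3 above, here is a minimal userspace sketch. It is not part of the patch, and the port number and buffer size are arbitrary: an unconnected UDP echo socket that opts into busy polling with the per-socket SO_BUSY_POLL option (Linux 3.11+) rather than the global busy_read sysctl used in the Tested section. Before this patch such a socket never busy-polled, since nothing ever set its sk_napi_id; with the patch, the first received datagram latches one.

/* Hypothetical userspace sketch, not part of the patch: an unconnected
 * UDP echo socket that requests busy polling. Error handling omitted.
 */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_port = htons(9000),	/* arbitrary example port */
		.sin_addr.s_addr = htonl(INADDR_ANY),
	};
	int busy_usec = 70;		/* same budget as the Tested section */
	char buf[2048];
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
	/* per-socket equivalent of /proc/sys/net/core/busy_read */
	setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL, &busy_usec, sizeof(busy_usec));

	for (;;) {
		struct sockaddr_in peer;
		socklen_t plen = sizeof(peer);
		ssize_t n = recvfrom(fd, buf, sizeof(buf), 0,
				     (struct sockaddr *)&peer, &plen);

		if (n < 0)
			break;
		/* no connect(): with this patch the first datagram above
		 * recorded a napi_id, so recvfrom() could busy-poll */
		sendto(fd, buf, n, 0, (struct sockaddr *)&peer, plen);
	}
	close(fd);
	return 0;
}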
diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
@@ -80,11 +80,6 @@ static inline void skb_mark_napi_id(struct sk_buff *skb,
 	skb->napi_id = napi->napi_id;
 }
 
-/* used in the protocol hanlder to propagate the napi_id to the socket */
-static inline void sk_mark_napi_id(struct sock *sk, struct sk_buff *skb)
-{
-	sk->sk_napi_id = skb->napi_id;
-}
 
 #else /* CONFIG_NET_RX_BUSY_POLL */
 static inline unsigned long net_busy_loop_on(void)
@@ -107,10 +102,6 @@ static inline void skb_mark_napi_id(struct sk_buff *skb,
 {
 }
 
-static inline void sk_mark_napi_id(struct sock *sk, struct sk_buff *skb)
-{
-}
-
 static inline bool busy_loop_timeout(unsigned long end_time)
 {
 	return true;
@@ -122,4 +113,23 @@ static inline bool sk_busy_loop(struct sock *sk, int nonblock)
 }
 
 #endif /* CONFIG_NET_RX_BUSY_POLL */
+
+/* used in the protocol hanlder to propagate the napi_id to the socket */
+static inline void sk_mark_napi_id(struct sock *sk, const struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_RX_BUSY_POLL
+	sk->sk_napi_id = skb->napi_id;
+#endif
+}
+
+/* variant used for unconnected sockets */
+static inline void sk_mark_napi_id_once(struct sock *sk,
+					const struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_RX_BUSY_POLL
+	if (!sk->sk_napi_id)
+		sk->sk_napi_id = skb->napi_id;
+#endif
+}
+
 #endif /* _LINUX_NET_BUSY_POLL_H */
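Two things are worth noting in the rework above. First, sk_mark_napi_id() moves below the #ifdef/#else pair and carries the guard inside its body, so callers no longer need their own CONFIG_NET_RX_BUSY_POLL checks. Second, the new sk_mark_napi_id_once() records a napi_id only if none is set yet: sk_busy_loop() can spin on a single NAPI context, so an unconnected socket simply sticks with the first one it sees. The two udp.c hunks below apply exactly this pattern; as a self-contained illustration, a hypothetical datagram receive path (not in this patch) might look like:

#include <net/busy_poll.h>

/* Hypothetical receive path, illustration only. No #ifdef is needed at
 * the call sites: both helpers compile to no-ops when busy polling is
 * configured out.
 */
static int example_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	if (sk->sk_state == TCP_ESTABLISHED)
		/* connected: all packets belong to one flow, so track
		 * whatever NAPI context is currently delivering it */
		sk_mark_napi_id(sk, skb);
	else
		/* unconnected: latch the first NAPI context and keep it,
		 * since sk_busy_loop() can only poll one */
		sk_mark_napi_id_once(sk, skb);

	return sock_queue_rcv_skb(sk, skb);
}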
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
@@ -1569,6 +1569,8 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 		sock_rps_save_rxhash(sk, skb);
 		sk_mark_napi_id(sk, skb);
 		sk_incoming_cpu_update(sk);
+	} else {
+		sk_mark_napi_id_once(sk, skb);
 	}
 
 	rc = __udp_enqueue_schedule_skb(sk, skb);
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
@@ -519,6 +519,8 @@ static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 		sock_rps_save_rxhash(sk, skb);
 		sk_mark_napi_id(sk, skb);
 		sk_incoming_cpu_update(sk);
+	} else {
+		sk_mark_napi_id_once(sk, skb);
 	}
 
 	rc = __udp_enqueue_schedule_skb(sk, skb);
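Case 2 from the commit message, the SO_REUSEPORT setup, is where sk_mark_napi_id_once() pays off most clearly: each per-CPU socket latches the napi_id of the RX queue feeding its CPU. A sketch of that setup follows; it is illustrative userspace code, not part of the patch (SO_ATTACH_REUSEPORT_CBPF needs Linux 4.5+), and it assumes the group creates one socket per CPU in CPU order.

/* Illustrative sketch of case 2, not part of the patch: one UDP socket
 * per CPU in a SO_REUSEPORT group, plus a two-instruction classic BPF
 * program that returns the receiving CPU number, which the group uses
 * as the index of the destination socket. Error handling omitted.
 */
#include <linux/filter.h>
#include <netinet/in.h>
#include <stdint.h>
#include <sys/socket.h>

int open_worker_socket(uint16_t port, int attach_filter)
{
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_port = htons(port),
		.sin_addr.s_addr = htonl(INADDR_ANY),
	};
	/* "A <- current CPU; return A" */
	struct sock_filter code[] = {
		BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF + SKF_AD_CPU),
		BPF_STMT(BPF_RET | BPF_A, 0),
	};
	struct sock_fprog prog = { .len = 2, .filter = code };
	int one = 1;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
	if (attach_filter)	/* one filter serves the whole group */
		setsockopt(fd, SOL_SOCKET, SO_ATTACH_REUSEPORT_CBPF,
			   &prog, sizeof(prog));
	return fd;
}

With this patch, each of these unconnected sockets records the napi_id of the first packet steered to it, so every worker can busy-poll its own RX queue without ever calling connect().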