net: speedup udp receive path
Since commit 95766fff ([UDP]: Add memory accounting.), each received
packet needs one extra lock_sock()/release_sock() pair. This adds
latency because of possible backlog handling. Then later, ticket
spinlocks added yet another latency source under DDoS.
This patch introduces lock_sock_bh() and unlock_sock_bh()
synchronization primitives, avoiding one atomic operation and backlog
processing.
skb_free_datagram_locked() uses them instead of the full-blown
lock_sock()/release_sock() pair. The skb is orphaned inside the locked
section for proper socket memory reclaim, and finally freed outside of
it. The UDP receive path now takes the socket spinlock only once.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 4b0b72f7dd
parent cfc1fbb079
4 changed files with 25 additions and 11 deletions
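For context on the "one extra atomic operation and backlog processing"
claim: lock_sock() takes the slock, waits for and sets the
sk_lock.owned flag, then drops the slock again, and release_sock()
repeats that round-trip and additionally replays any backlog packets
queued by softirqs in between. Below is a simplified sketch for
illustration only, not the actual code in net/core/sock.c;
sketch_process_backlog() is a hypothetical stand-in:

	/* Illustration only -- simplified from net/core/sock.c. */
	static void sketch_process_backlog(struct sock *sk); /* hypothetical */

	static void sketch_lock_sock(struct sock *sk)
	{
		spin_lock_bh(&sk->sk_lock.slock);	/* slock round-trip #1 */
		/* (the real code sleeps here while sk->sk_lock.owned) */
		sk->sk_lock.owned = 1;
		spin_unlock_bh(&sk->sk_lock.slock);
	}

	static void sketch_release_sock(struct sock *sk)
	{
		spin_lock_bh(&sk->sk_lock.slock);	/* slock round-trip #2 */
		if (sk->sk_backlog.head)		/* queued by softirq meanwhile */
			sketch_process_backlog(sk);
		sk->sk_lock.owned = 0;
		spin_unlock_bh(&sk->sk_lock.slock);
	}

The lock_sock_bh()/unlock_sock_bh() pair introduced below reduces all
of this to a single spin_lock_bh()/spin_unlock_bh() on sk_lock.slock,
with no owned-flag handling and no backlog replay.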
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1021,6 +1021,16 @@ extern void release_sock(struct sock *sk);
 				SINGLE_DEPTH_NESTING)
 #define bh_unlock_sock(__sk)	spin_unlock(&((__sk)->sk_lock.slock))
 
+static inline void lock_sock_bh(struct sock *sk)
+{
+	spin_lock_bh(&sk->sk_lock.slock);
+}
+
+static inline void unlock_sock_bh(struct sock *sk)
+{
+	spin_unlock_bh(&sk->sk_lock.slock);
+}
+
 extern struct sock		*sk_alloc(struct net *net, int family,
 					  gfp_t priority,
 					  struct proto *prot);
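A minimal usage sketch of the new helpers (a hypothetical caller, not
part of this patch): only the socket memory accounting runs under the
slock, with BHs disabled so the softirq receive path cannot contend on
the same CPU.

	/* Hypothetical example: reclaim forward-allocated memory for @sk
	 * without the ownership/backlog machinery of lock_sock().
	 */
	static void example_udp_reclaim(struct sock *sk)
	{
		lock_sock_bh(sk);		/* one spin_lock_bh(), nothing more */
		sk_mem_reclaim_partial(sk);	/* accounting work under the lock */
		unlock_sock_bh(sk);
	}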
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -229,9 +229,13 @@ EXPORT_SYMBOL(skb_free_datagram);
 
 void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb)
 {
-	lock_sock(sk);
-	skb_free_datagram(sk, skb);
-	release_sock(sk);
+	lock_sock_bh(sk);
+	skb_orphan(skb);
+	sk_mem_reclaim_partial(sk);
+	unlock_sock_bh(sk);
+
+	/* skb is now orphaned, might be freed outside of locked section */
+	consume_skb(skb);
 }
 EXPORT_SYMBOL(skb_free_datagram_locked);
 
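Why the free can move outside the locked section: for a packet on a
receive queue, skb->destructor is sock_rfree(), and that destructor is
the only part of freeing the skb that touches the socket's accounting.
skb_orphan() runs it under the lock and detaches the skb from the
socket, so the later consume_skb() never dereferences sk. A simplified
sketch of the two helpers as they looked at the time (see
include/linux/skbuff.h and net/core/sock.c):

	void sock_rfree(struct sk_buff *skb)
	{
		struct sock *sk = skb->sk;

		atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
		sk_mem_uncharge(sk, skb->truesize);
	}

	static inline void skb_orphan(struct sk_buff *skb)
	{
		if (skb->destructor)
			skb->destructor(skb);	/* sock_rfree() runs here, under the lock */
		skb->destructor = NULL;
		skb->sk = NULL;			/* skb no longer references the socket */
	}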
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1062,10 +1062,10 @@ static unsigned int first_packet_length(struct sock *sk)
 	spin_unlock_bh(&rcvq->lock);
 
 	if (!skb_queue_empty(&list_kill)) {
-		lock_sock(sk);
+		lock_sock_bh(sk);
 		__skb_queue_purge(&list_kill);
 		sk_mem_reclaim_partial(sk);
-		release_sock(sk);
+		unlock_sock_bh(sk);
 	}
 	return res;
 }
@@ -1196,10 +1196,10 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 	return err;
 
 csum_copy_err:
-	lock_sock(sk);
+	lock_sock_bh(sk);
 	if (!skb_kill_datagram(sk, skb, flags))
 		UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
-	release_sock(sk);
+	unlock_sock_bh(sk);
 
 	if (noblock)
 		return -EAGAIN;
@@ -1624,9 +1624,9 @@ int udp_rcv(struct sk_buff *skb)
 
 void udp_destroy_sock(struct sock *sk)
 {
-	lock_sock(sk);
+	lock_sock_bh(sk);
 	udp_flush_pending_frames(sk);
-	release_sock(sk);
+	unlock_sock_bh(sk);
 }
 
 /*
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -424,7 +424,7 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
 	return err;
 
 csum_copy_err:
-	lock_sock(sk);
+	lock_sock_bh(sk);
 	if (!skb_kill_datagram(sk, skb, flags)) {
 		if (is_udp4)
 			UDP_INC_STATS_USER(sock_net(sk),
@@ -433,7 +433,7 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
 			UDP6_INC_STATS_USER(sock_net(sk),
 					UDP_MIB_INERRORS, is_udplite);
 	}
-	release_sock(sk);
+	unlock_sock_bh(sk);
 
 	if (flags & MSG_DONTWAIT)
 		return -EAGAIN;