udp: Switch to ip_finish_skb

This patch converts UDP to use the new ip_finish_skb API.  This
will then allow us to more easily use ip_make_skb, which lets UDP
run without a socket lock.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Acked-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit f6b9664f8b (parent 1c32c5ad6f)
Author:     Herbert Xu
Committer:  David S. Miller
Date:       2011-03-01 02:36:48 +0000
3 files changed, 99 insertions(+), 59 deletions(-)
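
In outline, the corked-send path now splits into two steps: ip_finish_skb() collapses the socket's pending write queue into a single skb with the IP header filled in, and udp_send_skb() appends the UDP header and checksum before handing the finished skb to ip_send_skb(). The following is a minimal sketch of the resulting udp_push_pending_frames(), condensed from the net/ipv4/udp.c hunk below; statistics accounting and the -ENOBUFS handling are trimmed, and the flowi/cork field names assume the 2.6.38-era definitions used in this patch.

	/*
	 * Sketch only: condensed from the diff below, not the verbatim
	 * kernel source.
	 */
	static int udp_push_pending_frames(struct sock *sk)
	{
		struct udp_sock *up = udp_sk(sk);
		struct inet_sock *inet = inet_sk(sk);
		struct flowi *fl = &inet->cork.fl;
		struct sk_buff *skb;
		int err = 0;

		/* Step 1: build one skb from the cork queue; nothing is sent yet. */
		skb = ip_finish_skb(sk);
		if (!skb)
			goto out;

		/* Step 2: fill in the UDP header and checksum, then transmit. */
		err = udp_send_skb(skb, fl->fl4_dst, fl->fl_ip_dport);

	out:
		up->len = 0;
		up->pending = 0;
		return err;
	}

Because udp_send_skb() only needs a finished skb, a later caller of ip_make_skb() can build and send a datagram in one shot, which is what makes the lockless path mentioned above possible.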

diff --git a/include/net/udp.h b/include/net/udp.h
--- a/include/net/udp.h
+++ b/include/net/udp.h

@@ -144,6 +144,17 @@ static inline __wsum udp_csum_outgoing(struct sock *sk, struct sk_buff *skb)
 	return csum;
 }
 
+static inline __wsum udp_csum(struct sk_buff *skb)
+{
+	__wsum csum = csum_partial(skb_transport_header(skb),
+				   sizeof(struct udphdr), skb->csum);
+
+	for (skb = skb_shinfo(skb)->frag_list; skb; skb = skb->next) {
+		csum = csum_add(csum, skb->csum);
+	}
+	return csum;
+}
+
 /* hash routines shared between UDPv4/6 and UDP-Litev4/6 */
 static inline void udp_lib_hash(struct sock *sk)
 {

diff --git a/include/net/udplite.h b/include/net/udplite.h
--- a/include/net/udplite.h
+++ b/include/net/udplite.h

@@ -115,6 +115,18 @@ static inline __wsum udplite_csum_outgoing(struct sock *sk, struct sk_buff *skb)
 	return csum;
 }
 
+static inline __wsum udplite_csum(struct sk_buff *skb)
+{
+	struct sock *sk = skb->sk;
+	int cscov = udplite_sender_cscov(udp_sk(sk), udp_hdr(skb));
+	const int off = skb_transport_offset(skb);
+	const int len = skb->len - off;
+
+	skb->ip_summed = CHECKSUM_NONE;	/* no HW support for checksumming */
+
+	return skb_checksum(skb, off, min(cscov, len), 0);
+}
+
 extern void udplite4_register(void);
 extern int udplite_get_port(struct sock *sk, unsigned short snum,
 			int (*scmp)(const struct sock *, const struct sock *));

diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c

@@ -663,46 +663,106 @@ void udp_flush_pending_frames(struct sock *sk)
 EXPORT_SYMBOL(udp_flush_pending_frames);
 
 /**
- * udp4_hwcsum_outgoing - handle outgoing HW checksumming
- * @sk:  socket we are sending on
+ * udp4_hwcsum - handle outgoing HW checksumming
  * @skb: sk_buff containing the filled-in UDP header
  *       (checksum field must be zeroed out)
+ * @src: source IP address
+ * @dst: destination IP address
  */
-static void udp4_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
-				 __be32 src, __be32 dst, int len)
+static void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst)
 {
-	unsigned int offset;
 	struct udphdr *uh = udp_hdr(skb);
+	struct sk_buff *frags = skb_shinfo(skb)->frag_list;
+	int offset = skb_transport_offset(skb);
+	int len = skb->len - offset;
+	int hlen = len;
 	__wsum csum = 0;
 
-	if (skb_queue_len(&sk->sk_write_queue) == 1) {
+	if (!frags) {
 		/*
 		 * Only one fragment on the socket.
 		 */
 		skb->csum_start = skb_transport_header(skb) - skb->head;
 		skb->csum_offset = offsetof(struct udphdr, check);
-		uh->check = ~csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, 0);
+		uh->check = ~csum_tcpudp_magic(src, dst, len,
+					       IPPROTO_UDP, 0);
 	} else {
 		/*
 		 * HW-checksum won't work as there are two or more
 		 * fragments on the socket so that all csums of sk_buffs
 		 * should be together
 		 */
-		offset = skb_transport_offset(skb);
-		skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
+		do {
+			csum = csum_add(csum, frags->csum);
+			hlen -= frags->len;
+		} while ((frags = frags->next));
 
+		csum = skb_checksum(skb, offset, hlen, csum);
 		skb->ip_summed = CHECKSUM_NONE;
 
-		skb_queue_walk(&sk->sk_write_queue, skb) {
-			csum = csum_add(csum, skb->csum);
-		}
-
 		uh->check = csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, csum);
 		if (uh->check == 0)
 			uh->check = CSUM_MANGLED_0;
 	}
 }
 
+static int udp_send_skb(struct sk_buff *skb, __be32 daddr, __be32 dport)
+{
+	struct sock *sk = skb->sk;
+	struct inet_sock *inet = inet_sk(sk);
+	struct udphdr *uh;
+	struct rtable *rt = (struct rtable *)skb_dst(skb);
+	int err = 0;
+	int is_udplite = IS_UDPLITE(sk);
+	int offset = skb_transport_offset(skb);
+	int len = skb->len - offset;
+	__wsum csum = 0;
+
+	/*
+	 * Create a UDP header
+	 */
+	uh = udp_hdr(skb);
+	uh->source = inet->inet_sport;
+	uh->dest = dport;
+	uh->len = htons(len);
+	uh->check = 0;
+
+	if (is_udplite)					/* UDP-Lite */
+		csum = udplite_csum(skb);
+
+	else if (sk->sk_no_check == UDP_CSUM_NOXMIT) {	/* UDP csum disabled */
+
+		skb->ip_summed = CHECKSUM_NONE;
+		goto send;
+
+	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
+
+		udp4_hwcsum(skb, rt->rt_src, daddr);
+		goto send;
+
+	} else
+		csum = udp_csum(skb);
+
+	/* add protocol-dependent pseudo-header */
+	uh->check = csum_tcpudp_magic(rt->rt_src, daddr, len,
+				      sk->sk_protocol, csum);
+	if (uh->check == 0)
+		uh->check = CSUM_MANGLED_0;
+
+send:
+	err = ip_send_skb(skb);
+	if (err) {
+		if (err == -ENOBUFS && !inet->recverr) {
+			UDP_INC_STATS_USER(sock_net(sk),
+					   UDP_MIB_SNDBUFERRORS, is_udplite);
+			err = 0;
+		}
+	} else
+		UDP_INC_STATS_USER(sock_net(sk),
+				   UDP_MIB_OUTDATAGRAMS, is_udplite);
+	return err;
+}
+
 /*
  * Push out all pending data as one UDP datagram. Socket is locked.
  */
@@ -712,57 +772,14 @@ static int udp_push_pending_frames(struct sock *sk)
 	struct inet_sock *inet = inet_sk(sk);
 	struct flowi *fl = &inet->cork.fl;
 	struct sk_buff *skb;
-	struct udphdr *uh;
 	int err = 0;
-	int is_udplite = IS_UDPLITE(sk);
-	__wsum csum = 0;
 
-	/* Grab the skbuff where UDP header space exists. */
-	if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
+	skb = ip_finish_skb(sk);
+	if (!skb)
 		goto out;
 
-	/*
-	 * Create a UDP header
-	 */
-	uh = udp_hdr(skb);
-	uh->source = fl->fl_ip_sport;
-	uh->dest = fl->fl_ip_dport;
-	uh->len = htons(up->len);
-	uh->check = 0;
-
-	if (is_udplite)					/* UDP-Lite */
-		csum = udplite_csum_outgoing(sk, skb);
-
-	else if (sk->sk_no_check == UDP_CSUM_NOXMIT) {	/* UDP csum disabled */
-
-		skb->ip_summed = CHECKSUM_NONE;
-		goto send;
-
-	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
-
-		udp4_hwcsum_outgoing(sk, skb, fl->fl4_src, fl->fl4_dst, up->len);
-		goto send;
-
-	} else						/* `normal' UDP */
-		csum = udp_csum_outgoing(sk, skb);
-
-	/* add protocol-dependent pseudo-header */
-	uh->check = csum_tcpudp_magic(fl->fl4_src, fl->fl4_dst, up->len,
-				      sk->sk_protocol, csum);
-	if (uh->check == 0)
-		uh->check = CSUM_MANGLED_0;
-
-send:
-	err = ip_push_pending_frames(sk);
-	if (err) {
-		if (err == -ENOBUFS && !inet->recverr) {
-			UDP_INC_STATS_USER(sock_net(sk),
-					   UDP_MIB_SNDBUFERRORS, is_udplite);
-			err = 0;
-		}
-	} else
-		UDP_INC_STATS_USER(sock_net(sk),
-				   UDP_MIB_OUTDATAGRAMS, is_udplite);
+	err = udp_send_skb(skb, fl->fl4_dst, fl->fl_ip_dport);
+
 out:
 	up->len = 0;
 	up->pending = 0;