I/OAT: Only offload copies for TCP when there will be a context switch
The performance win comes from having the DMA copy engine do the copies in parallel with the context switch. If there is enough data ready on the socket at recv time, just use a regular copy.

Signed-off-by: Chris Leech <christopher.leech@intel.com>
commit 2b1244a43b
parent 72d0b7a81d
1 changed file with 7 additions and 3 deletions
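For readers skimming the diff below: the whole change is a new gating test in tcp_recvmsg() — count how many bytes are already sitting on the receive queue and hand the copy to the I/OAT DMA engine only when that falls short of what the caller is waiting for, i.e. when the task is about to block. The following standalone userspace sketch only models that decision; struct recv_state, DMA_COPYBREAK and use_dma_offload() are made-up stand-ins for illustration, not kernel APIs.

/*
 * Minimal sketch of the gating idea from this commit (not kernel code).
 * Offload the copy to the DMA engine only when the bytes already queued
 * fall short of the amount the caller insists on, so the engine's work
 * can overlap the context switch while the task sleeps.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define DMA_COPYBREAK 4096      /* arbitrary stand-in for sysctl_tcp_dma_copybreak */

struct recv_state {
        size_t available;       /* bytes already queued at recv time          */
        size_t target;          /* bytes the caller is waiting for (rcvlowat) */
        size_t len;             /* size of the user buffer                    */
        bool   peek;            /* MSG_PEEK: data stays on the queue          */
        bool   dma_present;     /* a DMA copy channel exists on this CPU      */
};

/* Mirror of the new test: offload only if we expect to sleep. */
static bool use_dma_offload(const struct recv_state *rs)
{
        return rs->available < rs->target &&    /* we will block / context switch */
               rs->len > DMA_COPYBREAK &&       /* copy is big enough to matter   */
               !rs->peek &&
               rs->dma_present;
}

int main(void)
{
        struct recv_state enough  = { .available = 65536, .target = 1,
                                      .len = 65536, .dma_present = true };
        struct recv_state waiting = { .available = 0,     .target = 1,
                                      .len = 65536, .dma_present = true };

        printf("data already queued -> offload? %d (regular copy)\n",
               use_dma_offload(&enough));
        printf("nothing queued yet  -> offload? %d (DMA + sleep)\n",
               use_dma_offload(&waiting));
        return 0;
}

In the kernel path, available is read from the last skb on sk_receive_queue (TCP_SKB_CB(skb)->seq + skb->len - *seq) and target is the receive low-water mark computed earlier in tcp_recvmsg(), so available < target is precisely the case where the caller is about to sleep and the DMA copy can proceed in parallel with the context switch.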
@@ -1116,6 +1116,8 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 	long timeo;
 	struct task_struct *user_recv = NULL;
 	int copied_early = 0;
+	int available = 0;
+	struct sk_buff *skb;
 
 	lock_sock(sk);
 
@@ -1142,7 +1144,11 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 #ifdef CONFIG_NET_DMA
 	tp->ucopy.dma_chan = NULL;
 	preempt_disable();
-	if ((len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
+	skb = skb_peek_tail(&sk->sk_receive_queue);
+	if (skb)
+		available = TCP_SKB_CB(skb)->seq + skb->len - (*seq);
+	if ((available < target) &&
+	    (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
 	    !sysctl_tcp_low_latency && __get_cpu_var(softnet_data).net_dma) {
 		preempt_enable_no_resched();
 		tp->ucopy.pinned_list = dma_pin_iovec_pages(msg->msg_iov, len);
@@ -1151,7 +1157,6 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 #endif
 
 	do {
-		struct sk_buff *skb;
 		u32 offset;
 
 		/* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
@@ -1439,7 +1444,6 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 
 #ifdef CONFIG_NET_DMA
 	if (tp->ucopy.dma_chan) {
-		struct sk_buff *skb;
 		dma_cookie_t done, used;
 
 		dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);