Commit 1a2449a8 authored by Chris Leech, committed by David S. Miller

[I/OAT]: TCP recv offload to I/OAT

Locks down user pages and sets up for DMA in tcp_recvmsg(), then calls the new
tcp_dma_try_early_copy() from the tcp_v4_do_rcv() receive path so the copy to
user space can be offloaded onto an I/OAT DMA engine.
Signed-off-by: Chris Leech <christopher.leech@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 95937825
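
For orientation before the diff: the offload follows a pin/issue/fence/unpin
sequence. Below is a minimal user-space model of that flow, not kernel code;
pin_user_buffer(), issue_async_copy(), wait_cookie() and unpin_user_buffer()
are hypothetical stand-ins for the kernel helpers the patch actually uses
(dma_pin_iovec_pages, dma_skb_copy_datagram_iovec, dma_async_memcpy_complete,
dma_unpin_iovec_pages).

#include <stddef.h>
#include <string.h>

typedef int dma_cookie_t;   /* engine completion cookies are ordered */

/* Hypothetical stand-ins for the kernel helpers used in this patch. */
static void *pin_user_buffer(void *buf, size_t len) { (void)len; return buf; }
static void  unpin_user_buffer(void *pinned)        { (void)pinned; }
static dma_cookie_t issue_async_copy(void *dst, const void *src, size_t n)
{
    static dma_cookie_t next;
    memcpy(dst, src, n);    /* model: pretend the engine did the copy */
    return ++next;
}
static void wait_cookie(dma_cookie_t c) { (void)c; /* fence on completion */ }

/* The shape of tcp_recvmsg() with offload: pin once, issue one copy per
 * arriving segment, fence on the last cookie, then unpin and return. */
static size_t recv_offloaded(void *ubuf, const void *segs[],
                             const size_t lens[], int nsegs)
{
    void *pinned = pin_user_buffer(ubuf, 0);
    dma_cookie_t last = 0;
    size_t off = 0;

    for (int i = 0; i < nsegs; i++) {
        last = issue_async_copy((char *)ubuf + off, segs[i], lens[i]);
        off += lens[i];
    }
    wait_cookie(last);      /* all earlier cookies are done too */
    unpin_user_buffer(pinned);
    return off;
}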
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -263,7 +263,7 @@
 #include <net/tcp.h>
 #include <net/xfrm.h>
 #include <net/ip.h>
-
+#include <net/netdma.h>
 
 #include <asm/uaccess.h>
 #include <asm/ioctls.h>
@@ -1110,6 +1110,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
     int target;     /* Read at least this many bytes */
     long timeo;
     struct task_struct *user_recv = NULL;
+    int copied_early = 0;
 
     lock_sock(sk);
@@ -1133,6 +1134,17 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 
     target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
 
+#ifdef CONFIG_NET_DMA
+    tp->ucopy.dma_chan = NULL;
+    preempt_disable();
+    if ((len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
+        !sysctl_tcp_low_latency && __get_cpu_var(softnet_data.net_dma)) {
+        preempt_enable_no_resched();
+        tp->ucopy.pinned_list = dma_pin_iovec_pages(msg->msg_iov, len);
+    } else
+        preempt_enable_no_resched();
+#endif
+
     do {
         struct sk_buff *skb;
         u32 offset;
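
The hunk above decides up front whether this read is worth offloading;
preemption is disabled only around the per-CPU channel check. A compact
user-space model of that predicate (the struct and field names here are
hypothetical; the real code reads the sysctls and per-CPU state directly):

#include <stdbool.h>
#include <stddef.h>

struct recv_ctx {
    size_t len;          /* bytes the caller asked for                 */
    bool   msg_peek;     /* MSG_PEEK set?                              */
    bool   low_latency;  /* sysctl_tcp_low_latency                     */
    bool   cpu_has_chan; /* __get_cpu_var(softnet_data.net_dma) != 0   */
};

/* Offload only large, non-PEEK reads when a per-CPU DMA channel exists
 * and the low-latency sysctl has not opted the stack out. */
static bool should_pin_for_dma(const struct recv_ctx *c, size_t copybreak)
{
    return c->len > copybreak && !c->msg_peek &&
           !c->low_latency && c->cpu_has_chan;
}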
@@ -1274,6 +1286,10 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
             } else
                 sk_wait_data(sk, &timeo);
 
+#ifdef CONFIG_NET_DMA
+            tp->ucopy.wakeup = 0;
+#endif
+
             if (user_recv) {
                 int chunk;
@@ -1329,13 +1345,39 @@ do_prequeue:
         }
 
         if (!(flags & MSG_TRUNC)) {
-            err = skb_copy_datagram_iovec(skb, offset,
-                                          msg->msg_iov, used);
-            if (err) {
-                /* Exception. Bailout! */
-                if (!copied)
-                    copied = -EFAULT;
-                break;
+#ifdef CONFIG_NET_DMA
+            if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
+                tp->ucopy.dma_chan = get_softnet_dma();
+
+            if (tp->ucopy.dma_chan) {
+                tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
+                    tp->ucopy.dma_chan, skb, offset,
+                    msg->msg_iov, used,
+                    tp->ucopy.pinned_list);
+
+                if (tp->ucopy.dma_cookie < 0) {
+                    printk(KERN_ALERT "dma_cookie < 0\n");
+                    /* Exception. Bailout! */
+                    if (!copied)
+                        copied = -EFAULT;
+                    break;
+                }
+                if ((offset + used) == skb->len)
+                    copied_early = 1;
+
+            } else
+#endif
+            {
+                err = skb_copy_datagram_iovec(skb, offset,
+                                              msg->msg_iov, used);
+                if (err) {
+                    /* Exception. Bailout! */
+                    if (!copied)
+                        copied = -EFAULT;
+                    break;
+                }
             }
         }
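
Restated, the dispatch above prefers the async engine, treats a negative
cookie exactly like a synchronous copy fault, and defers freeing the skb
only when it was consumed to its end. A sketch of that shape (illustrative
user-space C; the hard-coded cookie stands in for the real engine call):

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

enum copy_result { COPY_OK, COPY_OK_DEFER_FREE, COPY_FAULT };

static enum copy_result copy_segment(bool have_chan, char *dst,
                                     const char *skb_data, size_t skb_len,
                                     size_t offset, size_t used)
{
    if (have_chan) {
        long cookie = 1;   /* model of dma_skb_copy_datagram_iovec() */
        if (cookie < 0)
            return COPY_FAULT;          /* same bail-out as -EFAULT  */
        if (offset + used == skb_len)
            return COPY_OK_DEFER_FREE;  /* copied_early = 1          */
        return COPY_OK;
    }
    memcpy(dst, skb_data + offset, used);  /* synchronous fallback   */
    return COPY_OK;
}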
@@ -1355,15 +1397,19 @@ skip_copy:
 
         if (skb->h.th->fin)
             goto found_fin_ok;
-        if (!(flags & MSG_PEEK))
-            sk_eat_skb(sk, skb, 0);
+        if (!(flags & MSG_PEEK)) {
+            sk_eat_skb(sk, skb, copied_early);
+            copied_early = 0;
+        }
         continue;
 
     found_fin_ok:
         /* Process the FIN. */
         ++*seq;
-        if (!(flags & MSG_PEEK))
-            sk_eat_skb(sk, skb, 0);
+        if (!(flags & MSG_PEEK)) {
+            sk_eat_skb(sk, skb, copied_early);
+            copied_early = 0;
+        }
         break;
     } while (len > 0);
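
With a nonzero third argument, sk_eat_skb() must keep the skb alive until
the engine is done with it; consistent with the cleanup added later in this
patch (the sk_async_wait_queue purges), that presumably means parking the
skb on sk->sk_async_wait_queue instead of freeing it. A tiny model of that
deferred-free rule (all types here are simplified stand-ins):

struct skb { struct skb *next; };
struct queue { struct skb *head; };

static void queue_put(struct queue *q, struct skb *s)
{
    s->next = q->head;   /* model: push onto the wait list */
    q->head = s;
}
static void skb_free(struct skb *s) { (void)s; }

static void eat_skb(struct queue *async_wait, struct skb *s, int copied_early)
{
    if (copied_early)
        queue_put(async_wait, s);  /* DMA may still be reading it   */
    else
        skb_free(s);               /* normal path: free immediately */
}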
@@ -1386,6 +1432,36 @@ skip_copy:
         tp->ucopy.len = 0;
     }
 
+#ifdef CONFIG_NET_DMA
+    if (tp->ucopy.dma_chan) {
+        struct sk_buff *skb;
+        dma_cookie_t done, used;
+
+        dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
+
+        while (dma_async_memcpy_complete(tp->ucopy.dma_chan,
+                                         tp->ucopy.dma_cookie, &done,
+                                         &used) == DMA_IN_PROGRESS) {
+            /* do partial cleanup of sk_async_wait_queue */
+            while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
+                   (dma_async_is_complete(skb->dma_cookie, done,
+                                          used) == DMA_SUCCESS)) {
+                __skb_dequeue(&sk->sk_async_wait_queue);
+                kfree_skb(skb);
+            }
+        }
+
+        /* Safe to free early-copied skbs now */
+        __skb_queue_purge(&sk->sk_async_wait_queue);
+        dma_chan_put(tp->ucopy.dma_chan);
+        tp->ucopy.dma_chan = NULL;
+    }
+    if (tp->ucopy.pinned_list) {
+        dma_unpin_iovec_pages(tp->ucopy.pinned_list);
+        tp->ucopy.pinned_list = NULL;
+    }
+#endif
+
     /* According to UNIX98, msg_name/msg_namelen are ignored
      * on connected socket. I was just happy when found this 8) --ANK
      */
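
The teardown above fences on the last issued cookie; while the engine still
reports DMA_IN_PROGRESS it opportunistically frees skbs whose own cookies
have already completed. A user-space model of that cookie arithmetic, where
poll_last_complete() is a hypothetical stand-in for
dma_async_memcpy_complete() and wraparound (which dma_async_is_complete()
handles) is ignored for brevity:

#include <stdbool.h>

typedef int dma_cookie_t;

/* Model of the engine's completion watermark advancing over time. */
static dma_cookie_t poll_last_complete(void)
{
    static dma_cookie_t done;
    return ++done;
}

/* Cookies are issued in increasing order, so "is cookie c done?" is a
 * comparison against the watermark. */
static bool cookie_done(dma_cookie_t c, dma_cookie_t done) { return c <= done; }

static void fence_and_reap(dma_cookie_t last_issued,
                           const dma_cookie_t skb_cookies[], int n)
{
    int i = 0;
    dma_cookie_t done;

    while ((done = poll_last_complete()) < last_issued) {
        /* partial cleanup: drop skbs whose copies have landed */
        while (i < n && cookie_done(skb_cookies[i], done))
            i++;                       /* kfree_skb() in the kernel */
    }
    /* last_issued complete: every remaining skb is safe to free */
}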
@@ -1658,6 +1734,9 @@ int tcp_disconnect(struct sock *sk, int flags)
     __skb_queue_purge(&sk->sk_receive_queue);
     sk_stream_writequeue_purge(sk);
     __skb_queue_purge(&tp->out_of_order_queue);
+#ifdef CONFIG_NET_DMA
+    __skb_queue_purge(&sk->sk_async_wait_queue);
+#endif
 
     inet->dport = 0;
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -71,6 +71,7 @@
 #include <net/inet_common.h>
 #include <linux/ipsec.h>
 #include <asm/unaligned.h>
+#include <net/netdma.h>
 
 int sysctl_tcp_timestamps = 1;
 int sysctl_tcp_window_scaling = 1;
@@ -3785,6 +3786,50 @@ static inline int tcp_checksum_complete_user(struct sock *sk, struct sk_buff *skb)
     __tcp_checksum_complete_user(sk, skb);
 }
 
+#ifdef CONFIG_NET_DMA
+static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb, int hlen)
+{
+    struct tcp_sock *tp = tcp_sk(sk);
+    int chunk = skb->len - hlen;
+    int dma_cookie;
+    int copied_early = 0;
+
+    if (tp->ucopy.wakeup)
+        return 0;
+
+    if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
+        tp->ucopy.dma_chan = get_softnet_dma();
+
+    if (tp->ucopy.dma_chan && skb->ip_summed == CHECKSUM_UNNECESSARY) {
+
+        dma_cookie = dma_skb_copy_datagram_iovec(tp->ucopy.dma_chan,
+            skb, hlen, tp->ucopy.iov, chunk, tp->ucopy.pinned_list);
+
+        if (dma_cookie < 0)
+            goto out;
+
+        tp->ucopy.dma_cookie = dma_cookie;
+        copied_early = 1;
+
+        tp->ucopy.len -= chunk;
+        tp->copied_seq += chunk;
+        tcp_rcv_space_adjust(sk);
+
+        if ((tp->ucopy.len == 0) ||
+            (tcp_flag_word(skb->h.th) & TCP_FLAG_PSH) ||
+            (atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1))) {
+            tp->ucopy.wakeup = 1;
+            sk->sk_data_ready(sk, 0);
+        }
+    } else if (chunk > 0) {
+        tp->ucopy.wakeup = 1;
+        sk->sk_data_ready(sk, 0);
+    }
+
+out:
+    return copied_early;
+}
+#endif /* CONFIG_NET_DMA */
+
 /*
  *    TCP receive function for the ESTABLISHED state.
  *
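
The helper above only feeds the engine when it is safe to do so, and it
batches wakeups: the reader is woken only when the request is satisfied,
the sender set PSH, or the receive buffer is half full. A condensed model
of those two decisions (illustrative user-space C, hypothetical struct):

#include <stdbool.h>
#include <stddef.h>

struct early_copy_ctx {
    bool   woken;       /* tp->ucopy.wakeup already set              */
    bool   have_chan;   /* a DMA channel was grabbed                 */
    bool   csum_ok;     /* skb->ip_summed == CHECKSUM_UNNECESSARY    */
    size_t remaining;   /* tp->ucopy.len after this chunk            */
    bool   psh;         /* TCP_FLAG_PSH on this segment              */
    size_t rmem, rcvbuf;/* receive memory in flight vs. budget       */
};

static bool may_offload(const struct early_copy_ctx *c)
{
    /* No point racing a reader already being woken; and the engine
     * cannot verify checksums, so hardware must have done it. */
    return !c->woken && c->have_chan && c->csum_ok;
}

static bool should_wake_reader(const struct early_copy_ctx *c)
{
    return c->remaining == 0 || c->psh || c->rmem > c->rcvbuf / 2;
}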
@@ -3901,14 +3946,23 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
             }
         } else {
             int eaten = 0;
+            int copied_early = 0;
 
-            if (tp->ucopy.task == current &&
-                tp->copied_seq == tp->rcv_nxt &&
-                len - tcp_header_len <= tp->ucopy.len &&
-                sock_owned_by_user(sk)) {
-                __set_current_state(TASK_RUNNING);
+            if (tp->copied_seq == tp->rcv_nxt &&
+                len - tcp_header_len <= tp->ucopy.len) {
+#ifdef CONFIG_NET_DMA
+                if (tcp_dma_try_early_copy(sk, skb, tcp_header_len)) {
+                    copied_early = 1;
+                    eaten = 1;
+                }
+#endif
+                if (tp->ucopy.task == current && sock_owned_by_user(sk) && !copied_early) {
+                    __set_current_state(TASK_RUNNING);
 
-                if (!tcp_copy_to_iovec(sk, skb, tcp_header_len)) {
+                    if (!tcp_copy_to_iovec(sk, skb, tcp_header_len))
+                        eaten = 1;
+                }
+                if (eaten) {
                     /* Predicted packet is in window by definition.
                      * seq == rcv_nxt and rcv_wup <= rcv_nxt.
                      * Hence, check seq<=rcv_wup reduces to:
@@ -3924,8 +3978,9 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
                     __skb_pull(skb, tcp_header_len);
                     tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
                     NET_INC_STATS_BH(LINUX_MIB_TCPHPHITSTOUSER);
-                    eaten = 1;
                 }
+                if (copied_early)
+                    tcp_cleanup_rbuf(sk, skb->len);
             }
             if (!eaten) {
                 if (tcp_checksum_complete_user(sk, skb))
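
Restated, the reordered fast path above tries the async early copy first;
only if it did not claim the skb does the old inline copy run, and only for
the current task holding the socket. A minimal model of that ladder (the
two stub functions stand in for tcp_dma_try_early_copy() and
tcp_copy_to_iovec() and are not real APIs):

#include <stdbool.h>

static bool dma_early_copy(void) { return false; } /* stub: async path  */
static bool copy_to_iovec(void)  { return true;  } /* stub: inline copy */

static void fast_path(bool seqs_match, bool fits, bool reader_is_current)
{
    bool eaten = false, copied_early = false;

    if (seqs_match && fits) {
        if (dma_early_copy())                /* async path wins        */
            eaten = copied_early = true;
        if (reader_is_current && !copied_early)
            eaten = copy_to_iovec();         /* old inline path        */
        /* header-prediction bookkeeping then runs once, if eaten */
    }
    (void)eaten;
}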
@@ -3966,6 +4021,11 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
                 __tcp_ack_snd_check(sk, 0);
 
 no_ack:
+#ifdef CONFIG_NET_DMA
+            if (copied_early)
+                __skb_queue_tail(&sk->sk_async_wait_queue, skb);
+            else
+#endif
             if (eaten)
                 __kfree_skb(skb);
             else
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -71,6 +71,7 @@
 #include <net/inet_common.h>
 #include <net/timewait_sock.h>
 #include <net/xfrm.h>
+#include <net/netdma.h>
 
 #include <linux/inet.h>
 #include <linux/ipv6.h>
@@ -1091,8 +1092,18 @@ process:
     bh_lock_sock(sk);
     ret = 0;
     if (!sock_owned_by_user(sk)) {
-        if (!tcp_prequeue(sk, skb))
+#ifdef CONFIG_NET_DMA
+        struct tcp_sock *tp = tcp_sk(sk);
+        if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
+            tp->ucopy.dma_chan = get_softnet_dma();
+        if (tp->ucopy.dma_chan)
             ret = tcp_v4_do_rcv(sk, skb);
+        else
+#endif
+        {
+            if (!tcp_prequeue(sk, skb))
+                ret = tcp_v4_do_rcv(sk, skb);
+        }
     } else
         sk_add_backlog(sk, skb);
     bh_unlock_sock(sk);
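
The effect of the hunk above: once a recvmsg() has pinned pages and a
channel exists, packets skip the prequeue and are handled immediately in
softirq context, so the DMA copy can start before the reading task even
wakes. A model of that dispatch (the two stubs stand in for
tcp_v4_do_rcv() and tcp_prequeue()):

#include <stdbool.h>

static int  do_rcv(void)    { return 0; }     /* stub: may start DMA    */
static bool prequeue(void)  { return false; } /* stub: defer to reader  */

static int dispatch(bool dma_chan_active)
{
    if (dma_chan_active)       /* start the copy now, in softirq        */
        return do_rcv();
    if (!prequeue())           /* otherwise the usual prequeue path     */
        return do_rcv();
    return 0;                  /* packet parked on the prequeue         */
}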
@@ -1296,6 +1307,11 @@ int tcp_v4_destroy_sock(struct sock *sk)
     /* Cleans up our, hopefully empty, out_of_order_queue. */
     __skb_queue_purge(&tp->out_of_order_queue);
 
+#ifdef CONFIG_NET_DMA
+    /* Cleans up our sk_async_wait_queue */
+    __skb_queue_purge(&sk->sk_async_wait_queue);
+#endif
+
     /* Clean prequeue, it must be empty really */
     __skb_queue_purge(&tp->ucopy.prequeue);
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1218,8 +1218,16 @@ process:
     bh_lock_sock(sk);
     ret = 0;
     if (!sock_owned_by_user(sk)) {
-        if (!tcp_prequeue(sk, skb))
-            ret = tcp_v6_do_rcv(sk, skb);
+#ifdef CONFIG_NET_DMA
+        struct tcp_sock *tp = tcp_sk(sk);
+        if (tp->ucopy.dma_chan)
+            ret = tcp_v6_do_rcv(sk, skb);
+        else
+#endif
+        {
+            if (!tcp_prequeue(sk, skb))
+                ret = tcp_v6_do_rcv(sk, skb);
+        }
     } else
         sk_add_backlog(sk, skb);
     bh_unlock_sock(sk);