]> git.proxmox.com Git - pve-kernel.git/blame - patches/kernel/0008-vhost-fix-skb-leak-in-handle_rx.patch
KPTI: add follow-up fixes
[pve-kernel.git] / patches / kernel / 0008-vhost-fix-skb-leak-in-handle_rx.patch
CommitLineData
From 8ddb7f99e8c2ad80dbe3f9de01e8af5c310ae52d Mon Sep 17 00:00:00 2001
From: Wei Xu <wexu@redhat.com>
Date: Fri, 1 Dec 2017 05:10:36 -0500
Subject: [PATCH 008/241] vhost: fix skb leak in handle_rx()
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Matthew found a roughly 40% tcp throughput regression with commit
c67df11f(vhost_net: try batch dequing from skb array) as discussed
in the following thread:
https://www.mail-archive.com/netdev@vger.kernel.org/msg187936.html

Eventually we figured out that it was a skb leak in handle_rx()
when sending packets to the VM. This usually happens when a guest
can not drain out vq as fast as vhost fills in, afterwards it sets
off the traffic jam and leaks skb(s) which occurs as no headcount
to send on the vq from vhost side.

This can be avoided by making sure we have got enough headcount
before actually consuming a skb from the batched rx array while
transmitting, which is simply done by moving checking the zero
headcount a bit ahead.

Signed-off-by: Wei Xu <wexu@redhat.com>
Reported-by: Matthew Rosato <mjrosato@linux.vnet.ibm.com>
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
---
 drivers/vhost/net.c | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 1c75572f5a3f..010253847022 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -781,16 +781,6 @@ static void handle_rx(struct vhost_net *net)
 		/* On error, stop handling until the next kick. */
 		if (unlikely(headcount < 0))
 			goto out;
-		if (nvq->rx_array)
-			msg.msg_control = vhost_net_buf_consume(&nvq->rxq);
-		/* On overrun, truncate and discard */
-		if (unlikely(headcount > UIO_MAXIOV)) {
-			iov_iter_init(&msg.msg_iter, READ, vq->iov, 1, 1);
-			err = sock->ops->recvmsg(sock, &msg,
-						 1, MSG_DONTWAIT | MSG_TRUNC);
-			pr_debug("Discarded rx packet: len %zd\n", sock_len);
-			continue;
-		}
 		/* OK, now we need to know about added descriptors. */
 		if (!headcount) {
 			if (unlikely(vhost_enable_notify(&net->dev, vq))) {
@@ -803,6 +793,16 @@ static void handle_rx(struct vhost_net *net)
 			 * they refilled. */
 			goto out;
 		}
+		if (nvq->rx_array)
+			msg.msg_control = vhost_net_buf_consume(&nvq->rxq);
+		/* On overrun, truncate and discard */
+		if (unlikely(headcount > UIO_MAXIOV)) {
+			iov_iter_init(&msg.msg_iter, READ, vq->iov, 1, 1);
+			err = sock->ops->recvmsg(sock, &msg,
+						 1, MSG_DONTWAIT | MSG_TRUNC);
+			pr_debug("Discarded rx packet: len %zd\n", sock_len);
+			continue;
+		}
 		/* We don't need to be notified again. */
 		iov_iter_init(&msg.msg_iter, READ, vq->iov, in, vhost_len);
 		fixup = msg.msg_iter;
-- 
2.14.2
