virtio-net: fix page miscount during XDP linearizing
author    Jason Wang <jasowang@redhat.com>
Fri, 23 Dec 2016 14:37:26 +0000 (22:37 +0800)
committer David S. Miller <davem@davemloft.net>
Fri, 23 Dec 2016 18:48:54 +0000 (13:48 -0500)
We don't put the page during linearizing, which would cause a leak
when the packet is transmitted through XDP_TX or exceeds PAGE_SIZE.
Fix this by putting the page accordingly. Also decrease the number of
buffers during linearizing to make sure the caller can free the
remaining buffers correctly when the packet exceeds PAGE_SIZE. With
this patch, we no longer hit OOM after linearizing a huge number of
packets.

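For context, the sketch below restates the corrected copy loop in isolation,
with comments marking where the page references are dropped. It is an
illustration only, not the driver code itself: the buffer-retrieval call and
the local declarations are assumed from the usual mergeable-buffer path
(virtqueue_get_buf() plus mergeable_ctx_to_buf_address()), while put_page()
and virt_to_head_page() are the kernel primitives the patch actually adds.

        while (--*num_buf) {
                unsigned int buflen;
                unsigned long ctx;
                void *buf;
                int off;

                /* Assumed retrieval of the next mergeable buffer. */
                ctx = (unsigned long)virtqueue_get_buf(rq->vq, &buflen);
                if (unlikely(!ctx))
                        goto err_buf;

                buf = mergeable_ctx_to_buf_address(ctx);
                p = virt_to_head_page(buf);
                off = buf - page_address(p);

                /* Oversized packet: drop the reference before bailing out,
                 * otherwise the page leaks (the pre-patch bug). */
                if ((page_off + buflen) > PAGE_SIZE) {
                        put_page(p);
                        goto err_buf;
                }

                memcpy(page_address(page) + page_off,
                       page_address(p) + off, buflen);
                page_off += buflen;

                /* Buffer fully copied into the linearized page: its
                 * reference is no longer needed. */
                put_page(p);
        }
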
Cc: John Fastabend <john.r.fastabend@intel.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
Acked-by: John Fastabend <john.r.fastabend@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/virtio_net.c

index fe4562d395e3f09a33d1bdfc7beb175680453732..58ad40e17a74fb455dd956ab06e83601c8c2310e 100644
@@ -483,7 +483,7 @@ xdp_xmit:
  * anymore.
  */
 static struct page *xdp_linearize_page(struct receive_queue *rq,
-                                      u16 num_buf,
+                                      u16 *num_buf,
                                       struct page *p,
                                       int offset,
                                       unsigned int *len)
@@ -497,7 +497,7 @@ static struct page *xdp_linearize_page(struct receive_queue *rq,
        memcpy(page_address(page) + page_off, page_address(p) + offset, *len);
        page_off += *len;
 
-       while (--num_buf) {
+       while (--*num_buf) {
                unsigned int buflen;
                unsigned long ctx;
                void *buf;
@@ -507,19 +507,22 @@ static struct page *xdp_linearize_page(struct receive_queue *rq,
                if (unlikely(!ctx))
                        goto err_buf;
 
+               buf = mergeable_ctx_to_buf_address(ctx);
+               p = virt_to_head_page(buf);
+               off = buf - page_address(p);
+
                /* guard against a misconfigured or uncooperative backend that
                 * is sending packet larger than the MTU.
                 */
-               if ((page_off + buflen) > PAGE_SIZE)
+               if ((page_off + buflen) > PAGE_SIZE) {
+                       put_page(p);
                        goto err_buf;
-
-               buf = mergeable_ctx_to_buf_address(ctx);
-               p = virt_to_head_page(buf);
-               off = buf - page_address(p);
+               }
 
                memcpy(page_address(page) + page_off,
                       page_address(p) + off, buflen);
                page_off += buflen;
+               put_page(p);
        }
 
        *len = page_off;
@@ -555,7 +558,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
                /* This happens when rx buffer size is underestimated */
                if (unlikely(num_buf > 1)) {
                        /* linearize data for XDP */
-                       xdp_page = xdp_linearize_page(rq, num_buf,
+                       xdp_page = xdp_linearize_page(rq, &num_buf,
                                                      page, offset, &len);
                        if (!xdp_page)
                                goto err_xdp;