diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 559b215c016967f2b6525e7f3bc4fca2a2ee9e90..c70441f45e086836f3f9a46c47441c35c2a0f539 100644 (file)
@@ -117,6 +117,17 @@ struct receive_queue {
        char name[40];
 };
 
+/* Control VQ buffers: protected by the rtnl lock */
+struct control_buf {
+       struct virtio_net_ctrl_hdr hdr;
+       virtio_net_ctrl_ack status;
+       struct virtio_net_ctrl_mq mq;
+       u8 promisc;
+       u8 allmulti;
+       __virtio16 vid;
+       u64 offloads;
+};
+
 struct virtnet_info {
        struct virtio_device *vdev;
        struct virtqueue *cvq;
@@ -165,14 +176,7 @@ struct virtnet_info {
        struct hlist_node node;
        struct hlist_node node_dead;
 
-       /* Control VQ buffers: protected by the rtnl lock */
-       struct virtio_net_ctrl_hdr ctrl_hdr;
-       virtio_net_ctrl_ack ctrl_status;
-       struct virtio_net_ctrl_mq ctrl_mq;
-       u8 ctrl_promisc;
-       u8 ctrl_allmulti;
-       u16 ctrl_vid;
-       u64 ctrl_offloads;
+       struct control_buf *ctrl;
 
        /* Ethtool settings */
        u8 duplex;
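
These first two hunks split the control-virtqueue buffers out of struct virtnet_info into a dedicated, separately allocated struct control_buf. Note that vid also widens from u16 to __virtio16 here; the matching cpu_to_virtio16() conversions appear in the VLAN hunks below. A minimal sketch of the resulting lifetime, assuming only the alloc/free hunks later in this patch:

	/* Sketch: vi->ctrl now lives in its own allocation, created in
	 * virtnet_alloc_queues() and released in virtnet_free_queues(). */
	vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL);
	if (!vi->ctrl)
		return -ENOMEM;
	/* ... control path uses vi->ctrl->hdr, vi->ctrl->status, ... */
	kfree(vi->ctrl);
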
@@ -261,9 +265,12 @@ static void virtqueue_napi_complete(struct napi_struct *napi,
        int opaque;
 
        opaque = virtqueue_enable_cb_prepare(vq);
-       if (napi_complete_done(napi, processed) &&
-           unlikely(virtqueue_poll(vq, opaque)))
-               virtqueue_napi_schedule(napi, vq);
+       if (napi_complete_done(napi, processed)) {
+               if (unlikely(virtqueue_poll(vq, opaque)))
+                       virtqueue_napi_schedule(napi, vq);
+       } else {
+               virtqueue_disable_cb(vq);
+       }
 }
 
 static void skb_xmit_done(struct virtqueue *vq)
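
The virtqueue_napi_complete() hunk above adds the missing else branch: when napi_complete_done() returns false (NAPI stays scheduled, e.g. under busy polling), the callback that virtqueue_enable_cb_prepare() just re-armed must be masked again, because the poll loop will run once more anyway. A comment-annotated sketch of the two outcomes, as the code reads:

	opaque = virtqueue_enable_cb_prepare(vq);	/* re-arm the callback */
	if (napi_complete_done(napi, processed)) {
		/* NAPI is done: if work slipped in between the last ring
		 * check and re-arming, reschedule ourselves. */
		if (unlikely(virtqueue_poll(vq, opaque)))
			virtqueue_napi_schedule(napi, vq);
	} else {
		/* NAPI stays scheduled: keep callbacks disabled and let
		 * the next poll pass drain the ring. */
		virtqueue_disable_cb(vq);
	}
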
@@ -413,12 +420,8 @@ static bool __virtnet_xdp_xmit(struct virtnet_info *vi,
        sg_init_one(sq->sg, xdp->data, xdp->data_end - xdp->data);
 
        err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp->data, GFP_ATOMIC);
-       if (unlikely(err)) {
-               struct page *page = virt_to_head_page(xdp->data);
-
-               put_page(page);
-               return false;
-       }
+       if (unlikely(err))
+               return false; /* Caller handles free/refcnt */
 
        return true;
 }
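
This hunk moves buffer ownership on failure out of __virtnet_xdp_xmit(): instead of dropping the page itself, it returns false and leaves the free/refcount decision to the caller, which is what enables the reworked XDP_TX error handling in receive_small() and receive_mergeable() below. The expected caller pattern, mirroring the hunks that follow:

	sent = __virtnet_xdp_xmit(vi, &xdp);
	if (unlikely(!sent)) {
		trace_xdp_exception(vi->dev, xdp_prog, act);
		goto err_xdp;	/* the error path owns and frees the buffer */
	}
	*xdp_xmit = true;
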
@@ -516,8 +519,11 @@ static struct sk_buff *receive_small(struct net_device *dev,
        unsigned int buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
                              SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
        struct page *page = virt_to_head_page(buf);
-       unsigned int delta = 0, err;
+       unsigned int delta = 0;
        struct page *xdp_page;
+       bool sent;
+       int err;
+
        len -= vi->hdr_len;
 
        rcu_read_lock();
@@ -528,7 +534,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
                void *orig_data;
                u32 act;
 
-               if (unlikely(hdr->hdr.gso_type || hdr->hdr.flags))
+               if (unlikely(hdr->hdr.gso_type))
                        goto err_xdp;
 
                if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) {
@@ -565,16 +571,19 @@ static struct sk_buff *receive_small(struct net_device *dev,
                        delta = orig_data - xdp.data;
                        break;
                case XDP_TX:
-                       if (unlikely(!__virtnet_xdp_xmit(vi, &xdp)))
+                       sent = __virtnet_xdp_xmit(vi, &xdp);
+                       if (unlikely(!sent)) {
                                trace_xdp_exception(vi->dev, xdp_prog, act);
-                       else
-                               *xdp_xmit = true;
+                               goto err_xdp;
+                       }
+                       *xdp_xmit = true;
                        rcu_read_unlock();
                        goto xdp_xmit;
                case XDP_REDIRECT:
                        err = xdp_do_redirect(dev, &xdp, xdp_prog);
-                       if (!err)
-                               *xdp_xmit = true;
+                       if (err)
+                               goto err_xdp;
+                       *xdp_xmit = true;
                        rcu_read_unlock();
                        goto xdp_xmit;
                default:
@@ -647,6 +656,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
        unsigned int truesize;
        unsigned int headroom = mergeable_ctx_to_headroom(ctx);
        int err;
+       bool sent;
 
        head_skb = NULL;
 
@@ -658,6 +668,13 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
                void *data;
                u32 act;
 
+               /* Transient failure which in theory could occur if
+                * in-flight packets from before XDP was enabled reach
+                * the receive path after XDP is loaded.
+                */
+               if (unlikely(hdr->hdr.gso_type))
+                       goto err_xdp;
+
                /* This happens when rx buffer size is underestimated */
                if (unlikely(num_buf > 1 ||
                             headroom < virtnet_get_headroom(vi))) {
@@ -673,14 +690,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
                        xdp_page = page;
                }
 
-               /* Transient failure which in theory could occur if
-                * in-flight packets from before XDP was enabled reach
-                * the receive path after XDP is loaded. In practice I
-                * was not able to create this condition.
-                */
-               if (unlikely(hdr->hdr.gso_type))
-                       goto err_xdp;
-
                /* Allow consuming headroom but reserve enough space to push
                 * the descriptor on if we get an XDP_TX return code.
                 */
@@ -713,12 +722,16 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
                        }
                        break;
                case XDP_TX:
-                       if (unlikely(!__virtnet_xdp_xmit(vi, &xdp)))
+                       sent = __virtnet_xdp_xmit(vi, &xdp);
+                       if (unlikely(!sent)) {
                                trace_xdp_exception(vi->dev, xdp_prog, act);
-                       else
-                               *xdp_xmit = true;
-                       if (unlikely(xdp_page != page))
+                               if (unlikely(xdp_page != page))
+                                       put_page(xdp_page);
                                goto err_xdp;
+                       }
+                       *xdp_xmit = true;
+                       if (unlikely(xdp_page != page))
+                               put_page(page);
                        rcu_read_unlock();
                        goto xdp_xmit;
                case XDP_REDIRECT:
@@ -812,7 +825,7 @@ err_xdp:
        rcu_read_unlock();
 err_skb:
        put_page(page);
-       while (--num_buf) {
+       while (num_buf-- > 1) {
                buf = virtqueue_get_buf(rq->vq, &len);
                if (unlikely(!buf)) {
                        pr_debug("%s: rx error: %d buffers missing\n",
@@ -1202,7 +1215,9 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
 {
        struct receive_queue *rq =
                container_of(napi, struct receive_queue, napi);
-       unsigned int received;
+       struct virtnet_info *vi = rq->vq->vdev->priv;
+       struct send_queue *sq;
+       unsigned int received, qp;
        bool xdp_xmit = false;
 
        virtnet_poll_cleantx(rq);
@@ -1213,8 +1228,13 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
        if (received < budget)
                virtqueue_napi_complete(napi, rq->vq, received);
 
-       if (xdp_xmit)
+       if (xdp_xmit) {
+               qp = vi->curr_queue_pairs - vi->xdp_queue_pairs +
+                    smp_processor_id();
+               sq = &vi->sq[qp];
+               virtqueue_kick(sq->vq);
                xdp_do_flush_map();
+       }
 
        return received;
 }
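
The added virtqueue_kick() supplies the device notification that __virtnet_xdp_xmit() itself does not issue: XDP_TX descriptors queued during this NAPI run get a single kick here. The index arithmetic selects this CPU's dedicated XDP transmit queue; a worked example with illustrative numbers:

	/* Assume curr_queue_pairs = 8 and xdp_queue_pairs = 4: the XDP TX
	 * queues are sq[4]..sq[7], one per CPU, so CPU 2 kicks sq[6]. */
	qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
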
@@ -1276,7 +1296,8 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
                hdr = skb_vnet_hdr(skb);
 
        if (virtio_net_hdr_from_skb(skb, &hdr->hdr,
-                                   virtio_is_little_endian(vi->vdev), false))
+                                   virtio_is_little_endian(vi->vdev), false,
+                                   0))
                BUG();
 
        if (vi->mergeable_rx_bufs)
@@ -1382,25 +1403,25 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
        /* Caller should know better */
        BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));
 
-       vi->ctrl_status = ~0;
-       vi->ctrl_hdr.class = class;
-       vi->ctrl_hdr.cmd = cmd;
+       vi->ctrl->status = ~0;
+       vi->ctrl->hdr.class = class;
+       vi->ctrl->hdr.cmd = cmd;
        /* Add header */
-       sg_init_one(&hdr, &vi->ctrl_hdr, sizeof(vi->ctrl_hdr));
+       sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr));
        sgs[out_num++] = &hdr;
 
        if (out)
                sgs[out_num++] = out;
 
        /* Add return status. */
-       sg_init_one(&stat, &vi->ctrl_status, sizeof(vi->ctrl_status));
+       sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status));
        sgs[out_num] = &stat;
 
        BUG_ON(out_num + 1 > ARRAY_SIZE(sgs));
        virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC);
 
        if (unlikely(!virtqueue_kick(vi->cvq)))
-               return vi->ctrl_status == VIRTIO_NET_OK;
+               return vi->ctrl->status == VIRTIO_NET_OK;
 
        /* Spin for a response, the kick causes an ioport write, trapping
         * into the hypervisor, so the request should be handled immediately.
@@ -1409,7 +1430,7 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
               !virtqueue_is_broken(vi->cvq))
                cpu_relax();
 
-       return vi->ctrl_status == VIRTIO_NET_OK;
+       return vi->ctrl->status == VIRTIO_NET_OK;
 }
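
Worth noting in virtnet_send_command(): status is pre-set to ~0, which matches neither VIRTIO_NET_OK (0) nor VIRTIO_NET_ERR (1), so if the kick fails or the device never writes a reply, the comparisons above report failure instead of a stale success:

	vi->ctrl->status = ~0;	/* sentinel until the device writes a reply */
	/* ... add sgs, kick, spin for the response ... */
	return vi->ctrl->status == VIRTIO_NET_OK;
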
 
 static int virtnet_set_mac_address(struct net_device *dev, void *p)
@@ -1520,8 +1541,8 @@ static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
        if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
                return 0;
 
-       vi->ctrl_mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
-       sg_init_one(&sg, &vi->ctrl_mq, sizeof(vi->ctrl_mq));
+       vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
+       sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq));
 
        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
                                  VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) {
@@ -1579,22 +1600,22 @@ static void virtnet_set_rx_mode(struct net_device *dev)
        if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
                return;
 
-       vi->ctrl_promisc = ((dev->flags & IFF_PROMISC) != 0);
-       vi->ctrl_allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
+       vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0);
+       vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
 
-       sg_init_one(sg, &vi->ctrl_promisc, sizeof(vi->ctrl_promisc));
+       sg_init_one(sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc));
 
        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
                                  VIRTIO_NET_CTRL_RX_PROMISC, sg))
                dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
-                        vi->ctrl_promisc ? "en" : "dis");
+                        vi->ctrl->promisc ? "en" : "dis");
 
-       sg_init_one(sg, &vi->ctrl_allmulti, sizeof(vi->ctrl_allmulti));
+       sg_init_one(sg, &vi->ctrl->allmulti, sizeof(vi->ctrl->allmulti));
 
        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
                                  VIRTIO_NET_CTRL_RX_ALLMULTI, sg))
                dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
-                        vi->ctrl_allmulti ? "en" : "dis");
+                        vi->ctrl->allmulti ? "en" : "dis");
 
        uc_count = netdev_uc_count(dev);
        mc_count = netdev_mc_count(dev);
@@ -1640,8 +1661,8 @@ static int virtnet_vlan_rx_add_vid(struct net_device *dev,
        struct virtnet_info *vi = netdev_priv(dev);
        struct scatterlist sg;
 
-       vi->ctrl_vid = vid;
-       sg_init_one(&sg, &vi->ctrl_vid, sizeof(vi->ctrl_vid));
+       vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
+       sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
 
        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
                                  VIRTIO_NET_CTRL_VLAN_ADD, &sg))
@@ -1655,8 +1676,8 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
        struct virtnet_info *vi = netdev_priv(dev);
        struct scatterlist sg;
 
-       vi->ctrl_vid = vid;
-       sg_init_one(&sg, &vi->ctrl_vid, sizeof(vi->ctrl_vid));
+       vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
+       sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
 
        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
                                  VIRTIO_NET_CTRL_VLAN_DEL, &sg))
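
Beyond the s/ctrl_vid/ctrl->vid/ rename, the two VLAN hunks carry an endianness fix: the id is now stored as a __virtio16 produced by cpu_to_virtio16(), where the old code handed the device a raw host-endian u16, which is wrong on big-endian guests. The conversion, for reference:

	/* cpu_to_virtio16() picks the byte order the device expects:
	 * little-endian for modern (VIRTIO_F_VERSION_1) devices,
	 * guest-native for legacy ones. */
	vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
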
@@ -1954,9 +1975,9 @@ static int virtnet_restore_up(struct virtio_device *vdev)
 static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads)
 {
        struct scatterlist sg;
-       vi->ctrl_offloads = cpu_to_virtio64(vi->vdev, offloads);
+       vi->ctrl->offloads = cpu_to_virtio64(vi->vdev, offloads);
 
-       sg_init_one(&sg, &vi->ctrl_offloads, sizeof(vi->ctrl_offloads));
+       sg_init_one(&sg, &vi->ctrl->offloads, sizeof(vi->ctrl->offloads));
 
        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
                                  VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) {
@@ -2040,8 +2061,9 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
        }
 
        /* Make sure NAPI is not using any XDP TX queues for RX. */
-       for (i = 0; i < vi->max_queue_pairs; i++)
-               napi_disable(&vi->rq[i].napi);
+       if (netif_running(dev))
+               for (i = 0; i < vi->max_queue_pairs; i++)
+                       napi_disable(&vi->rq[i].napi);
 
        netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
        err = _virtnet_set_queues(vi, curr_qp + xdp_qp);
@@ -2060,7 +2082,8 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
                }
                if (old_prog)
                        bpf_prog_put(old_prog);
-               virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
+               if (netif_running(dev))
+                       virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
        }
 
        return 0;
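
The netif_running() guards make XDP setup safe on a downed interface: virtnet_open() is what enables the NAPI instances, and napi_disable() on a NAPI that was never enabled blocks forever, so the disable and the re-enable are skipped symmetrically under the same condition:

	/* Only touch NAPI state if virtnet_open() has enabled it. */
	if (netif_running(dev))
		napi_disable(&vi->rq[i].napi);
	/* ... queue and program update ... */
	if (netif_running(dev))
		virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
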
@@ -2176,6 +2199,7 @@ static void virtnet_free_queues(struct virtnet_info *vi)
 
        kfree(vi->rq);
        kfree(vi->sq);
+       kfree(vi->ctrl);
 }
 
 static void _free_receive_bufs(struct virtnet_info *vi)
@@ -2368,6 +2392,9 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
 {
        int i;
 
+       vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL);
+       if (!vi->ctrl)
+               goto err_ctrl;
        vi->sq = kzalloc(sizeof(*vi->sq) * vi->max_queue_pairs, GFP_KERNEL);
        if (!vi->sq)
                goto err_sq;
@@ -2393,6 +2420,8 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
 err_rq:
        kfree(vi->sq);
 err_sq:
+       kfree(vi->ctrl);
+err_ctrl:
        return -ENOMEM;
 }
 
@@ -2687,8 +2716,8 @@ static int virtnet_probe(struct virtio_device *vdev)
 
        /* Assume link up if device can't report link status,
           otherwise get link status from config. */
+       netif_carrier_off(dev);
        if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
-               netif_carrier_off(dev);
                schedule_work(&vi->config_work);
        } else {
                vi->status = VIRTIO_NET_S_LINK_UP;