vhost_net: tx batching
author    Jason Wang <jasowang@redhat.com>
          Wed, 18 Jan 2017 07:02:02 +0000 (15:02 +0800)
committer David S. Miller <davem@davemloft.net>
          Wed, 18 Jan 2017 21:35:30 +0000 (16:35 -0500)
This patch tries to utilize tuntap rx batching by peeking at the tx
virtqueue during transmission: if more buffers are available in the
virtqueue, the MSG_MORE flag is set as a hint for the backend
(e.g. tuntap) to batch the packets.

Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
Acked-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
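
For context, MSG_MORE is the standard Linux socket flag meaning "more
data will follow immediately", allowing the transport to hold data back
and coalesce it with the next call. A minimal userspace sketch of the
semantics (ordinary socket API, not code from this patch; error
handling omitted for brevity):

#include <string.h>
#include <sys/socket.h>

static void send_record(int fd, const char *hdr, const char *body)
{
	/* Hint that more data follows: the kernel may coalesce the
	 * header with the body instead of emitting two packets. */
	send(fd, hdr, strlen(hdr), MSG_MORE);

	/* No MSG_MORE on the final piece, so the data goes out now. */
	send(fd, body, strlen(body), 0);
}

vhost_net applies the same hint to the backend socket's sendmsg(), as
the diff below shows.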
drivers/vhost/net.c

index 5dc34653274ae3655cf2ba97ab9c8dce9e8a2632..c42e9c30513417815ccc72a47ed298b11c0d5211 100644
@@ -351,6 +351,15 @@ static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
        return r;
 }
 
+static bool vhost_exceeds_maxpend(struct vhost_net *net)
+{
+       struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
+       struct vhost_virtqueue *vq = &nvq->vq;
+
+       return (nvq->upend_idx + vq->num - VHOST_MAX_PEND) % UIO_MAXIOV
+               == nvq->done_idx;
+}
+
 /* Expects to be always run from workqueue - which acts as
  * read-size critical section for our kind of RCU. */
 static void handle_tx(struct vhost_net *net)
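
The new helper compares ring indices modulo UIO_MAXIOV so the limit
check stays correct when upend_idx wraps past done_idx. A standalone
sketch of the underlying circular-index arithmetic (RING_SIZE and the
>= form of the threshold are illustrative simplifications; the kernel
folds vq->num and VHOST_MAX_PEND into a single modular equality):

#include <stdbool.h>

#define RING_SIZE 1024	/* stands in for UIO_MAXIOV */

/* Occupancy of a producer/consumer index pair, robust across
 * wraparound: adding RING_SIZE before the modulo keeps the result
 * non-negative even after the producer index wraps. */
static unsigned int ring_pending(unsigned int upend_idx,
				 unsigned int done_idx)
{
	return (upend_idx + RING_SIZE - done_idx) % RING_SIZE;
}

/* Too many zerocopy buffers are in flight once occupancy reaches
 * the limit. */
static bool exceeds_limit(unsigned int upend_idx, unsigned int done_idx,
			  unsigned int limit)
{
	return ring_pending(upend_idx, done_idx) >= limit;
}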
@@ -394,8 +403,7 @@ static void handle_tx(struct vhost_net *net)
                /* If more outstanding DMAs, queue the work.
                 * Handle upend_idx wrap around
                 */
-               if (unlikely((nvq->upend_idx + vq->num - VHOST_MAX_PEND)
-                             % UIO_MAXIOV == nvq->done_idx))
+               if (unlikely(vhost_exceeds_maxpend(net)))
                        break;
 
                head = vhost_net_tx_get_vq_desc(net, vq, vq->iov,
@@ -454,6 +462,16 @@ static void handle_tx(struct vhost_net *net)
                        msg.msg_control = NULL;
                        ubufs = NULL;
                }
+
+               total_len += len;
+               if (total_len < VHOST_NET_WEIGHT &&
+                   !vhost_vq_avail_empty(&net->dev, vq) &&
+                   likely(!vhost_exceeds_maxpend(net))) {
+                       msg.msg_flags |= MSG_MORE;
+               } else {
+                       msg.msg_flags &= ~MSG_MORE;
+               }
+
                /* TODO: Check specific error and bomb out unless ENOBUFS? */
                err = sock->ops->sendmsg(sock, &msg, len);
                if (unlikely(err < 0)) {
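
On the receive side, the tuntap rx batching that this hint targets
holds incoming packets on a queue while MSG_MORE stays set, then
flushes them in one burst once the hint is dropped or a threshold is
hit. A rough, illustrative sketch of that consumer pattern (names and
structure are hypothetical, not the literal tun.c code; locking
omitted):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical batch state; real tuntap keeps equivalent per-queue
 * state. */
struct rx_batch {
	struct sk_buff_head queue;	/* packets held back so far */
	unsigned int limit;		/* flush threshold */
};

/* 'more' would come from (msg->msg_flags & MSG_MORE) in sendmsg(). */
static void rx_batched(struct rx_batch *b, struct sk_buff *skb, bool more)
{
	__skb_queue_tail(&b->queue, skb);

	/* Sender promised more packets and there is room: keep batching. */
	if (more && skb_queue_len(&b->queue) < b->limit)
		return;

	/* Hint dropped or batch full: push everything into the stack. */
	while ((skb = __skb_dequeue(&b->queue)) != NULL)
		netif_receive_skb(skb);
}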
@@ -472,7 +490,6 @@ static void handle_tx(struct vhost_net *net)
                        vhost_add_used_and_signal(&net->dev, vq, head, 0);
                else
                        vhost_zerocopy_signal_used(net, vq);
-               total_len += len;
                vhost_net_tx_packet(net);
                if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
                        vhost_poll_queue(&vq->poll);