git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/commitdiff
vhost: log dirty page correctly
author: Jason Wang <jasowang@redhat.com>
Wed, 16 Jan 2019 08:54:42 +0000 (16:54 +0800)
committer: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
Wed, 14 Aug 2019 09:18:49 +0000 (11:18 +0200)
BugLink: https://bugs.launchpad.net/bugs/1837664
[ Upstream commit cc5e710759470bc7f3c61d11fd54586f15fdbdf4 ]

Vhost dirty page logging API is designed to sync through GPA. But we
try to log GIOVA when device IOTLB is enabled. This is wrong and may
lead to missing data after migration.

To solve this issue, when logging with device IOTLB enabled, we will:

1) reuse the device IOTLB translation result of GIOVA->HVA mapping to
   get HVA, for writable descriptor, get HVA through iovec. For used
   ring update, translate its GIOVA to HVA
2) traverse the GPA->HVA mapping to get the possible GPA and log
   through GPA. Pay attention this reverse mapping is not guaranteed
   to be unique, so we should log each possible GPA in this case.

This fixes the failure of scp to guest during migration. In -next, we
will probably support passing GIOVA->GPA instead of GIOVA->HVA.

Fixes: 6b1e6cc7855b ("vhost: new device IOTLB API")
Reported-by: Jintack Lim <jintack@cs.columbia.edu>
Cc: Jintack Lim <jintack@cs.columbia.edu>
Signed-off-by: Jason Wang <jasowang@redhat.com>
Acked-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Connor Kuehl <connor.kuehl@canonical.com>
Signed-off-by: Khalid Elmously <khalid.elmously@canonical.com>
drivers/vhost/net.c
drivers/vhost/vhost.c
drivers/vhost/vhost.h

index abf2f1a2f1253a1004db73f30b49241941077717..4a7a0d77dfba6b2ae7fc05dca373935fce250cac 100644 (file)
@@ -856,7 +856,8 @@ static void handle_rx(struct vhost_net *net)
                vhost_add_used_and_signal_n(&net->dev, vq, vq->heads,
                                            headcount);
                if (unlikely(vq_log))
-                       vhost_log_write(vq, vq_log, log, vhost_len);
+                       vhost_log_write(vq, vq_log, log, vhost_len,
+                                       vq->iov, in);
                total_len += vhost_len;
                if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
                        vhost_poll_queue(&vq->poll);
index c0c2f94d32a409ac6572f83f6930fd364e99d6a8..d83ead14b16a1c87993d7ba572e179ac63a17481 100644 (file)
@@ -1730,13 +1730,87 @@ static int log_write(void __user *log_base,
        return r;
 }
 
+static int log_write_hva(struct vhost_virtqueue *vq, u64 hva, u64 len)
+{
+       struct vhost_umem *umem = vq->umem;
+       struct vhost_umem_node *u;
+       u64 start, end, l, min;
+       int r;
+       bool hit = false;
+
+       while (len) {
+               min = len;
+               /* More than one GPAs can be mapped into a single HVA. So
+                * iterate all possible umems here to be safe.
+                */
+               list_for_each_entry(u, &umem->umem_list, link) {
+                       if (u->userspace_addr > hva - 1 + len ||
+                           u->userspace_addr - 1 + u->size < hva)
+                               continue;
+                       start = max(u->userspace_addr, hva);
+                       end = min(u->userspace_addr - 1 + u->size,
+                                 hva - 1 + len);
+                       l = end - start + 1;
+                       r = log_write(vq->log_base,
+                                     u->start + start - u->userspace_addr,
+                                     l);
+                       if (r < 0)
+                               return r;
+                       hit = true;
+                       min = min(l, min);
+               }
+
+               if (!hit)
+                       return -EFAULT;
+
+               len -= min;
+               hva += min;
+       }
+
+       return 0;
+}
+
+static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len)
+{
+       struct iovec iov[64];
+       int i, ret;
+
+       if (!vq->iotlb)
+               return log_write(vq->log_base, vq->log_addr + used_offset, len);
+
+       ret = translate_desc(vq, (uintptr_t)vq->used + used_offset,
+                            len, iov, 64, VHOST_ACCESS_WO);
+       if (ret)
+               return ret;
+
+       for (i = 0; i < ret; i++) {
+               ret = log_write_hva(vq, (uintptr_t)iov[i].iov_base,
+                                   iov[i].iov_len);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
 int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
-                   unsigned int log_num, u64 len)
+                   unsigned int log_num, u64 len, struct iovec *iov, int count)
 {
        int i, r;
 
        /* Make sure data written is seen before log. */
        smp_wmb();
+
+       if (vq->iotlb) {
+               for (i = 0; i < count; i++) {
+                       r = log_write_hva(vq, (uintptr_t)iov[i].iov_base,
+                                         iov[i].iov_len);
+                       if (r < 0)
+                               return r;
+               }
+               return 0;
+       }
+
        for (i = 0; i < log_num; ++i) {
                u64 l = min(log[i].len, len);
                r = log_write(vq->log_base, log[i].addr, l);
@@ -1766,9 +1840,8 @@ static int vhost_update_used_flags(struct vhost_virtqueue *vq)
                smp_wmb();
                /* Log used flag write. */
                used = &vq->used->flags;
-               log_write(vq->log_base, vq->log_addr +
-                         (used - (void __user *)vq->used),
-                         sizeof vq->used->flags);
+               log_used(vq, (used - (void __user *)vq->used),
+                        sizeof vq->used->flags);
                if (vq->log_ctx)
                        eventfd_signal(vq->log_ctx, 1);
        }
@@ -1786,9 +1859,8 @@ static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
                smp_wmb();
                /* Log avail event write */
                used = vhost_avail_event(vq);
-               log_write(vq->log_base, vq->log_addr +
-                         (used - (void __user *)vq->used),
-                         sizeof *vhost_avail_event(vq));
+               log_used(vq, (used - (void __user *)vq->used),
+                        sizeof *vhost_avail_event(vq));
                if (vq->log_ctx)
                        eventfd_signal(vq->log_ctx, 1);
        }
@@ -2193,10 +2265,8 @@ static int __vhost_add_used_n(struct vhost_virtqueue *vq,
                /* Make sure data is seen before log. */
                smp_wmb();
                /* Log used ring entry write. */
-               log_write(vq->log_base,
-                         vq->log_addr +
-                          ((void __user *)used - (void __user *)vq->used),
-                         count * sizeof *used);
+               log_used(vq, ((void __user *)used - (void __user *)vq->used),
+                        count * sizeof *used);
        }
        old = vq->last_used_idx;
        new = (vq->last_used_idx += count);
@@ -2238,9 +2308,8 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
                /* Make sure used idx is seen before log. */
                smp_wmb();
                /* Log used index update. */
-               log_write(vq->log_base,
-                         vq->log_addr + offsetof(struct vring_used, idx),
-                         sizeof vq->used->idx);
+               log_used(vq, offsetof(struct vring_used, idx),
+                        sizeof vq->used->idx);
                if (vq->log_ctx)
                        eventfd_signal(vq->log_ctx, 1);
        }
index 79c6e7a60a5ec8e7122b1d3282415c0b506c4f96..75d21d4a83541c534998d1d1aa8b7650eca4415f 100644 (file)
@@ -208,7 +208,8 @@ bool vhost_vq_avail_empty(struct vhost_dev *, struct vhost_virtqueue *);
 bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *);
 
 int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
-                   unsigned int log_num, u64 len);
+                   unsigned int log_num, u64 len,
+                   struct iovec *iov, int count);
 int vq_iotlb_prefetch(struct vhost_virtqueue *vq);
 
 struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type);