/*
 * vhost support
 *
 * Copyright Red Hat, Inc. 2010
 *
 * Authors:
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/virtio/vhost.h"
#include "hw/hw.h"
#include "qemu/atomic.h"
#include "qemu/range.h"
#include "qemu/error-report.h"
#include "qemu/memfd.h"
#include <linux/vhost.h>
#include "exec/address-spaces.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
#include "migration/blocker.h"
#include "sysemu/dma.h"
#include "trace.h"

/* enabled until disconnected backend stabilizes */
#define _VHOST_DEBUG 1

#ifdef _VHOST_DEBUG
#define VHOST_OPS_DEBUG(fmt, ...) \
    do { error_report(fmt ": %s (%d)", ## __VA_ARGS__, \
                      strerror(errno), errno); } while (0)
#else
#define VHOST_OPS_DEBUG(fmt, ...) \
    do { } while (0)
#endif

static struct vhost_log *vhost_log;
static struct vhost_log *vhost_log_shm;

static unsigned int used_memslots;
static QLIST_HEAD(, vhost_dev) vhost_devices =
    QLIST_HEAD_INITIALIZER(vhost_devices);

bool vhost_has_free_slot(void)
{
    unsigned int slots_limit = ~0U;
    struct vhost_dev *hdev;

    QLIST_FOREACH(hdev, &vhost_devices, entry) {
        unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
        slots_limit = MIN(slots_limit, r);
    }
    return slots_limit > used_memslots;
}
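
/*
 * Note: used_memslots is refreshed in vhost_set_memory() whenever the
 * memory map changes, and is compared here against the tightest limit
 * among all registered backends (the in-kernel and vhost-user backends
 * report different, fairly small, limits). Callers such as the memory
 * hotplug path can use this to check whether one more RAM slot can
 * still be mapped before plugging it in.
 */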

static void vhost_dev_sync_region(struct vhost_dev *dev,
                                  MemoryRegionSection *section,
                                  uint64_t mfirst, uint64_t mlast,
                                  uint64_t rfirst, uint64_t rlast)
{
    vhost_log_chunk_t *log = dev->log->log;

    uint64_t start = MAX(mfirst, rfirst);
    uint64_t end = MIN(mlast, rlast);
    vhost_log_chunk_t *from = log + start / VHOST_LOG_CHUNK;
    vhost_log_chunk_t *to = log + end / VHOST_LOG_CHUNK + 1;
    uint64_t addr = QEMU_ALIGN_DOWN(start, VHOST_LOG_CHUNK);

    if (end < start) {
        return;
    }
    assert(end / VHOST_LOG_CHUNK < dev->log_size);
    assert(start / VHOST_LOG_CHUNK < dev->log_size);

    for (;from < to; ++from) {
        vhost_log_chunk_t log;
        /* We first check with non-atomic: much cheaper,
         * and we expect non-dirty to be the common case. */
        if (!*from) {
            addr += VHOST_LOG_CHUNK;
            continue;
        }
        /* Data must be read atomically. We don't really need barrier semantics
         * but it's easier to use atomic_* than roll our own. */
        log = atomic_xchg(from, 0);
        while (log) {
            int bit = ctzl(log);
            hwaddr page_addr;
            hwaddr section_offset;
            hwaddr mr_offset;
            page_addr = addr + bit * VHOST_LOG_PAGE;
            section_offset = page_addr - section->offset_within_address_space;
            mr_offset = section_offset + section->offset_within_region;
            memory_region_set_dirty(section->mr, mr_offset, VHOST_LOG_PAGE);
            log &= ~(0x1ull << bit);
        }
        addr += VHOST_LOG_CHUNK;
    }
}
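
/*
 * A worked example of the bitmap walk above, assuming the usual
 * definitions in hw/virtio/vhost.h (VHOST_LOG_PAGE = 0x1000 and one
 * 64-bit chunk covering 64 pages, i.e. VHOST_LOG_CHUNK = 0x40000):
 * if the backend dirtied guest page 0x42000, it set bit
 * (0x42000 % 0x40000) / 0x1000 = 2 in chunk 0x42000 / 0x40000 = 1.
 * The loop atomically exchanges that chunk with 0, ctzl() finds bit 2,
 * and page_addr = 0x40000 + 2 * 0x1000 = 0x42000 is marked dirty in the
 * MemoryRegion's own bitmap.
 */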

static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
                                   MemoryRegionSection *section,
                                   hwaddr first,
                                   hwaddr last)
{
    int i;
    hwaddr start_addr;
    hwaddr end_addr;

    if (!dev->log_enabled || !dev->started) {
        return 0;
    }
    start_addr = section->offset_within_address_space;
    end_addr = range_get_last(start_addr, int128_get64(section->size));
    start_addr = MAX(first, start_addr);
    end_addr = MIN(last, end_addr);

    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr,
                              reg->guest_phys_addr,
                              range_get_last(reg->guest_phys_addr,
                                             reg->memory_size));
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys,
                              range_get_last(vq->used_phys, vq->used_size));
    }
    return 0;
}

static void vhost_log_sync(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    vhost_sync_dirty_bitmap(dev, section, 0x0, ~0x0ULL);
}

static void vhost_log_sync_range(struct vhost_dev *dev,
                                 hwaddr first, hwaddr last)
{
    int i;
    /* FIXME: this is N^2 in number of sections */
    for (i = 0; i < dev->n_mem_sections; ++i) {
        MemoryRegionSection *section = &dev->mem_sections[i];
        vhost_sync_dirty_bitmap(dev, section, first, last);
    }
}

/* Assign/unassign. Keep an unsorted array of non-overlapping
 * memory regions in dev->mem. */
static void vhost_dev_unassign_memory(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int from, to, n = dev->mem->nregions;
    /* Track overlapping/split regions for sanity checking. */
    int overlap_start = 0, overlap_end = 0, overlap_middle = 0, split = 0;

    for (from = 0, to = 0; from < n; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t reglast;
        uint64_t memlast;
        uint64_t change;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }

        /* No overlap is simple */
        if (!ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                            start_addr, size)) {
            continue;
        }

        /* Split only happens if supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!split);

        reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        memlast = range_get_last(start_addr, size);

        /* Remove whole region */
        if (start_addr <= reg->guest_phys_addr && memlast >= reglast) {
            --dev->mem->nregions;
            --to;
            ++overlap_middle;
            continue;
        }

        /* Shrink region */
        if (memlast >= reglast) {
            reg->memory_size = start_addr - reg->guest_phys_addr;
            assert(reg->memory_size);
            assert(!overlap_end);
            ++overlap_end;
            continue;
        }

        /* Shift region */
        if (start_addr <= reg->guest_phys_addr) {
            change = memlast + 1 - reg->guest_phys_addr;
            reg->memory_size -= change;
            reg->guest_phys_addr += change;
            reg->userspace_addr += change;
            assert(reg->memory_size);
            assert(!overlap_start);
            ++overlap_start;
            continue;
        }

        /* This only happens if supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!overlap_start);
        assert(!overlap_end);
        assert(!overlap_middle);
        /* Split region: shrink first part, shift second part. */
        memcpy(dev->mem->regions + n, reg, sizeof *reg);
        reg->memory_size = start_addr - reg->guest_phys_addr;
        assert(reg->memory_size);
        change = memlast + 1 - reg->guest_phys_addr;
        reg = dev->mem->regions + n;
        reg->memory_size -= change;
        assert(reg->memory_size);
        reg->guest_phys_addr += change;
        reg->userspace_addr += change;
        /* Never add more than 1 region */
        assert(dev->mem->nregions == n);
        ++dev->mem->nregions;
        ++split;
    }
}

/* Called after unassign, so no regions overlap the given range. */
static void vhost_dev_assign_memory(struct vhost_dev *dev,
                                    uint64_t start_addr,
                                    uint64_t size,
                                    uint64_t uaddr)
{
    int from, to;
    struct vhost_memory_region *merged = NULL;
    for (from = 0, to = 0; from < dev->mem->nregions; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t prlast, urlast;
        uint64_t pmlast, umlast;
        uint64_t s, e, u;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }
        prlast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        pmlast = range_get_last(start_addr, size);
        urlast = range_get_last(reg->userspace_addr, reg->memory_size);
        umlast = range_get_last(uaddr, size);

        /* check for overlapping regions: should never happen. */
        assert(prlast < start_addr || pmlast < reg->guest_phys_addr);
        /* Not an adjacent or overlapping region - do not merge. */
        if ((prlast + 1 != start_addr || urlast + 1 != uaddr) &&
            (pmlast + 1 != reg->guest_phys_addr ||
             umlast + 1 != reg->userspace_addr)) {
            continue;
        }

        if (dev->vhost_ops->vhost_backend_can_merge &&
            !dev->vhost_ops->vhost_backend_can_merge(dev, uaddr, size,
                                                     reg->userspace_addr,
                                                     reg->memory_size)) {
            continue;
        }

        if (merged) {
            --to;
            assert(to >= 0);
        } else {
            merged = reg;
        }
        u = MIN(uaddr, reg->userspace_addr);
        s = MIN(start_addr, reg->guest_phys_addr);
        e = MAX(pmlast, prlast);
        uaddr = merged->userspace_addr = u;
        start_addr = merged->guest_phys_addr = s;
        size = merged->memory_size = e - s + 1;
        assert(merged->memory_size);
    }

    if (!merged) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        memset(reg, 0, sizeof *reg);
        reg->memory_size = size;
        assert(reg->memory_size);
        reg->guest_phys_addr = start_addr;
        reg->userspace_addr = uaddr;
        ++to;
    }
    assert(to <= dev->mem->nregions + 1);
    dev->mem->nregions = to;
}
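
/*
 * Merge example (a sketch, not tied to any particular backend): with an
 * existing region GPA [0x0, 0xffff] mapped at HVA 0x7f0000000000, adding
 * GPA [0x10000, 0x1ffff] at HVA 0x7f0000010000 passes the adjacency test
 * on both the guest-physical and the userspace side, so the two collapse
 * into one region GPA [0x0, 0x1ffff] of size 0x20000. If either side is
 * non-contiguous, a new table entry is appended instead.
 */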

static uint64_t vhost_get_log_size(struct vhost_dev *dev)
{
    uint64_t log_size = 0;
    int i;
    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        uint64_t last = range_get_last(reg->guest_phys_addr,
                                       reg->memory_size);
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        uint64_t last = vq->used_phys + vq->used_size - 1;
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    return log_size;
}
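
/*
 * The result is a chunk count, not a byte count. As a worked example,
 * again assuming VHOST_LOG_CHUNK = 0x40000 (one 64-bit chunk per 256 KiB
 * of guest physical address space): a guest whose highest RAM region
 * ends at 4 GiB needs 0x100000000 / 0x40000 = 16384 chunks, i.e. a
 * 128 KiB dirty log, regardless of how many regions sit below that top.
 */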

static struct vhost_log *vhost_log_alloc(uint64_t size, bool share)
{
    struct vhost_log *log;
    uint64_t logsize = size * sizeof(*(log->log));
    int fd = -1;

    log = g_new0(struct vhost_log, 1);
    if (share) {
        log->log = qemu_memfd_alloc("vhost-log", logsize,
                                    F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
                                    &fd);
        memset(log->log, 0, logsize);
    } else {
        log->log = g_malloc0(logsize);
    }

    log->size = size;
    log->refcnt = 1;
    log->fd = fd;

    return log;
}

static struct vhost_log *vhost_log_get(uint64_t size, bool share)
{
    struct vhost_log *log = share ? vhost_log_shm : vhost_log;

    if (!log || log->size != size) {
        log = vhost_log_alloc(size, share);
        if (share) {
            vhost_log_shm = log;
        } else {
            vhost_log = log;
        }
    } else {
        ++log->refcnt;
    }

    return log;
}

static void vhost_log_put(struct vhost_dev *dev, bool sync)
{
    struct vhost_log *log = dev->log;

    if (!log) {
        return;
    }

    --log->refcnt;
    if (log->refcnt == 0) {
        /* Sync only the range covered by the old log */
        if (dev->log_size && sync) {
            vhost_log_sync_range(dev, 0, dev->log_size * VHOST_LOG_CHUNK - 1);
        }

        if (vhost_log == log) {
            g_free(log->log);
            vhost_log = NULL;
        } else if (vhost_log_shm == log) {
            qemu_memfd_free(log->log, log->size * sizeof(*(log->log)),
                            log->fd);
            vhost_log_shm = NULL;
        }

        g_free(log);
    }

    dev->log = NULL;
    dev->log_size = 0;
}

static bool vhost_dev_log_is_shared(struct vhost_dev *dev)
{
    return dev->vhost_ops->vhost_requires_shm_log &&
           dev->vhost_ops->vhost_requires_shm_log(dev);
}

static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
{
    struct vhost_log *log = vhost_log_get(size, vhost_dev_log_is_shared(dev));
    uint64_t log_base = (uintptr_t)log->log;
    int r;

    /* inform backend of log switching, this must be done before
       releasing the current log, to ensure no logging is lost */
    r = dev->vhost_ops->vhost_set_log_base(dev, log_base, log);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_log_base failed");
    }

    vhost_log_put(dev, true);
    dev->log = log;
    dev->log_size = size;
}

static int vhost_dev_has_iommu(struct vhost_dev *dev)
{
    VirtIODevice *vdev = dev->vdev;

    return virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM);
}

static void *vhost_memory_map(struct vhost_dev *dev, hwaddr addr,
                              hwaddr *plen, int is_write)
{
    if (!vhost_dev_has_iommu(dev)) {
        return cpu_physical_memory_map(addr, plen, is_write);
    } else {
        return (void *)(uintptr_t)addr;
    }
}

static void vhost_memory_unmap(struct vhost_dev *dev, void *buffer,
                               hwaddr len, int is_write,
                               hwaddr access_len)
{
    if (!vhost_dev_has_iommu(dev)) {
        cpu_physical_memory_unmap(buffer, len, is_write, access_len);
    }
}

static int vhost_verify_ring_part_mapping(struct vhost_dev *dev,
                                          void *part,
                                          uint64_t part_addr,
                                          uint64_t part_size,
                                          uint64_t start_addr,
                                          uint64_t size)
{
    hwaddr l;
    void *p;
    int r = 0;

    if (!ranges_overlap(start_addr, size, part_addr, part_size)) {
        return 0;
    }
    l = part_size;
    p = vhost_memory_map(dev, part_addr, &l, 1);
    if (!p || l != part_size) {
        r = -ENOMEM;
    }
    if (p != part) {
        r = -EBUSY;
    }
    vhost_memory_unmap(dev, p, l, 0, 0);
    return r;
}

static int vhost_verify_ring_mappings(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int i, j;
    int r = 0;
    const char *part_name[] = {
        "descriptor table",
        "available ring",
        "used ring"
    };

    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;

        j = 0;
        r = vhost_verify_ring_part_mapping(dev, vq->desc, vq->desc_phys,
                                           vq->desc_size, start_addr, size);
        if (r) {
            break;
        }

        j++;
        r = vhost_verify_ring_part_mapping(dev, vq->avail, vq->avail_phys,
                                           vq->avail_size, start_addr, size);
        if (r) {
            break;
        }

        j++;
        r = vhost_verify_ring_part_mapping(dev, vq->used, vq->used_phys,
                                           vq->used_size, start_addr, size);
        if (r) {
            break;
        }
    }

    if (r == -ENOMEM) {
        error_report("Unable to map %s for ring %d", part_name[j], i);
    } else if (r == -EBUSY) {
        error_report("%s relocated for ring %d", part_name[j], i);
    }
    return r;
}

static struct vhost_memory_region *vhost_dev_find_reg(struct vhost_dev *dev,
                                                      uint64_t start_addr,
                                                      uint64_t size)
{
    int i, n = dev->mem->nregions;
    for (i = 0; i < n; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        if (ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                           start_addr, size)) {
            return reg;
        }
    }
    return NULL;
}

static bool vhost_dev_cmp_memory(struct vhost_dev *dev,
                                 uint64_t start_addr,
                                 uint64_t size,
                                 uint64_t uaddr)
{
    struct vhost_memory_region *reg = vhost_dev_find_reg(dev, start_addr, size);
    uint64_t reglast;
    uint64_t memlast;

    if (!reg) {
        return true;
    }

    reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
    memlast = range_get_last(start_addr, size);

    /* Need to extend region? */
    if (start_addr < reg->guest_phys_addr || memlast > reglast) {
        return true;
    }
    /* userspace_addr changed? */
    return uaddr != reg->userspace_addr + start_addr - reg->guest_phys_addr;
}

static void vhost_set_memory(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool add)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    bool log_dirty =
        memory_region_get_dirty_log_mask(section->mr) & ~(1 << DIRTY_MEMORY_MIGRATION);
    int s = offsetof(struct vhost_memory, regions) +
        (dev->mem->nregions + 1) * sizeof dev->mem->regions[0];
    void *ram;

    dev->mem = g_realloc(dev->mem, s);

    if (log_dirty) {
        add = false;
    }

    assert(size);

    /* Optimize no-change case. At least cirrus_vga does this a lot at this time. */
    ram = memory_region_get_ram_ptr(section->mr) + section->offset_within_region;
    if (add) {
        if (!vhost_dev_cmp_memory(dev, start_addr, size, (uintptr_t)ram)) {
            /* Region exists with same address. Nothing to do. */
            return;
        }
    } else {
        if (!vhost_dev_find_reg(dev, start_addr, size)) {
            /* Removing region that we don't access. Nothing to do. */
            return;
        }
    }

    vhost_dev_unassign_memory(dev, start_addr, size);
    if (add) {
        /* Add given mapping, merging adjacent regions if any */
        vhost_dev_assign_memory(dev, start_addr, size, (uintptr_t)ram);
    } else {
        /* Remove old mapping for this memory, if any. */
        vhost_dev_unassign_memory(dev, start_addr, size);
    }
    dev->mem_changed_start_addr = MIN(dev->mem_changed_start_addr, start_addr);
    dev->mem_changed_end_addr = MAX(dev->mem_changed_end_addr, start_addr + size - 1);
    dev->memory_changed = true;
    used_memslots = dev->mem->nregions;
}

static bool vhost_section(MemoryRegionSection *section)
{
    return memory_region_is_ram(section->mr) &&
        !memory_region_is_rom(section->mr);
}

static void vhost_begin(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    dev->mem_changed_end_addr = 0;
    dev->mem_changed_start_addr = -1;
}

static void vhost_commit(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    hwaddr start_addr = 0;
    ram_addr_t size = 0;
    uint64_t log_size;
    int r;

    if (!dev->memory_changed) {
        return;
    }
    if (!dev->started) {
        return;
    }
    if (dev->mem_changed_start_addr > dev->mem_changed_end_addr) {
        return;
    }

    if (dev->started) {
        start_addr = dev->mem_changed_start_addr;
        size = dev->mem_changed_end_addr - dev->mem_changed_start_addr + 1;

        r = vhost_verify_ring_mappings(dev, start_addr, size);
        assert(r >= 0);
    }

    if (!dev->log_enabled) {
        r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
        if (r < 0) {
            VHOST_OPS_DEBUG("vhost_set_mem_table failed");
        }
        dev->memory_changed = false;
        return;
    }
    log_size = vhost_get_log_size(dev);
    /* We allocate an extra 4K bytes of log,
     * to reduce the number of reallocations. */
#define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
    /* To log more, must increase log size before table update. */
    if (dev->log_size < log_size) {
        vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
    }
    r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_mem_table failed");
    }
    /* To log less, can only decrease log size after table update. */
    if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
        vhost_dev_log_resize(dev, log_size);
    }
    dev->memory_changed = false;
}
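
/*
 * Sizing note (a sketch under the usual vhost.h definitions, where the
 * log is an array of 64-bit chunks each covering 256 KiB of guest
 * physical address space): VHOST_LOG_BUFFER adds 0x1000 / 8 = 512 spare
 * chunks, so the log only has to be reallocated once the covered address
 * space grows by more than 512 * 256 KiB = 128 MiB, rather than on every
 * small layout change.
 */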

static void vhost_region_add(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);

    if (!vhost_section(section)) {
        return;
    }

    trace_vhost_region_add(dev, section->mr->name ?: NULL);
    ++dev->n_mem_sections;
    dev->mem_sections = g_renew(MemoryRegionSection, dev->mem_sections,
                                dev->n_mem_sections);
    dev->mem_sections[dev->n_mem_sections - 1] = *section;
    memory_region_ref(section->mr);
    vhost_set_memory(listener, section, true);
}

static void vhost_region_del(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int i;

    if (!vhost_section(section)) {
        return;
    }

    trace_vhost_region_del(dev, section->mr->name ?: NULL);
    vhost_set_memory(listener, section, false);
    memory_region_unref(section->mr);
    for (i = 0; i < dev->n_mem_sections; ++i) {
        if (dev->mem_sections[i].offset_within_address_space
            == section->offset_within_address_space) {
            --dev->n_mem_sections;
            memmove(&dev->mem_sections[i], &dev->mem_sections[i+1],
                    (dev->n_mem_sections - i) * sizeof(*dev->mem_sections));
            break;
        }
    }
}

static void vhost_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
{
    struct vhost_iommu *iommu = container_of(n, struct vhost_iommu, n);
    struct vhost_dev *hdev = iommu->hdev;
    hwaddr iova = iotlb->iova + iommu->iommu_offset;

    if (vhost_backend_invalidate_device_iotlb(hdev, iova,
                                              iotlb->addr_mask + 1)) {
        error_report("Failed to invalidate device iotlb");
    }
}

static void vhost_iommu_region_add(MemoryListener *listener,
                                   MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         iommu_listener);
    struct vhost_iommu *iommu;
    Int128 end;

    if (!memory_region_is_iommu(section->mr)) {
        return;
    }

    trace_vhost_iommu_region_add(dev, section->mr->name ?: NULL);

    iommu = g_malloc0(sizeof(*iommu));
    end = int128_add(int128_make64(section->offset_within_region),
                     section->size);
    end = int128_sub(end, int128_one());
    iommu_notifier_init(&iommu->n, vhost_iommu_unmap_notify,
                        IOMMU_NOTIFIER_UNMAP,
                        section->offset_within_region,
                        int128_get64(end));
    iommu->mr = section->mr;
    iommu->iommu_offset = section->offset_within_address_space -
                          section->offset_within_region;
    iommu->hdev = dev;
    memory_region_register_iommu_notifier(section->mr, &iommu->n);
    QLIST_INSERT_HEAD(&dev->iommu_list, iommu, iommu_next);
    /* TODO: can replay help performance here? */
}

static void vhost_iommu_region_del(MemoryListener *listener,
                                   MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         iommu_listener);
    struct vhost_iommu *iommu;

    if (!memory_region_is_iommu(section->mr)) {
        return;
    }

    trace_vhost_iommu_region_del(dev, section->mr->name ?: NULL);

    QLIST_FOREACH(iommu, &dev->iommu_list, iommu_next) {
        if (iommu->mr == section->mr &&
            iommu->n.start == section->offset_within_region) {
            memory_region_unregister_iommu_notifier(iommu->mr,
                                                    &iommu->n);
            QLIST_REMOVE(iommu, iommu_next);
            g_free(iommu);
            break;
        }
    }
}

static void vhost_region_nop(MemoryListener *listener,
                             MemoryRegionSection *section)
{
}

static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx, bool enable_log)
{
    struct vhost_vring_addr addr = {
        .index = idx,
        .desc_user_addr = (uint64_t)(unsigned long)vq->desc,
        .avail_user_addr = (uint64_t)(unsigned long)vq->avail,
        .used_user_addr = (uint64_t)(unsigned long)vq->used,
        .log_guest_addr = vq->used_phys,
        .flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0,
    };
    int r = dev->vhost_ops->vhost_set_vring_addr(dev, &addr);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_vring_addr failed");
        return -errno;
    }
    return 0;
}

static int vhost_dev_set_features(struct vhost_dev *dev,
                                  bool enable_log)
{
    uint64_t features = dev->acked_features;
    int r;
    if (enable_log) {
        features |= 0x1ULL << VHOST_F_LOG_ALL;
    }
    r = dev->vhost_ops->vhost_set_features(dev, features);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_features failed");
    }
    return r < 0 ? -errno : 0;
}

static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
{
    int r, i, idx;
    r = vhost_dev_set_features(dev, enable_log);
    if (r < 0) {
        goto err_features;
    }
    for (i = 0; i < dev->nvqs; ++i) {
        idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
        r = vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
                                     enable_log);
        if (r < 0) {
            goto err_vq;
        }
    }
    return 0;
err_vq:
    for (; i >= 0; --i) {
        idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
        vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
                                 dev->log_enabled);
    }
    vhost_dev_set_features(dev, dev->log_enabled);
err_features:
    return r;
}

static int vhost_migration_log(MemoryListener *listener, int enable)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int r;
    if (!!enable == dev->log_enabled) {
        return 0;
    }
    if (!dev->started) {
        dev->log_enabled = enable;
        return 0;
    }
    if (!enable) {
        r = vhost_dev_set_log(dev, false);
        if (r < 0) {
            return r;
        }
        vhost_log_put(dev, false);
    } else {
        vhost_dev_log_resize(dev, vhost_get_log_size(dev));
        r = vhost_dev_set_log(dev, true);
        if (r < 0) {
            return r;
        }
    }
    dev->log_enabled = enable;
    return 0;
}

static void vhost_log_global_start(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, true);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_global_stop(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, false);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_start(MemoryListener *listener,
                            MemoryRegionSection *section,
                            int old, int new)
{
    /* FIXME: implement */
}

static void vhost_log_stop(MemoryListener *listener,
                           MemoryRegionSection *section,
                           int old, int new)
{
    /* FIXME: implement */
}

/* The vhost driver natively knows how to handle the vrings of non
 * cross-endian legacy devices and modern devices. Only legacy devices
 * exposed to a bi-endian guest may require the vhost driver to use a
 * specific endianness.
 */
static inline bool vhost_needs_vring_endian(VirtIODevice *vdev)
{
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        return false;
    }
#ifdef HOST_WORDS_BIGENDIAN
    return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_LITTLE;
#else
    return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_BIG;
#endif
}
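
/*
 * Example: a legacy (pre-VIRTIO 1.0) device driven by a little-endian
 * guest on a big-endian host returns true here, because the vring layout
 * follows guest endianness while the vhost backend defaults to host
 * order. A VIRTIO_F_VERSION_1 device is always little-endian, so no
 * explicit setting is needed.
 */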

static int vhost_virtqueue_set_vring_endian_legacy(struct vhost_dev *dev,
                                                   bool is_big_endian,
                                                   int vhost_vq_index)
{
    struct vhost_vring_state s = {
        .index = vhost_vq_index,
        .num = is_big_endian
    };

    if (!dev->vhost_ops->vhost_set_vring_endian(dev, &s)) {
        return 0;
    }

    VHOST_OPS_DEBUG("vhost_set_vring_endian failed");
    if (errno == ENOTTY) {
        error_report("vhost does not support cross-endian");
        return -ENOSYS;
    }

    return -errno;
}

static int vhost_memory_region_lookup(struct vhost_dev *hdev,
                                      uint64_t gpa, uint64_t *uaddr,
                                      uint64_t *len)
{
    int i;

    for (i = 0; i < hdev->mem->nregions; i++) {
        struct vhost_memory_region *reg = hdev->mem->regions + i;

        if (gpa >= reg->guest_phys_addr &&
            reg->guest_phys_addr + reg->memory_size > gpa) {
            *uaddr = reg->userspace_addr + gpa - reg->guest_phys_addr;
            *len = reg->guest_phys_addr + reg->memory_size - gpa;
            return 0;
        }
    }

    return -EFAULT;
}

int vhost_device_iotlb_miss(struct vhost_dev *dev, uint64_t iova, int write)
{
    IOMMUTLBEntry iotlb;
    uint64_t uaddr, len;
    int ret = -EFAULT;

    rcu_read_lock();

    iotlb = address_space_get_iotlb_entry(dev->vdev->dma_as,
                                          iova, write);
    if (iotlb.target_as != NULL) {
        ret = vhost_memory_region_lookup(dev, iotlb.translated_addr,
                                         &uaddr, &len);
        if (ret) {
            error_report("Failed to look up the translated address "
                         "%"PRIx64, iotlb.translated_addr);
            goto out;
        }

        len = MIN(iotlb.addr_mask + 1, len);
        iova = iova & ~iotlb.addr_mask;

        ret = vhost_backend_update_device_iotlb(dev, iova, uaddr,
                                                len, iotlb.perm);
        if (ret) {
            error_report("Failed to update device iotlb");
            goto out;
        }
    }
out:
    rcu_read_unlock();

    return ret;
}
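
/*
 * Flow recap for the miss path above: the backend stalls on an
 * untranslated IOVA and reports a miss; QEMU resolves it through the
 * device's DMA address space under RCU, maps the resulting guest
 * physical address back to a backend-visible userspace address, clamps
 * the length to the IOMMU page (addr_mask), and pushes the translation
 * into the backend's device IOTLB so the access can be retried.
 */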

static int vhost_virtqueue_start(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    hwaddr s, l, a;
    int r;
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
    struct vhost_vring_file file = {
        .index = vhost_vq_index
    };
    struct vhost_vring_state state = {
        .index = vhost_vq_index
    };
    struct VirtQueue *vvq = virtio_get_queue(vdev, idx);

    vq->num = state.num = virtio_queue_get_num(vdev, idx);
    r = dev->vhost_ops->vhost_set_vring_num(dev, &state);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_num failed");
        return -errno;
    }

    state.num = virtio_queue_get_last_avail_idx(vdev, idx);
    r = dev->vhost_ops->vhost_set_vring_base(dev, &state);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_base failed");
        return -errno;
    }

    if (vhost_needs_vring_endian(vdev)) {
        r = vhost_virtqueue_set_vring_endian_legacy(dev,
                                                    virtio_is_big_endian(vdev),
                                                    vhost_vq_index);
        if (r) {
            return -errno;
        }
    }

    vq->desc_size = s = l = virtio_queue_get_desc_size(vdev, idx);
    vq->desc_phys = a = virtio_queue_get_desc_addr(vdev, idx);
    vq->desc = vhost_memory_map(dev, a, &l, 0);
    if (!vq->desc || l != s) {
        r = -ENOMEM;
        goto fail_alloc_desc;
    }
    vq->avail_size = s = l = virtio_queue_get_avail_size(vdev, idx);
    vq->avail_phys = a = virtio_queue_get_avail_addr(vdev, idx);
    vq->avail = vhost_memory_map(dev, a, &l, 0);
    if (!vq->avail || l != s) {
        r = -ENOMEM;
        goto fail_alloc_avail;
    }
    vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
    vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
    vq->used = vhost_memory_map(dev, a, &l, 1);
    if (!vq->used || l != s) {
        r = -ENOMEM;
        goto fail_alloc_used;
    }

    r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
    if (r < 0) {
        r = -errno;
        goto fail_alloc;
    }

    file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
    r = dev->vhost_ops->vhost_set_vring_kick(dev, &file);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_kick failed");
        r = -errno;
        goto fail_kick;
    }

    /* Clear and discard previous events if any. */
    event_notifier_test_and_clear(&vq->masked_notifier);

    /* Init vring in unmasked state, unless guest_notifier_mask
     * will do it later.
     */
    if (!vdev->use_guest_notifier_mask) {
        /* TODO: check and handle errors. */
        vhost_virtqueue_mask(dev, vdev, idx, false);
    }

    if (k->query_guest_notifiers &&
        k->query_guest_notifiers(qbus->parent) &&
        virtio_queue_vector(vdev, idx) == VIRTIO_NO_VECTOR) {
        file.fd = -1;
        r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
        if (r) {
            goto fail_vector;
        }
    }

    return 0;

fail_vector:
fail_kick:
fail_alloc:
    vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
                       0, 0);
fail_alloc_used:
    vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
                       0, 0);
fail_alloc_avail:
    vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
                       0, 0);
fail_alloc_desc:
    return r;
}

static void vhost_virtqueue_stop(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
    struct vhost_vring_state state = {
        .index = vhost_vq_index,
    };
    int r;

    r = dev->vhost_ops->vhost_get_vring_base(dev, &state);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost VQ %d ring restore failed: %d", idx, r);
        /* Connection to the backend is broken, so let's sync internal
         * last avail idx to the device used idx.
         */
        virtio_queue_restore_last_avail_idx(vdev, idx);
    } else {
        virtio_queue_set_last_avail_idx(vdev, idx, state.num);
    }
    virtio_queue_invalidate_signalled_used(vdev, idx);
    virtio_queue_update_used_idx(vdev, idx);

    /* In the cross-endian case, we need to reset the vring endianness to
     * native as legacy devices expect so by default.
     */
    if (vhost_needs_vring_endian(vdev)) {
        vhost_virtqueue_set_vring_endian_legacy(dev,
                                                !virtio_is_big_endian(vdev),
                                                vhost_vq_index);
    }

    vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
                       1, virtio_queue_get_used_size(vdev, idx));
    vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
                       0, virtio_queue_get_avail_size(vdev, idx));
    vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
                       0, virtio_queue_get_desc_size(vdev, idx));
}

static void vhost_eventfd_add(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static void vhost_eventfd_del(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static int vhost_virtqueue_set_busyloop_timeout(struct vhost_dev *dev,
                                                int n, uint32_t timeout)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
    struct vhost_vring_state state = {
        .index = vhost_vq_index,
        .num = timeout,
    };
    int r;

    if (!dev->vhost_ops->vhost_set_vring_busyloop_timeout) {
        return -EINVAL;
    }

    r = dev->vhost_ops->vhost_set_vring_busyloop_timeout(dev, &state);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_busyloop_timeout failed");
        return r;
    }

    return 0;
}

static int vhost_virtqueue_init(struct vhost_dev *dev,
                                struct vhost_virtqueue *vq, int n)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
    struct vhost_vring_file file = {
        .index = vhost_vq_index,
    };
    int r = event_notifier_init(&vq->masked_notifier, 0);
    if (r < 0) {
        return r;
    }

    file.fd = event_notifier_get_fd(&vq->masked_notifier);
    r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_call failed");
        r = -errno;
        goto fail_call;
    }

    vq->dev = dev;

    return 0;
fail_call:
    event_notifier_cleanup(&vq->masked_notifier);
    return r;
}

static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
{
    event_notifier_cleanup(&vq->masked_notifier);
}

int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
                   VhostBackendType backend_type, uint32_t busyloop_timeout)
{
    uint64_t features;
    int i, r, n_initialized_vqs = 0;
    Error *local_err = NULL;

    hdev->vdev = NULL;
    hdev->migration_blocker = NULL;

    r = vhost_set_backend_type(hdev, backend_type);
    assert(r >= 0);

    r = hdev->vhost_ops->vhost_backend_init(hdev, opaque);
    if (r < 0) {
        goto fail;
    }

    if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
        error_report("vhost backend memory slots limit is less"
                     " than current number of present memory slots");
        r = -1;
        goto fail;
    }

    r = hdev->vhost_ops->vhost_set_owner(hdev);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_owner failed");
        goto fail;
    }

    r = hdev->vhost_ops->vhost_get_features(hdev, &features);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_get_features failed");
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i, ++n_initialized_vqs) {
        r = vhost_virtqueue_init(hdev, hdev->vqs + i, hdev->vq_index + i);
        if (r < 0) {
            goto fail;
        }
    }

    if (busyloop_timeout) {
        for (i = 0; i < hdev->nvqs; ++i) {
            r = vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i,
                                                     busyloop_timeout);
            if (r < 0) {
                goto fail_busyloop;
            }
        }
    }

    hdev->features = features;

    hdev->memory_listener = (MemoryListener) {
        .begin = vhost_begin,
        .commit = vhost_commit,
        .region_add = vhost_region_add,
        .region_del = vhost_region_del,
        .region_nop = vhost_region_nop,
        .log_start = vhost_log_start,
        .log_stop = vhost_log_stop,
        .log_sync = vhost_log_sync,
        .log_global_start = vhost_log_global_start,
        .log_global_stop = vhost_log_global_stop,
        .eventfd_add = vhost_eventfd_add,
        .eventfd_del = vhost_eventfd_del,
        .priority = 10
    };

    hdev->iommu_listener = (MemoryListener) {
        .region_add = vhost_iommu_region_add,
        .region_del = vhost_iommu_region_del,
    };

    if (hdev->migration_blocker == NULL) {
        if (!(hdev->features & (0x1ULL << VHOST_F_LOG_ALL))) {
            error_setg(&hdev->migration_blocker,
                       "Migration disabled: vhost lacks VHOST_F_LOG_ALL feature.");
        } else if (vhost_dev_log_is_shared(hdev) && !qemu_memfd_check()) {
            error_setg(&hdev->migration_blocker,
                       "Migration disabled: failed to allocate shared memory");
        }
    }

    if (hdev->migration_blocker != NULL) {
        r = migrate_add_blocker(hdev->migration_blocker, &local_err);
        if (local_err) {
            error_report_err(local_err);
            error_free(hdev->migration_blocker);
            goto fail_busyloop;
        }
    }

    hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
    hdev->n_mem_sections = 0;
    hdev->mem_sections = NULL;
    hdev->log = NULL;
    hdev->log_size = 0;
    hdev->log_enabled = false;
    hdev->started = false;
    hdev->memory_changed = false;
    memory_listener_register(&hdev->memory_listener, &address_space_memory);
    QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
    return 0;

fail_busyloop:
    while (--i >= 0) {
        vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i, 0);
    }
fail:
    hdev->nvqs = n_initialized_vqs;
    vhost_dev_cleanup(hdev);
    return r;
}
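
/*
 * Typical call sequence, as seen from a user of this API such as
 * vhost_net (a sketch for orientation, not a complete caller):
 *
 *   vhost_dev_init(hdev, opaque, backend_type, busyloop_timeout);
 *   ...negotiate features via vhost_get_features()/vhost_ack_features()...
 *   vhost_dev_enable_notifiers(hdev, vdev);
 *   vhost_dev_start(hdev, vdev);
 *   ...device runs; migration may toggle dirty logging underneath...
 *   vhost_dev_stop(hdev, vdev);
 *   vhost_dev_disable_notifiers(hdev, vdev);
 *   vhost_dev_cleanup(hdev);
 */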

void vhost_dev_cleanup(struct vhost_dev *hdev)
{
    int i;

    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_cleanup(hdev->vqs + i);
    }
    if (hdev->mem) {
        /* those are only safe after successful init */
        memory_listener_unregister(&hdev->memory_listener);
        QLIST_REMOVE(hdev, entry);
    }
    if (hdev->migration_blocker) {
        migrate_del_blocker(hdev->migration_blocker);
        error_free(hdev->migration_blocker);
    }
    g_free(hdev->mem);
    g_free(hdev->mem_sections);
    if (hdev->vhost_ops) {
        hdev->vhost_ops->vhost_backend_cleanup(hdev);
    }
    assert(!hdev->log);

    memset(hdev, 0, sizeof(struct vhost_dev));
}

/* Stop processing guest IO notifications in qemu.
 * Start processing them in vhost in kernel.
 */
int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    int i, r, e;

    /* We will pass the notifiers to the kernel, make sure that QEMU
     * doesn't interfere.
     */
    r = virtio_device_grab_ioeventfd(vdev);
    if (r < 0) {
        error_report("binding does not support host notifiers");
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i) {
        r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
                                         true);
        if (r < 0) {
            error_report("vhost VQ %d notifier binding failed: %d", i, -r);
            goto fail_vq;
        }
    }

    return 0;
fail_vq:
    while (--i >= 0) {
        e = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
                                         false);
        if (e < 0) {
            error_report("vhost VQ %d notifier cleanup error: %d", i, -e);
        }
        assert(e >= 0);
    }
    virtio_device_release_ioeventfd(vdev);
fail:
    return r;
}

/* Stop processing guest IO notifications in vhost.
 * Start processing them in qemu.
 * This might actually run the qemu handlers right away,
 * so virtio in qemu must be completely setup when this is called.
 */
void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    int i, r;

    for (i = 0; i < hdev->nvqs; ++i) {
        r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
                                         false);
        if (r < 0) {
            error_report("vhost VQ %d notifier cleanup failed: %d", i, -r);
        }
        assert(r >= 0);
    }
    virtio_device_release_ioeventfd(vdev);
}

/* Test and clear event pending status.
 * Should be called after unmask to avoid losing events.
 */
bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
{
    struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index;
    assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);
    return event_notifier_test_and_clear(&vq->masked_notifier);
}

/* Mask/unmask events from this vq. */
void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
                          bool mask)
{
    struct VirtQueue *vvq = virtio_get_queue(vdev, n);
    int r, index = n - hdev->vq_index;
    struct vhost_vring_file file;

    /* should only be called after backend is connected */
    assert(hdev->vhost_ops);

    if (mask) {
        assert(vdev->use_guest_notifier_mask);
        file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier);
    } else {
        file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
    }

    file.index = hdev->vhost_ops->vhost_get_vq_index(hdev, n);
    r = hdev->vhost_ops->vhost_set_vring_call(hdev, &file);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_vring_call failed");
    }
}

uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
                            uint64_t features)
{
    const int *bit = feature_bits;
    while (*bit != VHOST_INVALID_FEATURE_BIT) {
        uint64_t bit_mask = (1ULL << *bit);
        if (!(hdev->features & bit_mask)) {
            features &= ~bit_mask;
        }
        bit++;
    }
    return features;
}

void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
                        uint64_t features)
{
    const int *bit = feature_bits;
    while (*bit != VHOST_INVALID_FEATURE_BIT) {
        uint64_t bit_mask = (1ULL << *bit);
        if (features & bit_mask) {
            hdev->acked_features |= bit_mask;
        }
        bit++;
    }
}
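
/*
 * Negotiation sketch: a backend keeps a VHOST_INVALID_FEATURE_BIT
 * terminated table of the feature bits it cares about. Before exposing
 * features to the guest it filters them through vhost_get_features(),
 * which clears any bit the vhost backend did not offer; once the guest
 * has acked, vhost_ack_features() copies the accepted bits into
 * hdev->acked_features, which vhost_dev_set_features() later pushes to
 * the backend (plus VHOST_F_LOG_ALL while dirty logging is active).
 */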

int vhost_dev_get_config(struct vhost_dev *hdev, uint8_t *config,
                         uint32_t config_len)
{
    assert(hdev->vhost_ops);

    if (hdev->vhost_ops->vhost_get_config) {
        return hdev->vhost_ops->vhost_get_config(hdev, config, config_len);
    }

    return -1;
}

int vhost_dev_set_config(struct vhost_dev *hdev, const uint8_t *data,
                         uint32_t offset, uint32_t size, uint32_t flags)
{
    assert(hdev->vhost_ops);

    if (hdev->vhost_ops->vhost_set_config) {
        return hdev->vhost_ops->vhost_set_config(hdev, data, offset,
                                                 size, flags);
    }

    return -1;
}

void vhost_dev_set_config_notifier(struct vhost_dev *hdev,
                                   const VhostDevConfigOps *ops)
{
    assert(hdev->vhost_ops);
    hdev->config_ops = ops;
}

/* Host notifiers must be enabled at this point. */
int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;

    /* should only be called after backend is connected */
    assert(hdev->vhost_ops);

    hdev->started = true;
    hdev->vdev = vdev;

    r = vhost_dev_set_features(hdev, hdev->log_enabled);
    if (r < 0) {
        goto fail_features;
    }

    if (vhost_dev_has_iommu(hdev)) {
        memory_listener_register(&hdev->iommu_listener, vdev->dma_as);
    }

    r = hdev->vhost_ops->vhost_set_mem_table(hdev, hdev->mem);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_mem_table failed");
        r = -errno;
        goto fail_mem;
    }
    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_start(hdev,
                                  vdev,
                                  hdev->vqs + i,
                                  hdev->vq_index + i);
        if (r < 0) {
            goto fail_vq;
        }
    }

    if (hdev->log_enabled) {
        uint64_t log_base;

        hdev->log_size = vhost_get_log_size(hdev);
        hdev->log = vhost_log_get(hdev->log_size,
                                  vhost_dev_log_is_shared(hdev));
        log_base = (uintptr_t)hdev->log->log;
        r = hdev->vhost_ops->vhost_set_log_base(hdev,
                                                hdev->log_size ? log_base : 0,
                                                hdev->log);
        if (r < 0) {
            VHOST_OPS_DEBUG("vhost_set_log_base failed");
            r = -errno;
            goto fail_log;
        }
    }

    if (vhost_dev_has_iommu(hdev)) {
        hdev->vhost_ops->vhost_set_iotlb_callback(hdev, true);

        /* Update used ring information for IOTLB to work correctly;
         * the vhost-kernel code requires this. */
        for (i = 0; i < hdev->nvqs; ++i) {
            struct vhost_virtqueue *vq = hdev->vqs + i;
            vhost_device_iotlb_miss(hdev, vq->used_phys, true);
        }
    }
    return 0;
fail_log:
    vhost_log_put(hdev, false);
fail_vq:
    while (--i >= 0) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }
    i = hdev->nvqs;

fail_mem:
fail_features:

    hdev->started = false;
    return r;
}

/* Host notifiers must be enabled at this point. */
void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i;

    /* should only be called after backend is connected */
    assert(hdev->vhost_ops);

    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }

    if (vhost_dev_has_iommu(hdev)) {
        hdev->vhost_ops->vhost_set_iotlb_callback(hdev, false);
        memory_listener_unregister(&hdev->iommu_listener);
    }
    vhost_log_put(hdev, true);
    hdev->started = false;
    hdev->vdev = NULL;
}

int vhost_net_set_backend(struct vhost_dev *hdev,
                          struct vhost_vring_file *file)
{
    if (hdev->vhost_ops->vhost_net_set_backend) {
        return hdev->vhost_ops->vhost_net_set_backend(hdev, file);
    }

    return -1;
}