/*
 * vhost support
 *
 * Copyright Red Hat, Inc. 2010
 *
 * Authors:
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/virtio/vhost.h"
#include "hw/hw.h"
#include "qemu/atomic.h"
#include "qemu/range.h"
#include "qemu/error-report.h"
#include "qemu/memfd.h"
#include <linux/vhost.h>
#include "exec/address-spaces.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
#include "migration/migration.h"
#include "sysemu/dma.h"

/* enabled until disconnected backend stabilizes */
#define _VHOST_DEBUG 1

#ifdef _VHOST_DEBUG
#define VHOST_OPS_DEBUG(fmt, ...) \
    do { error_report(fmt ": %s (%d)", ## __VA_ARGS__, \
                      strerror(errno), errno); } while (0)
#else
#define VHOST_OPS_DEBUG(fmt, ...) \
    do { } while (0)
#endif

static struct vhost_log *vhost_log;
static struct vhost_log *vhost_log_shm;

static unsigned int used_memslots;
static QLIST_HEAD(, vhost_dev) vhost_devices =
    QLIST_HEAD_INITIALIZER(vhost_devices);

bool vhost_has_free_slot(void)
{
    unsigned int slots_limit = ~0U;
    struct vhost_dev *hdev;

    QLIST_FOREACH(hdev, &vhost_devices, entry) {
        unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
        slots_limit = MIN(slots_limit, r);
    }
    return slots_limit > used_memslots;
}

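/*
 * Dirty log layout: the log is an array of vhost_log_chunk_t bitmaps,
 * where each bit marks one VHOST_LOG_PAGE-sized page as dirty.
 * vhost_dev_sync_region() walks the chunks covering the intersection
 * of the section range [mfirst, mlast] and the requested range
 * [rfirst, rlast], atomically fetches and clears each chunk, and
 * forwards every set bit to memory_region_set_dirty().
 */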
static void vhost_dev_sync_region(struct vhost_dev *dev,
                                  MemoryRegionSection *section,
                                  uint64_t mfirst, uint64_t mlast,
                                  uint64_t rfirst, uint64_t rlast)
{
    vhost_log_chunk_t *log = dev->log->log;

    uint64_t start = MAX(mfirst, rfirst);
    uint64_t end = MIN(mlast, rlast);
    vhost_log_chunk_t *from = log + start / VHOST_LOG_CHUNK;
    vhost_log_chunk_t *to = log + end / VHOST_LOG_CHUNK + 1;
    uint64_t addr = (start / VHOST_LOG_CHUNK) * VHOST_LOG_CHUNK;

    if (end < start) {
        return;
    }
    assert(end / VHOST_LOG_CHUNK < dev->log_size);
    assert(start / VHOST_LOG_CHUNK < dev->log_size);

    for (; from < to; ++from) {
        vhost_log_chunk_t log;
        /* We first check with non-atomic accesses: this is much cheaper,
         * and we expect non-dirty to be the common case. */
        if (!*from) {
            addr += VHOST_LOG_CHUNK;
            continue;
        }
        /* Data must be read atomically. We don't really need barrier semantics
         * but it's easier to use atomic_* than roll our own. */
        log = atomic_xchg(from, 0);
        while (log) {
            int bit = ctzl(log);
            hwaddr page_addr;
            hwaddr section_offset;
            hwaddr mr_offset;
            page_addr = addr + bit * VHOST_LOG_PAGE;
            section_offset = page_addr - section->offset_within_address_space;
            mr_offset = section_offset + section->offset_within_region;
            memory_region_set_dirty(section->mr, mr_offset, VHOST_LOG_PAGE);
            log &= ~(0x1ull << bit);
        }
        addr += VHOST_LOG_CHUNK;
    }
}

static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
                                   MemoryRegionSection *section,
                                   hwaddr first,
                                   hwaddr last)
{
    int i;
    hwaddr start_addr;
    hwaddr end_addr;

    if (!dev->log_enabled || !dev->started) {
        return 0;
    }
    start_addr = section->offset_within_address_space;
    end_addr = range_get_last(start_addr, int128_get64(section->size));
    start_addr = MAX(first, start_addr);
    end_addr = MIN(last, end_addr);

    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr,
                              reg->guest_phys_addr,
                              range_get_last(reg->guest_phys_addr,
                                             reg->memory_size));
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys,
                              range_get_last(vq->used_phys, vq->used_size));
    }
    return 0;
}

static void vhost_log_sync(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    vhost_sync_dirty_bitmap(dev, section, 0x0, ~0x0ULL);
}

static void vhost_log_sync_range(struct vhost_dev *dev,
                                 hwaddr first, hwaddr last)
{
    int i;
    /* FIXME: this is N^2 in number of sections */
    for (i = 0; i < dev->n_mem_sections; ++i) {
        MemoryRegionSection *section = &dev->mem_sections[i];
        vhost_sync_dirty_bitmap(dev, section, first, last);
    }
}

/* Assign/unassign. Keep an unsorted array of non-overlapping
 * memory regions in dev->mem. */
static void vhost_dev_unassign_memory(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int from, to, n = dev->mem->nregions;
    /* Track overlapping/split regions for sanity checking. */
    int overlap_start = 0, overlap_end = 0, overlap_middle = 0, split = 0;

    for (from = 0, to = 0; from < n; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t reglast;
        uint64_t memlast;
        uint64_t change;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }

        /* No overlap is simple */
        if (!ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                            start_addr, size)) {
            continue;
        }

        /* Split only happens if the supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!split);

        reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        memlast = range_get_last(start_addr, size);

        /* Remove whole region */
        if (start_addr <= reg->guest_phys_addr && memlast >= reglast) {
            --dev->mem->nregions;
            --to;
            ++overlap_middle;
            continue;
        }

        /* Shrink region */
        if (memlast >= reglast) {
            reg->memory_size = start_addr - reg->guest_phys_addr;
            assert(reg->memory_size);
            assert(!overlap_end);
            ++overlap_end;
            continue;
        }

        /* Shift region */
        if (start_addr <= reg->guest_phys_addr) {
            change = memlast + 1 - reg->guest_phys_addr;
            reg->memory_size -= change;
            reg->guest_phys_addr += change;
            reg->userspace_addr += change;
            assert(reg->memory_size);
            assert(!overlap_start);
            ++overlap_start;
            continue;
        }

        /* This only happens if the supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!overlap_start);
        assert(!overlap_end);
        assert(!overlap_middle);
        /* Split region: shrink first part, shift second part. */
        memcpy(dev->mem->regions + n, reg, sizeof *reg);
        reg->memory_size = start_addr - reg->guest_phys_addr;
        assert(reg->memory_size);
        change = memlast + 1 - reg->guest_phys_addr;
        reg = dev->mem->regions + n;
        reg->memory_size -= change;
        assert(reg->memory_size);
        reg->guest_phys_addr += change;
        reg->userspace_addr += change;
        /* Never add more than 1 region */
        assert(dev->mem->nregions == n);
        ++dev->mem->nregions;
        ++split;
    }
}

/* Called after unassign, so no regions overlap the given range. */
static void vhost_dev_assign_memory(struct vhost_dev *dev,
                                    uint64_t start_addr,
                                    uint64_t size,
                                    uint64_t uaddr)
{
    int from, to;
    struct vhost_memory_region *merged = NULL;
    for (from = 0, to = 0; from < dev->mem->nregions; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t prlast, urlast;
        uint64_t pmlast, umlast;
        uint64_t s, e, u;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }
        prlast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        pmlast = range_get_last(start_addr, size);
        urlast = range_get_last(reg->userspace_addr, reg->memory_size);
        umlast = range_get_last(uaddr, size);

        /* check for overlapping regions: should never happen. */
        assert(prlast < start_addr || pmlast < reg->guest_phys_addr);
        /* Not an adjacent or overlapping region - do not merge. */
        if ((prlast + 1 != start_addr || urlast + 1 != uaddr) &&
            (pmlast + 1 != reg->guest_phys_addr ||
             umlast + 1 != reg->userspace_addr)) {
            continue;
        }

        if (dev->vhost_ops->vhost_backend_can_merge &&
            !dev->vhost_ops->vhost_backend_can_merge(dev, uaddr, size,
                                                     reg->userspace_addr,
                                                     reg->memory_size)) {
            continue;
        }

        if (merged) {
            --to;
            assert(to >= 0);
        } else {
            merged = reg;
        }
        u = MIN(uaddr, reg->userspace_addr);
        s = MIN(start_addr, reg->guest_phys_addr);
        e = MAX(pmlast, prlast);
        uaddr = merged->userspace_addr = u;
        start_addr = merged->guest_phys_addr = s;
        size = merged->memory_size = e - s + 1;
        assert(merged->memory_size);
    }

    if (!merged) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        memset(reg, 0, sizeof *reg);
        reg->memory_size = size;
        assert(reg->memory_size);
        reg->guest_phys_addr = start_addr;
        reg->userspace_addr = uaddr;
        ++to;
    }
    assert(to <= dev->mem->nregions + 1);
    dev->mem->nregions = to;
}

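/*
 * The log must cover the highest guest physical address the backend
 * may write to, from both guest memory regions and the used rings, so
 * the size is the maximum of "last byte / VHOST_LOG_CHUNK + 1" over
 * both sets.
 */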
static uint64_t vhost_get_log_size(struct vhost_dev *dev)
{
    uint64_t log_size = 0;
    int i;
    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        uint64_t last = range_get_last(reg->guest_phys_addr,
                                       reg->memory_size);
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        uint64_t last = vq->used_phys + vq->used_size - 1;
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    return log_size;
}

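/*
 * Two process-global logs are cached and refcounted: a plain
 * allocation, and a memfd-backed one for backends that must share the
 * log with another process (vhost-user's vhost_requires_shm_log).
 * vhost_log_get() reuses the cached log when the requested size
 * matches, otherwise it installs a fresh allocation in its place.
 */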
static struct vhost_log *vhost_log_alloc(uint64_t size, bool share)
{
    struct vhost_log *log;
    uint64_t logsize = size * sizeof(*(log->log));
    int fd = -1;

    log = g_new0(struct vhost_log, 1);
    if (share) {
        log->log = qemu_memfd_alloc("vhost-log", logsize,
                                    F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
                                    &fd);
        memset(log->log, 0, logsize);
    } else {
        log->log = g_malloc0(logsize);
    }

    log->size = size;
    log->refcnt = 1;
    log->fd = fd;

    return log;
}

static struct vhost_log *vhost_log_get(uint64_t size, bool share)
{
    struct vhost_log *log = share ? vhost_log_shm : vhost_log;

    if (!log || log->size != size) {
        log = vhost_log_alloc(size, share);
        if (share) {
            vhost_log_shm = log;
        } else {
            vhost_log = log;
        }
    } else {
        ++log->refcnt;
    }

    return log;
}

static void vhost_log_put(struct vhost_dev *dev, bool sync)
{
    struct vhost_log *log = dev->log;

    if (!log) {
        return;
    }

    --log->refcnt;
    if (log->refcnt == 0) {
        /* Sync only the range covered by the old log */
        if (dev->log_size && sync) {
            vhost_log_sync_range(dev, 0, dev->log_size * VHOST_LOG_CHUNK - 1);
        }

        if (vhost_log == log) {
            g_free(log->log);
            vhost_log = NULL;
        } else if (vhost_log_shm == log) {
            qemu_memfd_free(log->log, log->size * sizeof(*(log->log)),
                            log->fd);
            vhost_log_shm = NULL;
        }

        g_free(log);
    }

    dev->log = NULL;
    dev->log_size = 0;
}

static bool vhost_dev_log_is_shared(struct vhost_dev *dev)
{
    return dev->vhost_ops->vhost_requires_shm_log &&
           dev->vhost_ops->vhost_requires_shm_log(dev);
}

static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
{
    struct vhost_log *log = vhost_log_get(size, vhost_dev_log_is_shared(dev));
    uint64_t log_base = (uintptr_t)log->log;
    int r;

    /* Inform the backend about the log switch; this must be done before
     * releasing the current log, to ensure no logging is lost. */
    r = dev->vhost_ops->vhost_set_log_base(dev, log_base, log);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_log_base failed");
    }

    vhost_log_put(dev, true);
    dev->log = log;
    dev->log_size = size;
}

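/*
 * When the device sits behind a vIOMMU, the addresses handed to the
 * backend are IOVAs rather than host pointers; they are translated on
 * demand through the IOTLB miss path below. vhost_memory_map() then
 * simply returns the untranslated address instead of mapping guest
 * physical memory.
 */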
static int vhost_dev_has_iommu(struct vhost_dev *dev)
{
    VirtIODevice *vdev = dev->vdev;
    AddressSpace *dma_as = vdev->dma_as;

    return memory_region_is_iommu(dma_as->root) &&
           virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM);
}

static void *vhost_memory_map(struct vhost_dev *dev, hwaddr addr,
                              hwaddr *plen, int is_write)
{
    if (!vhost_dev_has_iommu(dev)) {
        return cpu_physical_memory_map(addr, plen, is_write);
    } else {
        return (void *)(uintptr_t)addr;
    }
}

static void vhost_memory_unmap(struct vhost_dev *dev, void *buffer,
                               hwaddr len, int is_write,
                               hwaddr access_len)
{
    if (!vhost_dev_has_iommu(dev)) {
        cpu_physical_memory_unmap(buffer, len, is_write, access_len);
    }
}

static int vhost_verify_ring_part_mapping(struct vhost_dev *dev,
                                          void *part,
                                          uint64_t part_addr,
                                          uint64_t part_size,
                                          uint64_t start_addr,
                                          uint64_t size)
{
    hwaddr l;
    void *p;
    int r = 0;

    if (!ranges_overlap(start_addr, size, part_addr, part_size)) {
        return 0;
    }
    l = part_size;
    p = vhost_memory_map(dev, part_addr, &l, 1);
    if (!p || l != part_size) {
        r = -ENOMEM;
    }
    if (p != part) {
        r = -EBUSY;
    }
    vhost_memory_unmap(dev, p, l, 0, 0);
    return r;
}

static int vhost_verify_ring_mappings(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int i, j;
    int r = 0;
    const char *part_name[] = {
        "descriptor table",
        "available ring",
        "used ring"
    };

    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;

        j = 0;
        r = vhost_verify_ring_part_mapping(dev, vq->desc, vq->desc_phys,
                                           vq->desc_size, start_addr, size);
        if (r) {
            break;
        }

        j++;
        r = vhost_verify_ring_part_mapping(dev, vq->avail, vq->avail_phys,
                                           vq->avail_size, start_addr, size);
        if (r) {
            break;
        }

        j++;
        r = vhost_verify_ring_part_mapping(dev, vq->used, vq->used_phys,
                                           vq->used_size, start_addr, size);
        if (r) {
            break;
        }
    }

    if (r == -ENOMEM) {
        error_report("Unable to map %s for ring %d", part_name[j], i);
    } else if (r == -EBUSY) {
        error_report("%s relocated for ring %d", part_name[j], i);
    }
    return r;
}

static struct vhost_memory_region *vhost_dev_find_reg(struct vhost_dev *dev,
                                                      uint64_t start_addr,
                                                      uint64_t size)
{
    int i, n = dev->mem->nregions;
    for (i = 0; i < n; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        if (ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                           start_addr, size)) {
            return reg;
        }
    }
    return NULL;
}

static bool vhost_dev_cmp_memory(struct vhost_dev *dev,
                                 uint64_t start_addr,
                                 uint64_t size,
                                 uint64_t uaddr)
{
    struct vhost_memory_region *reg = vhost_dev_find_reg(dev, start_addr, size);
    uint64_t reglast;
    uint64_t memlast;

    if (!reg) {
        return true;
    }

    reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
    memlast = range_get_last(start_addr, size);

    /* Need to extend region? */
    if (start_addr < reg->guest_phys_addr || memlast > reglast) {
        return true;
    }
    /* userspace_addr changed? */
    return uaddr != reg->userspace_addr + start_addr - reg->guest_phys_addr;
}

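/*
 * Fold one memory section change into dev->mem: grow the region table,
 * skip no-op updates, then unassign the range and, when adding,
 * re-assign it with adjacent regions merged. Sections needing dirty
 * tracking beyond migration (e.g. VGA) are treated as removals, since
 * writes done by the backend would bypass that tracking.
 */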
static void vhost_set_memory(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool add)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    bool log_dirty =
        memory_region_get_dirty_log_mask(section->mr) & ~(1 << DIRTY_MEMORY_MIGRATION);
    int s = offsetof(struct vhost_memory, regions) +
        (dev->mem->nregions + 1) * sizeof dev->mem->regions[0];
    void *ram;

    dev->mem = g_realloc(dev->mem, s);

    if (log_dirty) {
        add = false;
    }

    assert(size);

    /* Optimize no-change case. At least cirrus_vga does this a lot at this time. */
    ram = memory_region_get_ram_ptr(section->mr) + section->offset_within_region;
    if (add) {
        if (!vhost_dev_cmp_memory(dev, start_addr, size, (uintptr_t)ram)) {
            /* Region exists with same address. Nothing to do. */
            return;
        }
    } else {
        if (!vhost_dev_find_reg(dev, start_addr, size)) {
            /* Removing region that we don't access. Nothing to do. */
            return;
        }
    }

    vhost_dev_unassign_memory(dev, start_addr, size);
    if (add) {
        /* Add given mapping, merging adjacent regions if any */
        vhost_dev_assign_memory(dev, start_addr, size, (uintptr_t)ram);
    } else {
        /* Remove old mapping for this memory, if any. */
        vhost_dev_unassign_memory(dev, start_addr, size);
    }
    dev->mem_changed_start_addr = MIN(dev->mem_changed_start_addr, start_addr);
    dev->mem_changed_end_addr = MAX(dev->mem_changed_end_addr, start_addr + size - 1);
    dev->memory_changed = true;
    used_memslots = dev->mem->nregions;
}

static bool vhost_section(MemoryRegionSection *section)
{
    return memory_region_is_ram(section->mr);
}

static void vhost_begin(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    dev->mem_changed_end_addr = 0;
    dev->mem_changed_start_addr = -1;
}

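/*
 * Commit the accumulated section changes to the backend. The ordering
 * around dirty log resizes matters: the log may only grow before the
 * memory table update (so new regions can be logged at once) and only
 * shrink after it (so no address loses its log slot while the old
 * table is still in effect).
 */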
static void vhost_commit(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    hwaddr start_addr = 0;
    ram_addr_t size = 0;
    uint64_t log_size;
    int r;

    if (!dev->memory_changed) {
        return;
    }
    if (!dev->started) {
        return;
    }
    if (dev->mem_changed_start_addr > dev->mem_changed_end_addr) {
        return;
    }

    if (dev->started) {
        start_addr = dev->mem_changed_start_addr;
        size = dev->mem_changed_end_addr - dev->mem_changed_start_addr + 1;

        r = vhost_verify_ring_mappings(dev, start_addr, size);
        assert(r >= 0);
    }

    if (!dev->log_enabled) {
        r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
        if (r < 0) {
            VHOST_OPS_DEBUG("vhost_set_mem_table failed");
        }
        dev->memory_changed = false;
        return;
    }
    log_size = vhost_get_log_size(dev);
    /* We allocate an extra 4K bytes to log,
     * to reduce the number of reallocations. */
#define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
    /* To log more, must increase log size before table update. */
    if (dev->log_size < log_size) {
        vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
    }
    r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_mem_table failed");
    }
    /* To log less, can only decrease log size after table update. */
    if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
        vhost_dev_log_resize(dev, log_size);
    }
    dev->memory_changed = false;
}

static void vhost_region_add(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);

    if (!vhost_section(section)) {
        return;
    }

    ++dev->n_mem_sections;
    dev->mem_sections = g_renew(MemoryRegionSection, dev->mem_sections,
                                dev->n_mem_sections);
    dev->mem_sections[dev->n_mem_sections - 1] = *section;
    memory_region_ref(section->mr);
    vhost_set_memory(listener, section, true);
}

static void vhost_region_del(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int i;

    if (!vhost_section(section)) {
        return;
    }

    vhost_set_memory(listener, section, false);
    memory_region_unref(section->mr);
    for (i = 0; i < dev->n_mem_sections; ++i) {
        if (dev->mem_sections[i].offset_within_address_space
            == section->offset_within_address_space) {
            --dev->n_mem_sections;
            memmove(&dev->mem_sections[i], &dev->mem_sections[i + 1],
                    (dev->n_mem_sections - i) * sizeof(*dev->mem_sections));
            break;
        }
    }
}

static void vhost_region_nop(MemoryListener *listener,
                             MemoryRegionSection *section)
{
}

static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx, bool enable_log)
{
    struct vhost_vring_addr addr = {
        .index = idx,
        .desc_user_addr = (uint64_t)(unsigned long)vq->desc,
        .avail_user_addr = (uint64_t)(unsigned long)vq->avail,
        .used_user_addr = (uint64_t)(unsigned long)vq->used,
        .log_guest_addr = vq->used_phys,
        .flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0,
    };
    int r = dev->vhost_ops->vhost_set_vring_addr(dev, &addr);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_vring_addr failed");
        return -errno;
    }
    return 0;
}

static int vhost_dev_set_features(struct vhost_dev *dev,
                                  bool enable_log)
{
    uint64_t features = dev->acked_features;
    int r;
    if (enable_log) {
        features |= 0x1ULL << VHOST_F_LOG_ALL;
    }
    r = dev->vhost_ops->vhost_set_features(dev, features);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_features failed");
    }
    return r < 0 ? -errno : 0;
}

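/*
 * Toggle VHOST_F_LOG_ALL together with the per-vring log flag; on
 * failure, roll every virtqueue back to the current dev->log_enabled
 * state before returning the error.
 */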
static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
{
    int r, i, idx;
    r = vhost_dev_set_features(dev, enable_log);
    if (r < 0) {
        goto err_features;
    }
    for (i = 0; i < dev->nvqs; ++i) {
        idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
        r = vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
                                     enable_log);
        if (r < 0) {
            goto err_vq;
        }
    }
    return 0;
err_vq:
    for (; i >= 0; --i) {
        idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
        vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
                                 dev->log_enabled);
    }
    vhost_dev_set_features(dev, dev->log_enabled);
err_features:
    return r;
}

static int vhost_migration_log(MemoryListener *listener, int enable)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int r;
    if (!!enable == dev->log_enabled) {
        return 0;
    }
    if (!dev->started) {
        dev->log_enabled = enable;
        return 0;
    }
    if (!enable) {
        r = vhost_dev_set_log(dev, false);
        if (r < 0) {
            return r;
        }
        vhost_log_put(dev, false);
    } else {
        vhost_dev_log_resize(dev, vhost_get_log_size(dev));
        r = vhost_dev_set_log(dev, true);
        if (r < 0) {
            return r;
        }
    }
    dev->log_enabled = enable;
    return 0;
}

static void vhost_log_global_start(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, true);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_global_stop(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, false);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_start(MemoryListener *listener,
                            MemoryRegionSection *section,
                            int old, int new)
{
    /* FIXME: implement */
}

static void vhost_log_stop(MemoryListener *listener,
                           MemoryRegionSection *section,
                           int old, int new)
{
    /* FIXME: implement */
}

/* The vhost driver natively knows how to handle the vrings of
 * non-cross-endian legacy devices and modern devices. Only legacy
 * devices exposed to a bi-endian guest may require the vhost driver
 * to use a specific endianness.
 */
static inline bool vhost_needs_vring_endian(VirtIODevice *vdev)
{
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        return false;
    }
#ifdef HOST_WORDS_BIGENDIAN
    return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_LITTLE;
#else
    return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_BIG;
#endif
}

static int vhost_virtqueue_set_vring_endian_legacy(struct vhost_dev *dev,
                                                   bool is_big_endian,
                                                   int vhost_vq_index)
{
    struct vhost_vring_state s = {
        .index = vhost_vq_index,
        .num = is_big_endian
    };

    if (!dev->vhost_ops->vhost_set_vring_endian(dev, &s)) {
        return 0;
    }

    VHOST_OPS_DEBUG("vhost_set_vring_endian failed");
    if (errno == ENOTTY) {
        error_report("vhost does not support cross-endian");
        return -ENOSYS;
    }

    return -errno;
}

static int vhost_memory_region_lookup(struct vhost_dev *hdev,
                                      uint64_t gpa, uint64_t *uaddr,
                                      uint64_t *len)
{
    int i;

    for (i = 0; i < hdev->mem->nregions; i++) {
        struct vhost_memory_region *reg = hdev->mem->regions + i;

        if (gpa >= reg->guest_phys_addr &&
            reg->guest_phys_addr + reg->memory_size > gpa) {
            *uaddr = reg->userspace_addr + gpa - reg->guest_phys_addr;
            *len = reg->guest_phys_addr + reg->memory_size - gpa;
            return 0;
        }
    }

    return -EFAULT;
}

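/*
 * Handle an IOTLB miss reported by the backend: translate the IOVA
 * through the device's DMA address space, convert the result to a
 * backend userspace address, and push the entry back via
 * vhost_update_device_iotlb(). This runs under the RCU read lock
 * because the returned IOMMUTLBEntry is only valid inside an RCU
 * critical section.
 */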
void vhost_device_iotlb_miss(struct vhost_dev *dev, uint64_t iova, int write)
{
    IOMMUTLBEntry iotlb;
    uint64_t uaddr, len;

    rcu_read_lock();

    iotlb = address_space_get_iotlb_entry(dev->vdev->dma_as,
                                          iova, write);
    if (iotlb.target_as != NULL) {
        if (vhost_memory_region_lookup(dev, iotlb.translated_addr,
                                       &uaddr, &len)) {
            error_report("Failed to look up the translated address "
                         "%"PRIx64, iotlb.translated_addr);
            goto out;
        }

        len = MIN(iotlb.addr_mask + 1, len);
        iova = iova & ~iotlb.addr_mask;

        if (dev->vhost_ops->vhost_update_device_iotlb(dev, iova, uaddr,
                                                      len, iotlb.perm)) {
            error_report("Failed to update device IOTLB");
            goto out;
        }
    }
out:
    rcu_read_unlock();
}

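/*
 * Bring one virtqueue up on the backend: program the ring size and
 * last-avail base index, fix up legacy cross-endian rings, map and
 * register the descriptor/avail/used rings, wire up the kick eventfd,
 * and leave the call notifier in the proper masked state.
 */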
static int vhost_virtqueue_start(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    hwaddr s, l, a;
    int r;
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
    struct vhost_vring_file file = {
        .index = vhost_vq_index
    };
    struct vhost_vring_state state = {
        .index = vhost_vq_index
    };
    struct VirtQueue *vvq = virtio_get_queue(vdev, idx);

    vq->num = state.num = virtio_queue_get_num(vdev, idx);
    r = dev->vhost_ops->vhost_set_vring_num(dev, &state);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_num failed");
        return -errno;
    }

    state.num = virtio_queue_get_last_avail_idx(vdev, idx);
    r = dev->vhost_ops->vhost_set_vring_base(dev, &state);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_base failed");
        return -errno;
    }

    if (vhost_needs_vring_endian(vdev)) {
        r = vhost_virtqueue_set_vring_endian_legacy(dev,
                                                    virtio_is_big_endian(vdev),
                                                    vhost_vq_index);
        if (r) {
            return -errno;
        }
    }

    vq->desc_size = s = l = virtio_queue_get_desc_size(vdev, idx);
    vq->desc_phys = a = virtio_queue_get_desc_addr(vdev, idx);
    vq->desc = vhost_memory_map(dev, a, &l, 0);
    if (!vq->desc || l != s) {
        r = -ENOMEM;
        goto fail_alloc_desc;
    }
    vq->avail_size = s = l = virtio_queue_get_avail_size(vdev, idx);
    vq->avail_phys = a = virtio_queue_get_avail_addr(vdev, idx);
    vq->avail = vhost_memory_map(dev, a, &l, 0);
    if (!vq->avail || l != s) {
        r = -ENOMEM;
        goto fail_alloc_avail;
    }
    vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
    vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
    vq->used = vhost_memory_map(dev, a, &l, 1);
    if (!vq->used || l != s) {
        r = -ENOMEM;
        goto fail_alloc_used;
    }

    r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
    if (r < 0) {
        r = -errno;
        goto fail_alloc;
    }

    file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
    r = dev->vhost_ops->vhost_set_vring_kick(dev, &file);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_kick failed");
        r = -errno;
        goto fail_kick;
    }

    /* Clear and discard previous events if any. */
    event_notifier_test_and_clear(&vq->masked_notifier);

    /* Init vring in unmasked state, unless guest_notifier_mask
     * will do it later.
     */
    if (!vdev->use_guest_notifier_mask) {
        /* TODO: check and handle errors. */
        vhost_virtqueue_mask(dev, vdev, idx, false);
    }

    if (k->query_guest_notifiers &&
        k->query_guest_notifiers(qbus->parent) &&
        virtio_queue_vector(vdev, idx) == VIRTIO_NO_VECTOR) {
        file.fd = -1;
        r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
        if (r) {
            goto fail_vector;
        }
    }

    return 0;

fail_vector:
fail_kick:
fail_alloc:
    vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
                       0, 0);
fail_alloc_used:
    vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
                       0, 0);
fail_alloc_avail:
    vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
                       0, 0);
fail_alloc_desc:
    return r;
}

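/*
 * Tear one virtqueue down: read the backend's last-avail index back
 * into the virtio device so its state stays consistent, restore
 * native ring endianness for legacy cross-endian setups, and unmap
 * the rings.
 */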
static void vhost_virtqueue_stop(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
    struct vhost_vring_state state = {
        .index = vhost_vq_index,
    };
    int r;

    r = dev->vhost_ops->vhost_get_vring_base(dev, &state);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost VQ %d ring restore failed: %d", idx, r);
    } else {
        virtio_queue_set_last_avail_idx(vdev, idx, state.num);
    }
    virtio_queue_invalidate_signalled_used(vdev, idx);
    virtio_queue_update_used_idx(vdev, idx);

    /* In the cross-endian case, we need to reset the vring endianness
     * back to native, as legacy devices expect it by default.
     */
    if (vhost_needs_vring_endian(vdev)) {
        vhost_virtqueue_set_vring_endian_legacy(dev,
                                                !virtio_is_big_endian(vdev),
                                                vhost_vq_index);
    }

    vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
                       1, virtio_queue_get_used_size(vdev, idx));
    vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
                       0, virtio_queue_get_avail_size(vdev, idx));
    vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
                       0, virtio_queue_get_desc_size(vdev, idx));
}

static void vhost_eventfd_add(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static void vhost_eventfd_del(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static int vhost_virtqueue_set_busyloop_timeout(struct vhost_dev *dev,
                                                int n, uint32_t timeout)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
    struct vhost_vring_state state = {
        .index = vhost_vq_index,
        .num = timeout,
    };
    int r;

    if (!dev->vhost_ops->vhost_set_vring_busyloop_timeout) {
        return -EINVAL;
    }

    r = dev->vhost_ops->vhost_set_vring_busyloop_timeout(dev, &state);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_busyloop_timeout failed");
        return r;
    }

    return 0;
}

static int vhost_virtqueue_init(struct vhost_dev *dev,
                                struct vhost_virtqueue *vq, int n)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
    struct vhost_vring_file file = {
        .index = vhost_vq_index,
    };
    int r = event_notifier_init(&vq->masked_notifier, 0);
    if (r < 0) {
        return r;
    }

    file.fd = event_notifier_get_fd(&vq->masked_notifier);
    r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_call failed");
        r = -errno;
        goto fail_call;
    }

    vq->dev = dev;

    return 0;
fail_call:
    event_notifier_cleanup(&vq->masked_notifier);
    return r;
}

static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
{
    event_notifier_cleanup(&vq->masked_notifier);
}

static void vhost_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
{
    struct vhost_dev *hdev = container_of(n, struct vhost_dev, n);

    if (hdev->vhost_ops->vhost_invalidate_device_iotlb(hdev,
                                                       iotlb->iova,
                                                       iotlb->addr_mask + 1)) {
        error_report("Failed to invalidate device IOTLB");
    }
}

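/*
 * One-time device setup: attach the backend, take ownership, query
 * features, initialize every virtqueue, register the memory listener,
 * and, when the backend cannot log writes (no VHOST_F_LOG_ALL, or no
 * shareable log memory), install a migration blocker.
 */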
int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
                   VhostBackendType backend_type, uint32_t busyloop_timeout)
{
    uint64_t features;
    int i, r, n_initialized_vqs = 0;
    Error *local_err = NULL;

    hdev->vdev = NULL;
    hdev->migration_blocker = NULL;

    r = vhost_set_backend_type(hdev, backend_type);
    assert(r >= 0);

    r = hdev->vhost_ops->vhost_backend_init(hdev, opaque);
    if (r < 0) {
        goto fail;
    }

    if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
        error_report("vhost backend memory slots limit is smaller"
                     " than the current number of present memory slots");
        r = -1;
        goto fail;
    }

    r = hdev->vhost_ops->vhost_set_owner(hdev);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_owner failed");
        goto fail;
    }

    r = hdev->vhost_ops->vhost_get_features(hdev, &features);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_get_features failed");
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i, ++n_initialized_vqs) {
        r = vhost_virtqueue_init(hdev, hdev->vqs + i, hdev->vq_index + i);
        if (r < 0) {
            goto fail;
        }
    }

    if (busyloop_timeout) {
        for (i = 0; i < hdev->nvqs; ++i) {
            r = vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i,
                                                     busyloop_timeout);
            if (r < 0) {
                goto fail_busyloop;
            }
        }
    }

    hdev->features = features;

    hdev->memory_listener = (MemoryListener) {
        .begin = vhost_begin,
        .commit = vhost_commit,
        .region_add = vhost_region_add,
        .region_del = vhost_region_del,
        .region_nop = vhost_region_nop,
        .log_start = vhost_log_start,
        .log_stop = vhost_log_stop,
        .log_sync = vhost_log_sync,
        .log_global_start = vhost_log_global_start,
        .log_global_stop = vhost_log_global_stop,
        .eventfd_add = vhost_eventfd_add,
        .eventfd_del = vhost_eventfd_del,
        .priority = 10
    };

    hdev->n.notify = vhost_iommu_unmap_notify;
    hdev->n.notifier_flags = IOMMU_NOTIFIER_UNMAP;

    if (hdev->migration_blocker == NULL) {
        if (!(hdev->features & (0x1ULL << VHOST_F_LOG_ALL))) {
            error_setg(&hdev->migration_blocker,
                       "Migration disabled: vhost lacks VHOST_F_LOG_ALL feature.");
        } else if (vhost_dev_log_is_shared(hdev) && !qemu_memfd_check()) {
            error_setg(&hdev->migration_blocker,
                       "Migration disabled: failed to allocate shared memory");
        }
    }

    if (hdev->migration_blocker != NULL) {
        r = migrate_add_blocker(hdev->migration_blocker, &local_err);
        if (local_err) {
            error_report_err(local_err);
            error_free(hdev->migration_blocker);
            goto fail_busyloop;
        }
    }

    hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
    hdev->n_mem_sections = 0;
    hdev->mem_sections = NULL;
    hdev->log = NULL;
    hdev->log_size = 0;
    hdev->log_enabled = false;
    hdev->started = false;
    hdev->memory_changed = false;
    memory_listener_register(&hdev->memory_listener, &address_space_memory);
    QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
    return 0;

fail_busyloop:
    while (--i >= 0) {
        vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i, 0);
    }
fail:
    hdev->nvqs = n_initialized_vqs;
    vhost_dev_cleanup(hdev);
    return r;
}

void vhost_dev_cleanup(struct vhost_dev *hdev)
{
    int i;

    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_cleanup(hdev->vqs + i);
    }
    if (hdev->mem) {
        /* those are only safe after successful init */
        memory_listener_unregister(&hdev->memory_listener);
        QLIST_REMOVE(hdev, entry);
    }
    if (hdev->migration_blocker) {
        migrate_del_blocker(hdev->migration_blocker);
        error_free(hdev->migration_blocker);
    }
    g_free(hdev->mem);
    g_free(hdev->mem_sections);
    if (hdev->vhost_ops) {
        hdev->vhost_ops->vhost_backend_cleanup(hdev);
    }
    assert(!hdev->log);

    memset(hdev, 0, sizeof(struct vhost_dev));
}

/* Stop processing guest IO notifications in qemu.
 * Start processing them in vhost in the kernel.
 */
int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    int i, r, e;

    /* We will pass the notifiers to the kernel, make sure that QEMU
     * doesn't interfere.
     */
    r = virtio_device_grab_ioeventfd(vdev);
    if (r < 0) {
        error_report("binding does not support host notifiers");
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i) {
        r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
                                         true);
        if (r < 0) {
            error_report("vhost VQ %d notifier binding failed: %d", i, -r);
            goto fail_vq;
        }
    }

    return 0;
fail_vq:
    while (--i >= 0) {
        e = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
                                         false);
        if (e < 0) {
            error_report("vhost VQ %d notifier cleanup error: %d", i, -e);
        }
        assert(e >= 0);
    }
    virtio_device_release_ioeventfd(vdev);
fail:
    return r;
}

/* Stop processing guest IO notifications in vhost.
 * Start processing them in qemu.
 * This might actually run the qemu handlers right away,
 * so virtio in qemu must be completely set up when this is called.
 */
void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    int i, r;

    for (i = 0; i < hdev->nvqs; ++i) {
        r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
                                         false);
        if (r < 0) {
            error_report("vhost VQ %d notifier cleanup failed: %d", i, -r);
        }
        assert(r >= 0);
    }
    virtio_device_release_ioeventfd(vdev);
}

/* Test and clear event pending status.
 * Should be called after unmask to avoid losing events.
 */
bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
{
    struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index;
    assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);
    return event_notifier_test_and_clear(&vq->masked_notifier);
}

/* Mask/unmask events from this vq. */
void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
                          bool mask)
{
    struct VirtQueue *vvq = virtio_get_queue(vdev, n);
    int r, index = n - hdev->vq_index;
    struct vhost_vring_file file;

    /* should only be called after backend is connected */
    assert(hdev->vhost_ops);

    if (mask) {
        assert(vdev->use_guest_notifier_mask);
        file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier);
    } else {
        file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
    }

    file.index = hdev->vhost_ops->vhost_get_vq_index(hdev, n);
    r = hdev->vhost_ops->vhost_set_vring_call(hdev, &file);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_vring_call failed");
    }
}

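/*
 * Feature negotiation helpers: vhost_get_features() strips from the
 * offered set every listed bit the backend does not support, and
 * vhost_ack_features() records which listed bits the guest accepted
 * in hdev->acked_features.
 */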
uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
                            uint64_t features)
{
    const int *bit = feature_bits;
    while (*bit != VHOST_INVALID_FEATURE_BIT) {
        uint64_t bit_mask = (1ULL << *bit);
        if (!(hdev->features & bit_mask)) {
            features &= ~bit_mask;
        }
        bit++;
    }
    return features;
}

void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
                        uint64_t features)
{
    const int *bit = feature_bits;
    while (*bit != VHOST_INVALID_FEATURE_BIT) {
        uint64_t bit_mask = (1ULL << *bit);
        if (features & bit_mask) {
            hdev->acked_features |= bit_mask;
        }
        bit++;
    }
}

/* Host notifiers must be enabled at this point. */
int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;

    /* should only be called after backend is connected */
    assert(hdev->vhost_ops);

    hdev->started = true;
    hdev->vdev = vdev;

    r = vhost_dev_set_features(hdev, hdev->log_enabled);
    if (r < 0) {
        goto fail_features;
    }

    if (vhost_dev_has_iommu(hdev)) {
        memory_region_register_iommu_notifier(vdev->dma_as->root,
                                              &hdev->n);
    }

    r = hdev->vhost_ops->vhost_set_mem_table(hdev, hdev->mem);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_mem_table failed");
        r = -errno;
        goto fail_mem;
    }
    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_start(hdev,
                                  vdev,
                                  hdev->vqs + i,
                                  hdev->vq_index + i);
        if (r < 0) {
            goto fail_vq;
        }
    }

    if (hdev->log_enabled) {
        uint64_t log_base;

        hdev->log_size = vhost_get_log_size(hdev);
        hdev->log = vhost_log_get(hdev->log_size,
                                  vhost_dev_log_is_shared(hdev));
        log_base = (uintptr_t)hdev->log->log;
        r = hdev->vhost_ops->vhost_set_log_base(hdev,
                                                hdev->log_size ? log_base : 0,
                                                hdev->log);
        if (r < 0) {
            VHOST_OPS_DEBUG("vhost_set_log_base failed");
            r = -errno;
            goto fail_log;
        }
    }

    if (vhost_dev_has_iommu(hdev)) {
        hdev->vhost_ops->vhost_set_iotlb_callback(hdev, true);

        /* Update used ring information for IOTLB to work correctly;
         * the vhost-kernel code requires this. */
        for (i = 0; i < hdev->nvqs; ++i) {
            struct vhost_virtqueue *vq = hdev->vqs + i;
            vhost_device_iotlb_miss(hdev, vq->used_phys, true);
        }
    }
    return 0;
fail_log:
    vhost_log_put(hdev, false);
fail_vq:
    while (--i >= 0) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }
    i = hdev->nvqs;

fail_mem:
fail_features:

    hdev->started = false;
    return r;
}

/* Host notifiers must be enabled at this point. */
void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i;

    /* should only be called after backend is connected */
    assert(hdev->vhost_ops);

    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }

    if (vhost_dev_has_iommu(hdev)) {
        hdev->vhost_ops->vhost_set_iotlb_callback(hdev, false);
        memory_region_unregister_iommu_notifier(vdev->dma_as->root,
                                                &hdev->n);
    }
    vhost_log_put(hdev, true);
    hdev->started = false;
    hdev->vdev = NULL;
}

int vhost_net_set_backend(struct vhost_dev *hdev,
                          struct vhost_vring_file *file)
{
    if (hdev->vhost_ops->vhost_net_set_backend) {
        return hdev->vhost_ops->vhost_net_set_backend(hdev, file);
    }

    return -1;
}