/*
 * vhost support
 *
 * Copyright Red Hat, Inc. 2010
 *
 * Authors:
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "hw/virtio/vhost.h"
#include "hw/hw.h"
#include "qemu/atomic.h"
#include "qemu/range.h"
#include "qemu/error-report.h"
#include "qemu/memfd.h"
#include <linux/vhost.h>
#include "exec/address-spaces.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
#include "migration/migration.h"

static struct vhost_log *vhost_log;
static struct vhost_log *vhost_log_shm;

static unsigned int used_memslots;
static QLIST_HEAD(, vhost_dev) vhost_devices =
    QLIST_HEAD_INITIALIZER(vhost_devices);

bool vhost_has_free_slot(void)
{
    unsigned int slots_limit = ~0U;
    struct vhost_dev *hdev;

    QLIST_FOREACH(hdev, &vhost_devices, entry) {
        unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
        slots_limit = MIN(slots_limit, r);
    }
    return slots_limit > used_memslots;
}

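/*
 * Dirty log layout, as consumed below: dev->log->log is an array of
 * vhost_log_chunk_t words.  Each word covers VHOST_LOG_CHUNK bytes of
 * guest physical memory, one bit per VHOST_LOG_PAGE-sized page.  The
 * vhost backend sets bits as it writes guest memory; we atomically
 * fetch-and-clear each word and propagate the corresponding pages into
 * QEMU's own dirty bitmap via memory_region_set_dirty().
 */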
static void vhost_dev_sync_region(struct vhost_dev *dev,
                                  MemoryRegionSection *section,
                                  uint64_t mfirst, uint64_t mlast,
                                  uint64_t rfirst, uint64_t rlast)
{
    vhost_log_chunk_t *log = dev->log->log;

    uint64_t start = MAX(mfirst, rfirst);
    uint64_t end = MIN(mlast, rlast);
    vhost_log_chunk_t *from = log + start / VHOST_LOG_CHUNK;
    vhost_log_chunk_t *to = log + end / VHOST_LOG_CHUNK + 1;
    uint64_t addr = (start / VHOST_LOG_CHUNK) * VHOST_LOG_CHUNK;

    if (end < start) {
        return;
    }
    assert(end / VHOST_LOG_CHUNK < dev->log_size);
    assert(start / VHOST_LOG_CHUNK < dev->log_size);

    for (; from < to; ++from) {
        vhost_log_chunk_t log;
        /* We first check with non-atomic: much cheaper,
         * and we expect non-dirty to be the common case. */
        if (!*from) {
            addr += VHOST_LOG_CHUNK;
            continue;
        }
        /* Data must be read atomically. We don't really need barrier semantics
         * but it's easier to use atomic_* than roll our own. */
        log = atomic_xchg(from, 0);
        while (log) {
            int bit = ctzl(log);
            hwaddr page_addr;
            hwaddr section_offset;
            hwaddr mr_offset;
            page_addr = addr + bit * VHOST_LOG_PAGE;
            section_offset = page_addr - section->offset_within_address_space;
            mr_offset = section_offset + section->offset_within_region;
            memory_region_set_dirty(section->mr, mr_offset, VHOST_LOG_PAGE);
            log &= ~(0x1ull << bit);
        }
        addr += VHOST_LOG_CHUNK;
    }
}

static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
                                   MemoryRegionSection *section,
                                   hwaddr first,
                                   hwaddr last)
{
    int i;
    hwaddr start_addr;
    hwaddr end_addr;

    if (!dev->log_enabled || !dev->started) {
        return 0;
    }
    start_addr = section->offset_within_address_space;
    end_addr = range_get_last(start_addr, int128_get64(section->size));
    start_addr = MAX(first, start_addr);
    end_addr = MIN(last, end_addr);

    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr,
                              reg->guest_phys_addr,
                              range_get_last(reg->guest_phys_addr,
                                             reg->memory_size));
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys,
                              range_get_last(vq->used_phys, vq->used_size));
    }
    return 0;
}

static void vhost_log_sync(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    vhost_sync_dirty_bitmap(dev, section, 0x0, ~0x0ULL);
}

static void vhost_log_sync_range(struct vhost_dev *dev,
                                 hwaddr first, hwaddr last)
{
    int i;
    /* FIXME: this is N^2 in number of sections */
    for (i = 0; i < dev->n_mem_sections; ++i) {
        MemoryRegionSection *section = &dev->mem_sections[i];
        vhost_sync_dirty_bitmap(dev, section, first, last);
    }
}

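/*
 * A sketch of the cases handled below, with made-up addresses: given an
 * existing region [0x0000, 0xffff], unassigning [0x2000, 0x2fff] is the
 * "split" case: the region is shrunk to [0x0000, 0x1fff] and a new
 * region [0x3000, 0xffff] is appended.  A range covering the whole
 * region removes it; a range overlapping only its end shrinks it; a
 * range overlapping only its start shifts it up.
 */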
/* Assign/unassign. Keep an unsorted array of non-overlapping
 * memory regions in dev->mem. */
static void vhost_dev_unassign_memory(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int from, to, n = dev->mem->nregions;
    /* Track overlapping/split regions for sanity checking. */
    int overlap_start = 0, overlap_end = 0, overlap_middle = 0, split = 0;

    for (from = 0, to = 0; from < n; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t reglast;
        uint64_t memlast;
        uint64_t change;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }

        /* No overlap is simple */
        if (!ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                            start_addr, size)) {
            continue;
        }

        /* Split only happens if supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!split);

        reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        memlast = range_get_last(start_addr, size);

        /* Remove whole region */
        if (start_addr <= reg->guest_phys_addr && memlast >= reglast) {
            --dev->mem->nregions;
            --to;
            ++overlap_middle;
            continue;
        }

        /* Shrink region */
        if (memlast >= reglast) {
            reg->memory_size = start_addr - reg->guest_phys_addr;
            assert(reg->memory_size);
            assert(!overlap_end);
            ++overlap_end;
            continue;
        }

        /* Shift region */
        if (start_addr <= reg->guest_phys_addr) {
            change = memlast + 1 - reg->guest_phys_addr;
            reg->memory_size -= change;
            reg->guest_phys_addr += change;
            reg->userspace_addr += change;
            assert(reg->memory_size);
            assert(!overlap_start);
            ++overlap_start;
            continue;
        }

        /* This only happens if supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!overlap_start);
        assert(!overlap_end);
        assert(!overlap_middle);
        /* Split region: shrink first part, shift second part. */
        memcpy(dev->mem->regions + n, reg, sizeof *reg);
        reg->memory_size = start_addr - reg->guest_phys_addr;
        assert(reg->memory_size);
        change = memlast + 1 - reg->guest_phys_addr;
        reg = dev->mem->regions + n;
        reg->memory_size -= change;
        assert(reg->memory_size);
        reg->guest_phys_addr += change;
        reg->userspace_addr += change;
        /* Never add more than 1 region */
        assert(dev->mem->nregions == n);
        ++dev->mem->nregions;
        ++split;
    }
}

/* Called after unassign, so no regions overlap the given range. */
static void vhost_dev_assign_memory(struct vhost_dev *dev,
                                    uint64_t start_addr,
                                    uint64_t size,
                                    uint64_t uaddr)
{
    int from, to;
    struct vhost_memory_region *merged = NULL;
    for (from = 0, to = 0; from < dev->mem->nregions; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t prlast, urlast;
        uint64_t pmlast, umlast;
        uint64_t s, e, u;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }
        prlast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        pmlast = range_get_last(start_addr, size);
        urlast = range_get_last(reg->userspace_addr, reg->memory_size);
        umlast = range_get_last(uaddr, size);

        /* check for overlapping regions: should never happen. */
        assert(prlast < start_addr || pmlast < reg->guest_phys_addr);
        /* Not an adjacent or overlapping region - do not merge. */
        if ((prlast + 1 != start_addr || urlast + 1 != uaddr) &&
            (pmlast + 1 != reg->guest_phys_addr ||
             umlast + 1 != reg->userspace_addr)) {
            continue;
        }

        if (merged) {
            --to;
            assert(to >= 0);
        } else {
            merged = reg;
        }
        u = MIN(uaddr, reg->userspace_addr);
        s = MIN(start_addr, reg->guest_phys_addr);
        e = MAX(pmlast, prlast);
        uaddr = merged->userspace_addr = u;
        start_addr = merged->guest_phys_addr = s;
        size = merged->memory_size = e - s + 1;
        assert(merged->memory_size);
    }

    if (!merged) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        memset(reg, 0, sizeof *reg);
        reg->memory_size = size;
        assert(reg->memory_size);
        reg->guest_phys_addr = start_addr;
        reg->userspace_addr = uaddr;
        ++to;
    }
    assert(to <= dev->mem->nregions + 1);
    dev->mem->nregions = to;
}

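/*
 * The log must be large enough to cover the highest guest physical
 * address the backend can dirty: every memory region and, because the
 * backend also logs used-ring updates by guest physical address, every
 * vring's used ring.  The size is expressed in chunks (see the layout
 * note above vhost_dev_sync_region()).
 */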
static uint64_t vhost_get_log_size(struct vhost_dev *dev)
{
    uint64_t log_size = 0;
    int i;
    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        uint64_t last = range_get_last(reg->guest_phys_addr,
                                       reg->memory_size);
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        uint64_t last = vq->used_phys + vq->used_size - 1;
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    return log_size;
}

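/*
 * Two flavours of log allocation: a plain heap buffer, or a sealed
 * memfd-backed buffer when the backend reports vhost_requires_shm_log
 * (presumably so an out-of-process backend such as vhost-user can map
 * the log itself through the fd).
 */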
static struct vhost_log *vhost_log_alloc(uint64_t size, bool share)
{
    struct vhost_log *log;
    uint64_t logsize = size * sizeof(*(log->log));
    int fd = -1;

    log = g_new0(struct vhost_log, 1);
    if (share) {
        log->log = qemu_memfd_alloc("vhost-log", logsize,
                                    F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
                                    &fd);
        memset(log->log, 0, logsize);
    } else {
        log->log = g_malloc0(logsize);
    }

    log->size = size;
    log->refcnt = 1;
    log->fd = fd;

    return log;
}

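/*
 * One global log per flavour is shared by all devices: a caller either
 * takes a reference on the current log or, if the required size
 * differs, installs a freshly allocated log for subsequent callers;
 * previous holders keep their reference until vhost_log_put().
 */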
static struct vhost_log *vhost_log_get(uint64_t size, bool share)
{
    struct vhost_log *log = share ? vhost_log_shm : vhost_log;

    if (!log || log->size != size) {
        log = vhost_log_alloc(size, share);
        if (share) {
            vhost_log_shm = log;
        } else {
            vhost_log = log;
        }
    } else {
        ++log->refcnt;
    }

    return log;
}

static void vhost_log_put(struct vhost_dev *dev, bool sync)
{
    struct vhost_log *log = dev->log;

    if (!log) {
        return;
    }

    --log->refcnt;
    if (log->refcnt == 0) {
        /* Sync only the range covered by the old log */
        if (dev->log_size && sync) {
            vhost_log_sync_range(dev, 0, dev->log_size * VHOST_LOG_CHUNK - 1);
        }

        if (vhost_log == log) {
            g_free(log->log);
            vhost_log = NULL;
        } else if (vhost_log_shm == log) {
            qemu_memfd_free(log->log, log->size * sizeof(*(log->log)),
                            log->fd);
            vhost_log_shm = NULL;
        }

        g_free(log);
    }
}

static bool vhost_dev_log_is_shared(struct vhost_dev *dev)
{
    return dev->vhost_ops->vhost_requires_shm_log &&
           dev->vhost_ops->vhost_requires_shm_log(dev);
}

static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
{
    struct vhost_log *log = vhost_log_get(size, vhost_dev_log_is_shared(dev));
    uint64_t log_base = (uintptr_t)log->log;
    int r;

    /* inform backend of log switching, this must be done before
       releasing the current log, to ensure no logging is lost */
    r = dev->vhost_ops->vhost_set_log_base(dev, log_base, log);
    assert(r >= 0);
    vhost_log_put(dev, true);
    dev->log = log;
    dev->log_size = size;
}

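/*
 * After a memory-map change, check that every vring overlapping the
 * changed range is still mapped, and mapped at the same host address
 * previously handed to the backend; fail with -ENOMEM/-EBUSY rather
 * than let the backend keep using a stale translation.
 */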
static int vhost_verify_ring_mappings(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int i;
    int r = 0;

    for (i = 0; !r && i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        hwaddr l;
        void *p;

        if (!ranges_overlap(start_addr, size, vq->ring_phys, vq->ring_size)) {
            continue;
        }
        l = vq->ring_size;
        p = cpu_physical_memory_map(vq->ring_phys, &l, 1);
        if (!p || l != vq->ring_size) {
            fprintf(stderr, "Unable to map ring buffer for ring %d\n", i);
            r = -ENOMEM;
        }
        if (p != vq->ring) {
            fprintf(stderr, "Ring buffer relocated for ring %d\n", i);
            r = -EBUSY;
        }
        cpu_physical_memory_unmap(p, l, 0, 0);
    }
    return r;
}

static struct vhost_memory_region *vhost_dev_find_reg(struct vhost_dev *dev,
                                                      uint64_t start_addr,
                                                      uint64_t size)
{
    int i, n = dev->mem->nregions;
    for (i = 0; i < n; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        if (ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                           start_addr, size)) {
            return reg;
        }
    }
    return NULL;
}

static bool vhost_dev_cmp_memory(struct vhost_dev *dev,
                                 uint64_t start_addr,
                                 uint64_t size,
                                 uint64_t uaddr)
{
    struct vhost_memory_region *reg = vhost_dev_find_reg(dev, start_addr, size);
    uint64_t reglast;
    uint64_t memlast;

    if (!reg) {
        return true;
    }

    reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
    memlast = range_get_last(start_addr, size);

    /* Need to extend region? */
    if (start_addr < reg->guest_phys_addr || memlast > reglast) {
        return true;
    }
    /* userspace_addr changed? */
    return uaddr != reg->userspace_addr + start_addr - reg->guest_phys_addr;
}

static void vhost_set_memory(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool add)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    bool log_dirty =
        memory_region_get_dirty_log_mask(section->mr) & ~(1 << DIRTY_MEMORY_MIGRATION);
    int s = offsetof(struct vhost_memory, regions) +
        (dev->mem->nregions + 1) * sizeof dev->mem->regions[0];
    void *ram;

    dev->mem = g_realloc(dev->mem, s);

    if (log_dirty) {
        add = false;
    }

    assert(size);

    /* Optimize no-change case. At least cirrus_vga does this a lot at this time. */
    ram = memory_region_get_ram_ptr(section->mr) + section->offset_within_region;
    if (add) {
        if (!vhost_dev_cmp_memory(dev, start_addr, size, (uintptr_t)ram)) {
            /* Region exists with same address. Nothing to do. */
            return;
        }
    } else {
        if (!vhost_dev_find_reg(dev, start_addr, size)) {
            /* Removing region that we don't access. Nothing to do. */
            return;
        }
    }

    vhost_dev_unassign_memory(dev, start_addr, size);
    if (add) {
        /* Add given mapping, merging adjacent regions if any */
        vhost_dev_assign_memory(dev, start_addr, size, (uintptr_t)ram);
    } else {
        /* Remove old mapping for this memory, if any. */
        vhost_dev_unassign_memory(dev, start_addr, size);
    }
    dev->mem_changed_start_addr = MIN(dev->mem_changed_start_addr, start_addr);
    dev->mem_changed_end_addr = MAX(dev->mem_changed_end_addr, start_addr + size - 1);
    dev->memory_changed = true;
    used_memslots = dev->mem->nregions;
}

static bool vhost_section(MemoryRegionSection *section)
{
    return memory_region_is_ram(section->mr);
}

static void vhost_begin(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    dev->mem_changed_end_addr = 0;
    dev->mem_changed_start_addr = -1;
}

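/*
 * Memory-map updates are batched: vhost_begin() resets the changed
 * range, region_add/region_del only record it, and vhost_commit()
 * pushes a single updated table to the backend.  When dirty logging is
 * active the log is grown before the table update and only shrunk
 * after it, so every address the backend may log stays covered.
 */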
static void vhost_commit(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    hwaddr start_addr = 0;
    ram_addr_t size = 0;
    uint64_t log_size;
    int r;

    if (!dev->memory_changed) {
        return;
    }
    if (!dev->started) {
        return;
    }
    if (dev->mem_changed_start_addr > dev->mem_changed_end_addr) {
        return;
    }

    if (dev->started) {
        start_addr = dev->mem_changed_start_addr;
        size = dev->mem_changed_end_addr - dev->mem_changed_start_addr + 1;

        r = vhost_verify_ring_mappings(dev, start_addr, size);
        assert(r >= 0);
    }

    if (!dev->log_enabled) {
        r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
        assert(r >= 0);
        dev->memory_changed = false;
        return;
    }
    log_size = vhost_get_log_size(dev);
    /* We allocate an extra 4K bytes to log,
     * to reduce the number of reallocations. */
#define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
    /* To log more, must increase log size before table update. */
    if (dev->log_size < log_size) {
        vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
    }
    r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
    assert(r >= 0);
    /* To log less, can only decrease log size after table update. */
    if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
        vhost_dev_log_resize(dev, log_size);
    }
    dev->memory_changed = false;
}

static void vhost_region_add(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);

    if (!vhost_section(section)) {
        return;
    }

    ++dev->n_mem_sections;
    dev->mem_sections = g_renew(MemoryRegionSection, dev->mem_sections,
                                dev->n_mem_sections);
    dev->mem_sections[dev->n_mem_sections - 1] = *section;
    memory_region_ref(section->mr);
    vhost_set_memory(listener, section, true);
}

static void vhost_region_del(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int i;

    if (!vhost_section(section)) {
        return;
    }

    vhost_set_memory(listener, section, false);
    memory_region_unref(section->mr);
    for (i = 0; i < dev->n_mem_sections; ++i) {
        if (dev->mem_sections[i].offset_within_address_space
            == section->offset_within_address_space) {
            --dev->n_mem_sections;
            memmove(&dev->mem_sections[i], &dev->mem_sections[i+1],
                    (dev->n_mem_sections - i) * sizeof(*dev->mem_sections));
            break;
        }
    }
}

static void vhost_region_nop(MemoryListener *listener,
                             MemoryRegionSection *section)
{
}

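/*
 * Tell the backend where a vring lives: desc/avail/used as host virtual
 * addresses (the backend accesses them directly), plus the used ring's
 * guest physical address, which is what gets recorded in the dirty log
 * when VHOST_VRING_F_LOG is set.
 */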
static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx, bool enable_log)
{
    struct vhost_vring_addr addr = {
        .index = idx,
        .desc_user_addr = (uint64_t)(unsigned long)vq->desc,
        .avail_user_addr = (uint64_t)(unsigned long)vq->avail,
        .used_user_addr = (uint64_t)(unsigned long)vq->used,
        .log_guest_addr = vq->used_phys,
        .flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0,
    };
    int r = dev->vhost_ops->vhost_set_vring_addr(dev, &addr);
    if (r < 0) {
        return -errno;
    }
    return 0;
}

static int vhost_dev_set_features(struct vhost_dev *dev, bool enable_log)
{
    uint64_t features = dev->acked_features;
    int r;
    if (enable_log) {
        features |= 0x1ULL << VHOST_F_LOG_ALL;
    }
    r = dev->vhost_ops->vhost_set_features(dev, features);
    return r < 0 ? -errno : 0;
}

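/*
 * Flip logging on or off for the whole device: renegotiate features
 * with or without VHOST_F_LOG_ALL, then update every vring's address
 * flags.  On failure, roll the already-updated vrings back to the
 * current dev->log_enabled state so the device is left consistent.
 */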
static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
{
    int r, t, i, idx;
    r = vhost_dev_set_features(dev, enable_log);
    if (r < 0) {
        goto err_features;
    }
    for (i = 0; i < dev->nvqs; ++i) {
        idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
        r = vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
                                     enable_log);
        if (r < 0) {
            goto err_vq;
        }
    }
    return 0;
err_vq:
    for (; i >= 0; --i) {
        idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
        t = vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
                                     dev->log_enabled);
        assert(t >= 0);
    }
    t = vhost_dev_set_features(dev, dev->log_enabled);
    assert(t >= 0);
err_features:
    return r;
}

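/*
 * Migration hook: enable or disable dirty logging.  If the device is
 * not started yet we only record the desired state; vhost_dev_start()
 * will allocate the log and program the backend later.  When disabling,
 * vhost_log_put() is called with sync=false, dropping the final log
 * contents rather than syncing them.
 */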
static int vhost_migration_log(MemoryListener *listener, int enable)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int r;
    if (!!enable == dev->log_enabled) {
        return 0;
    }
    if (!dev->started) {
        dev->log_enabled = enable;
        return 0;
    }
    if (!enable) {
        r = vhost_dev_set_log(dev, false);
        if (r < 0) {
            return r;
        }
        vhost_log_put(dev, false);
        dev->log = NULL;
        dev->log_size = 0;
    } else {
        vhost_dev_log_resize(dev, vhost_get_log_size(dev));
        r = vhost_dev_set_log(dev, true);
        if (r < 0) {
            return r;
        }
    }
    dev->log_enabled = enable;
    return 0;
}

static void vhost_log_global_start(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, true);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_global_stop(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, false);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_start(MemoryListener *listener,
                            MemoryRegionSection *section,
                            int old, int new)
{
    /* FIXME: implement */
}

static void vhost_log_stop(MemoryListener *listener,
                           MemoryRegionSection *section,
                           int old, int new)
{
    /* FIXME: implement */
}

static int vhost_virtqueue_set_vring_endian_legacy(struct vhost_dev *dev,
                                                   bool is_big_endian,
                                                   int vhost_vq_index)
{
    struct vhost_vring_state s = {
        .index = vhost_vq_index,
        .num = is_big_endian
    };

    if (!dev->vhost_ops->vhost_set_vring_endian(dev, &s)) {
        return 0;
    }

    if (errno == ENOTTY) {
        error_report("vhost does not support cross-endian");
        return -ENOSYS;
    }

    return -errno;
}

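/*
 * Bring one virtqueue up in the backend: program the ring size and the
 * last available index, fix the ring endianness for cross-endian legacy
 * devices, map the descriptor/avail/used rings into QEMU and pass their
 * addresses with vhost_virtqueue_set_addr(), and finally hand over the
 * kick eventfd so the backend sees guest notifications directly.
 */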
static int vhost_virtqueue_start(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
{
    hwaddr s, l, a;
    int r;
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
    struct vhost_vring_file file = {
        .index = vhost_vq_index
    };
    struct vhost_vring_state state = {
        .index = vhost_vq_index
    };
    struct VirtQueue *vvq = virtio_get_queue(vdev, idx);

    vq->num = state.num = virtio_queue_get_num(vdev, idx);
    r = dev->vhost_ops->vhost_set_vring_num(dev, &state);
    if (r) {
        return -errno;
    }

    state.num = virtio_queue_get_last_avail_idx(vdev, idx);
    r = dev->vhost_ops->vhost_set_vring_base(dev, &state);
    if (r) {
        return -errno;
    }

    if (!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1) &&
        virtio_legacy_is_cross_endian(vdev)) {
        r = vhost_virtqueue_set_vring_endian_legacy(dev,
                                                    virtio_is_big_endian(vdev),
                                                    vhost_vq_index);
        if (r) {
            return -errno;
        }
    }

    s = l = virtio_queue_get_desc_size(vdev, idx);
    a = virtio_queue_get_desc_addr(vdev, idx);
    vq->desc = cpu_physical_memory_map(a, &l, 0);
    if (!vq->desc || l != s) {
        r = -ENOMEM;
        goto fail_alloc_desc;
    }
    s = l = virtio_queue_get_avail_size(vdev, idx);
    a = virtio_queue_get_avail_addr(vdev, idx);
    vq->avail = cpu_physical_memory_map(a, &l, 0);
    if (!vq->avail || l != s) {
        r = -ENOMEM;
        goto fail_alloc_avail;
    }
    vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
    vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
    vq->used = cpu_physical_memory_map(a, &l, 1);
    if (!vq->used || l != s) {
        r = -ENOMEM;
        goto fail_alloc_used;
    }

    vq->ring_size = s = l = virtio_queue_get_ring_size(vdev, idx);
    vq->ring_phys = a = virtio_queue_get_ring_addr(vdev, idx);
    vq->ring = cpu_physical_memory_map(a, &l, 1);
    if (!vq->ring || l != s) {
        r = -ENOMEM;
        goto fail_alloc_ring;
    }

    r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
    if (r < 0) {
        r = -errno;
        goto fail_alloc;
    }

    file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
    r = dev->vhost_ops->vhost_set_vring_kick(dev, &file);
    if (r) {
        r = -errno;
        goto fail_kick;
    }

    /* Clear and discard previous events if any. */
    event_notifier_test_and_clear(&vq->masked_notifier);

    return 0;

fail_kick:
fail_alloc:
    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, 0);
fail_alloc_ring:
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              0, 0);
fail_alloc_used:
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, 0);
fail_alloc_avail:
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, 0);
fail_alloc_desc:
    return r;
}

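/*
 * Tear one virtqueue down: fetch the backend's last available index
 * with GET_VRING_BASE so virtio processing can resume in QEMU from the
 * right place, restore native endianness for cross-endian legacy
 * devices, and unmap the rings (the used ring is unmapped as written,
 * so its pages are marked dirty).
 */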
static void vhost_virtqueue_stop(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
    struct vhost_vring_state state = {
        .index = vhost_vq_index,
    };
    int r;

    r = dev->vhost_ops->vhost_get_vring_base(dev, &state);
    if (r < 0) {
        fprintf(stderr, "vhost VQ %d ring restore failed: %d\n", idx, r);
        fflush(stderr);
    }
    virtio_queue_set_last_avail_idx(vdev, idx, state.num);
    virtio_queue_invalidate_signalled_used(vdev, idx);

    /* In the cross-endian case, we need to reset the vring endianness to
     * native, as legacy devices expect it by default.
     */
    if (!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1) &&
        virtio_legacy_is_cross_endian(vdev)) {
        r = vhost_virtqueue_set_vring_endian_legacy(dev,
                                                    !virtio_is_big_endian(vdev),
                                                    vhost_vq_index);
        if (r < 0) {
            error_report("failed to reset vring endianness");
        }
    }

    assert(r >= 0);
    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, virtio_queue_get_ring_size(vdev, idx));
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              1, virtio_queue_get_used_size(vdev, idx));
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, virtio_queue_get_avail_size(vdev, idx));
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, virtio_queue_get_desc_size(vdev, idx));
}

static void vhost_eventfd_add(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static void vhost_eventfd_del(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static int vhost_virtqueue_init(struct vhost_dev *dev,
                                struct vhost_virtqueue *vq, int n)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
    struct vhost_vring_file file = {
        .index = vhost_vq_index,
    };
    int r = event_notifier_init(&vq->masked_notifier, 0);
    if (r < 0) {
        return r;
    }

    file.fd = event_notifier_get_fd(&vq->masked_notifier);
    r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
    if (r) {
        r = -errno;
        goto fail_call;
    }
    return 0;
fail_call:
    event_notifier_cleanup(&vq->masked_notifier);
    return r;
}

static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
{
    event_notifier_cleanup(&vq->masked_notifier);
}

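/*
 * Device bring-up, in order: attach the backend (closing the passed-in
 * fd on failure), refuse to start if the backend supports fewer memory
 * slots than are already in use, take ownership, query the feature set,
 * set up each virtqueue's call notifier, and register the memory
 * listener that keeps dev->mem in sync with the guest address space.
 * A migration blocker is installed if the backend cannot log writes
 * (no VHOST_F_LOG_ALL) or no shareable memory is available for the log.
 */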
int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
                   VhostBackendType backend_type)
{
    uint64_t features;
    int i, r;

    hdev->migration_blocker = NULL;

    if (vhost_set_backend_type(hdev, backend_type) < 0) {
        close((uintptr_t)opaque);
        return -1;
    }

    if (hdev->vhost_ops->vhost_backend_init(hdev, opaque) < 0) {
        close((uintptr_t)opaque);
        return -errno;
    }

    if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
        fprintf(stderr, "vhost backend memory slots limit is less"
                " than current number of present memory slots\n");
        close((uintptr_t)opaque);
        return -1;
    }
    QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);

    r = hdev->vhost_ops->vhost_set_owner(hdev);
    if (r < 0) {
        goto fail;
    }

    r = hdev->vhost_ops->vhost_get_features(hdev, &features);
    if (r < 0) {
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_init(hdev, hdev->vqs + i, hdev->vq_index + i);
        if (r < 0) {
            goto fail_vq;
        }
    }
    hdev->features = features;

    hdev->memory_listener = (MemoryListener) {
        .begin = vhost_begin,
        .commit = vhost_commit,
        .region_add = vhost_region_add,
        .region_del = vhost_region_del,
        .region_nop = vhost_region_nop,
        .log_start = vhost_log_start,
        .log_stop = vhost_log_stop,
        .log_sync = vhost_log_sync,
        .log_global_start = vhost_log_global_start,
        .log_global_stop = vhost_log_global_stop,
        .eventfd_add = vhost_eventfd_add,
        .eventfd_del = vhost_eventfd_del,
        .priority = 10
    };

    if (hdev->migration_blocker == NULL) {
        if (!(hdev->features & (0x1ULL << VHOST_F_LOG_ALL))) {
            error_setg(&hdev->migration_blocker,
                       "Migration disabled: vhost lacks VHOST_F_LOG_ALL feature.");
        } else if (!qemu_memfd_check()) {
            error_setg(&hdev->migration_blocker,
                       "Migration disabled: failed to allocate shared memory");
        }
    }

    if (hdev->migration_blocker != NULL) {
        migrate_add_blocker(hdev->migration_blocker);
    }

    hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
    hdev->n_mem_sections = 0;
    hdev->mem_sections = NULL;
    hdev->log = NULL;
    hdev->log_size = 0;
    hdev->log_enabled = false;
    hdev->started = false;
    hdev->memory_changed = false;
    memory_listener_register(&hdev->memory_listener, &address_space_memory);
    return 0;
fail_vq:
    while (--i >= 0) {
        vhost_virtqueue_cleanup(hdev->vqs + i);
    }
fail:
    r = -errno;
    hdev->vhost_ops->vhost_backend_cleanup(hdev);
    QLIST_REMOVE(hdev, entry);
    return r;
}

void vhost_dev_cleanup(struct vhost_dev *hdev)
{
    int i;
    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_cleanup(hdev->vqs + i);
    }
    memory_listener_unregister(&hdev->memory_listener);
    if (hdev->migration_blocker) {
        migrate_del_blocker(hdev->migration_blocker);
        error_free(hdev->migration_blocker);
    }
    g_free(hdev->mem);
    g_free(hdev->mem_sections);
    hdev->vhost_ops->vhost_backend_cleanup(hdev);
    QLIST_REMOVE(hdev, entry);
}

/* Stop processing guest IO notifications in qemu.
 * Start processing them in vhost in kernel.
 */
int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    int i, r, e;
    if (!k->set_host_notifier) {
        fprintf(stderr, "binding does not support host notifiers\n");
        r = -ENOSYS;
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i) {
        r = k->set_host_notifier(qbus->parent, hdev->vq_index + i, true);
        if (r < 0) {
            fprintf(stderr, "vhost VQ %d notifier binding failed: %d\n", i, -r);
            goto fail_vq;
        }
    }

    return 0;
fail_vq:
    while (--i >= 0) {
        e = k->set_host_notifier(qbus->parent, hdev->vq_index + i, false);
        if (e < 0) {
            fprintf(stderr, "vhost VQ %d notifier cleanup error: %d\n", i, -r);
            fflush(stderr);
        }
        assert(e >= 0);
    }
fail:
    return r;
}

1108
1109/* Stop processing guest IO notifications in vhost.
1110 * Start processing them in qemu.
1111 * This might actually run the qemu handlers right away,
1112 * so virtio in qemu must be completely setup when this is called.
1113 */
1114void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
1115{
1c819449
FK
1116 BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
1117 VirtioBusState *vbus = VIRTIO_BUS(qbus);
1118 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
b0b3db79
MT
1119 int i, r;
1120
1121 for (i = 0; i < hdev->nvqs; ++i) {
1c819449 1122 r = k->set_host_notifier(qbus->parent, hdev->vq_index + i, false);
b0b3db79
MT
1123 if (r < 0) {
1124 fprintf(stderr, "vhost VQ %d notifier cleanup failed: %d\n", i, -r);
1125 fflush(stderr);
1126 }
1127 assert (r >= 0);
1128 }
1129}
1130
/* Test and clear event pending status.
 * Should be called after unmask to avoid losing events.
 */
bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
{
    struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index;
    assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);
    return event_notifier_test_and_clear(&vq->masked_notifier);
}

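/*
 * While a queue is masked, the backend's call eventfd is redirected to
 * the queue's masked_notifier instead of the guest notifier, so
 * interrupts are parked rather than injected; vhost_virtqueue_pending()
 * above picks them up after unmasking.
 */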
/* Mask/unmask events from this vq. */
void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
                          bool mask)
{
    struct VirtQueue *vvq = virtio_get_queue(vdev, n);
    int r, index = n - hdev->vq_index;
    struct vhost_vring_file file;

    if (mask) {
        file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier);
    } else {
        file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
    }

    file.index = hdev->vhost_ops->vhost_get_vq_index(hdev, n);
    r = hdev->vhost_ops->vhost_set_vring_call(hdev, &file);
    assert(r >= 0);
}

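/*
 * Feature negotiation helpers.  A hypothetical caller (not from this
 * file) would look roughly like:
 *
 *     features = vhost_get_features(hdev, my_feature_bits, features);
 *     ... guest negotiates features ...
 *     vhost_ack_features(hdev, my_feature_bits, guest_features);
 *
 * where my_feature_bits is a VHOST_INVALID_FEATURE_BIT-terminated array
 * naming the bits the device cares about: get masks out bits the
 * backend lacks, ack records the bits the guest accepted.
 */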
uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
                            uint64_t features)
{
    const int *bit = feature_bits;
    while (*bit != VHOST_INVALID_FEATURE_BIT) {
        uint64_t bit_mask = (1ULL << *bit);
        if (!(hdev->features & bit_mask)) {
            features &= ~bit_mask;
        }
        bit++;
    }
    return features;
}

void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
                        uint64_t features)
{
    const int *bit = feature_bits;
    while (*bit != VHOST_INVALID_FEATURE_BIT) {
        uint64_t bit_mask = (1ULL << *bit);
        if (features & bit_mask) {
            hdev->acked_features |= bit_mask;
        }
        bit++;
    }
}

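/*
 * Start order below: negotiate features (with logging if migration is
 * in flight), push the memory table, start every virtqueue, and only
 * then set up the dirty log.  Teardown on failure runs in reverse.
 */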
/* Host notifiers must be enabled at this point. */
int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;

    hdev->started = true;

    r = vhost_dev_set_features(hdev, hdev->log_enabled);
    if (r < 0) {
        goto fail_features;
    }
    r = hdev->vhost_ops->vhost_set_mem_table(hdev, hdev->mem);
    if (r < 0) {
        r = -errno;
        goto fail_mem;
    }
    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_start(hdev,
                                  vdev,
                                  hdev->vqs + i,
                                  hdev->vq_index + i);
        if (r < 0) {
            goto fail_vq;
        }
    }

    if (hdev->log_enabled) {
        uint64_t log_base;

        hdev->log_size = vhost_get_log_size(hdev);
        hdev->log = vhost_log_get(hdev->log_size,
                                  vhost_dev_log_is_shared(hdev));
        log_base = (uintptr_t)hdev->log->log;
        r = hdev->vhost_ops->vhost_set_log_base(hdev,
                                                hdev->log_size ? log_base : 0,
                                                hdev->log);
        if (r < 0) {
            r = -errno;
            goto fail_log;
        }
    }

    return 0;
fail_log:
    vhost_log_put(hdev, false);
fail_vq:
    while (--i >= 0) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }
    i = hdev->nvqs;
fail_mem:
fail_features:

    hdev->started = false;
    return r;
}

/* Host notifiers must be enabled at this point. */
void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i;

    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }

    vhost_log_put(hdev, true);
    hdev->started = false;
    hdev->log = NULL;
    hdev->log_size = 0;
}