/*
 * vhost support
 *
 * Copyright Red Hat, Inc. 2010
 *
 * Authors:
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/virtio/vhost.h"
#include "hw/hw.h"
#include "qemu/atomic.h"
#include "qemu/range.h"
#include "qemu/error-report.h"
#include "qemu/memfd.h"
#include <linux/vhost.h>
#include "exec/address-spaces.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
#include "migration/migration.h"

static struct vhost_log *vhost_log;
static struct vhost_log *vhost_log_shm;

static unsigned int used_memslots;
static QLIST_HEAD(, vhost_dev) vhost_devices =
    QLIST_HEAD_INITIALIZER(vhost_devices);

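/* Report whether one more memory slot can be used: true if the most
 * restrictive backend limit across all registered vhost devices still
 * exceeds the number of memory slots currently in use. */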
bool vhost_has_free_slot(void)
{
    unsigned int slots_limit = ~0U;
    struct vhost_dev *hdev;

    QLIST_FOREACH(hdev, &vhost_devices, entry) {
        unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
        slots_limit = MIN(slots_limit, r);
    }
    return slots_limit > used_memslots;
}

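/* Scan the dirty log over the intersection of a memory section range
 * [mfirst, mlast] and a tracked range [rfirst, rlast]. The log is an
 * array of vhost_log_chunk_t; each set bit marks one VHOST_LOG_PAGE
 * page as dirty. Chunks are claimed with an atomic exchange so pages
 * dirtied concurrently by the backend are not lost. */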
static void vhost_dev_sync_region(struct vhost_dev *dev,
                                  MemoryRegionSection *section,
                                  uint64_t mfirst, uint64_t mlast,
                                  uint64_t rfirst, uint64_t rlast)
{
    vhost_log_chunk_t *log = dev->log->log;

    uint64_t start = MAX(mfirst, rfirst);
    uint64_t end = MIN(mlast, rlast);
    vhost_log_chunk_t *from = log + start / VHOST_LOG_CHUNK;
    vhost_log_chunk_t *to = log + end / VHOST_LOG_CHUNK + 1;
    uint64_t addr = (start / VHOST_LOG_CHUNK) * VHOST_LOG_CHUNK;

    if (end < start) {
        return;
    }
    assert(end / VHOST_LOG_CHUNK < dev->log_size);
    assert(start / VHOST_LOG_CHUNK < dev->log_size);

    for (;from < to; ++from) {
        vhost_log_chunk_t log;
        /* We first check with non-atomic: much cheaper,
         * and we expect non-dirty to be the common case. */
        if (!*from) {
            addr += VHOST_LOG_CHUNK;
            continue;
        }
        /* Data must be read atomically. We don't really need barrier semantics
         * but it's easier to use atomic_* than roll our own. */
        log = atomic_xchg(from, 0);
        while (log) {
            int bit = ctzl(log);
            hwaddr page_addr;
            hwaddr section_offset;
            hwaddr mr_offset;
            page_addr = addr + bit * VHOST_LOG_PAGE;
            section_offset = page_addr - section->offset_within_address_space;
            mr_offset = section_offset + section->offset_within_region;
            memory_region_set_dirty(section->mr, mr_offset, VHOST_LOG_PAGE);
            log &= ~(0x1ull << bit);
        }
        addr += VHOST_LOG_CHUNK;
    }
}

static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
                                   MemoryRegionSection *section,
                                   hwaddr first,
                                   hwaddr last)
{
    int i;
    hwaddr start_addr;
    hwaddr end_addr;

    if (!dev->log_enabled || !dev->started) {
        return 0;
    }
    start_addr = section->offset_within_address_space;
    end_addr = range_get_last(start_addr, int128_get64(section->size));
    start_addr = MAX(first, start_addr);
    end_addr = MIN(last, end_addr);

    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr,
                              reg->guest_phys_addr,
                              range_get_last(reg->guest_phys_addr,
                                             reg->memory_size));
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys,
                              range_get_last(vq->used_phys, vq->used_size));
    }
    return 0;
}

static void vhost_log_sync(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    vhost_sync_dirty_bitmap(dev, section, 0x0, ~0x0ULL);
}

static void vhost_log_sync_range(struct vhost_dev *dev,
                                 hwaddr first, hwaddr last)
{
    int i;
    /* FIXME: this is N^2 in number of sections */
    for (i = 0; i < dev->n_mem_sections; ++i) {
        MemoryRegionSection *section = &dev->mem_sections[i];
        vhost_sync_dirty_bitmap(dev, section, first, last);
    }
}

/* Assign/unassign. Keep an unsorted array of non-overlapping
 * memory regions in dev->mem. */
static void vhost_dev_unassign_memory(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int from, to, n = dev->mem->nregions;
    /* Track overlapping/split regions for sanity checking. */
    int overlap_start = 0, overlap_end = 0, overlap_middle = 0, split = 0;

    for (from = 0, to = 0; from < n; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t reglast;
        uint64_t memlast;
        uint64_t change;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }

        /* No overlap is simple */
        if (!ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                            start_addr, size)) {
            continue;
        }

        /* Split only happens if supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!split);

        reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        memlast = range_get_last(start_addr, size);

        /* Remove whole region */
        if (start_addr <= reg->guest_phys_addr && memlast >= reglast) {
            --dev->mem->nregions;
            --to;
            ++overlap_middle;
            continue;
        }

        /* Shrink region */
        if (memlast >= reglast) {
            reg->memory_size = start_addr - reg->guest_phys_addr;
            assert(reg->memory_size);
            assert(!overlap_end);
            ++overlap_end;
            continue;
        }

        /* Shift region */
        if (start_addr <= reg->guest_phys_addr) {
            change = memlast + 1 - reg->guest_phys_addr;
            reg->memory_size -= change;
            reg->guest_phys_addr += change;
            reg->userspace_addr += change;
            assert(reg->memory_size);
            assert(!overlap_start);
            ++overlap_start;
            continue;
        }

        /* This only happens if supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!overlap_start);
        assert(!overlap_end);
        assert(!overlap_middle);
        /* Split region: shrink first part, shift second part. */
        memcpy(dev->mem->regions + n, reg, sizeof *reg);
        reg->memory_size = start_addr - reg->guest_phys_addr;
        assert(reg->memory_size);
        change = memlast + 1 - reg->guest_phys_addr;
        reg = dev->mem->regions + n;
        reg->memory_size -= change;
        assert(reg->memory_size);
        reg->guest_phys_addr += change;
        reg->userspace_addr += change;
        /* Never add more than 1 region */
        assert(dev->mem->nregions == n);
        ++dev->mem->nregions;
        ++split;
    }
}

/* Called after unassign, so no regions overlap the given range. */
static void vhost_dev_assign_memory(struct vhost_dev *dev,
                                    uint64_t start_addr,
                                    uint64_t size,
                                    uint64_t uaddr)
{
    int from, to;
    struct vhost_memory_region *merged = NULL;
    for (from = 0, to = 0; from < dev->mem->nregions; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t prlast, urlast;
        uint64_t pmlast, umlast;
        uint64_t s, e, u;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }
        prlast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        pmlast = range_get_last(start_addr, size);
        urlast = range_get_last(reg->userspace_addr, reg->memory_size);
        umlast = range_get_last(uaddr, size);

        /* check for overlapping regions: should never happen. */
        assert(prlast < start_addr || pmlast < reg->guest_phys_addr);
        /* Not an adjacent or overlapping region - do not merge. */
        if ((prlast + 1 != start_addr || urlast + 1 != uaddr) &&
            (pmlast + 1 != reg->guest_phys_addr ||
             umlast + 1 != reg->userspace_addr)) {
            continue;
        }

        if (dev->vhost_ops->vhost_backend_can_merge &&
            !dev->vhost_ops->vhost_backend_can_merge(dev, uaddr, size,
                                                     reg->userspace_addr,
                                                     reg->memory_size)) {
            continue;
        }

        if (merged) {
            --to;
            assert(to >= 0);
        } else {
            merged = reg;
        }
        u = MIN(uaddr, reg->userspace_addr);
        s = MIN(start_addr, reg->guest_phys_addr);
        e = MAX(pmlast, prlast);
        uaddr = merged->userspace_addr = u;
        start_addr = merged->guest_phys_addr = s;
        size = merged->memory_size = e - s + 1;
        assert(merged->memory_size);
    }

    if (!merged) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        memset(reg, 0, sizeof *reg);
        reg->memory_size = size;
        assert(reg->memory_size);
        reg->guest_phys_addr = start_addr;
        reg->userspace_addr = uaddr;
        ++to;
    }
    assert(to <= dev->mem->nregions + 1);
    dev->mem->nregions = to;
}

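/* Size (in chunks) that the dirty log must have to cover the highest
 * guest physical address among all tracked memory regions and all
 * virtqueue used rings. */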
static uint64_t vhost_get_log_size(struct vhost_dev *dev)
{
    uint64_t log_size = 0;
    int i;
    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        uint64_t last = range_get_last(reg->guest_phys_addr,
                                       reg->memory_size);
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        uint64_t last = vq->used_phys + vq->used_size - 1;
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    return log_size;
}

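/* The dirty log is refcounted and shared by all devices that need the
 * same size. Backends running in a separate process (e.g. vhost-user)
 * need the log in shareable memory, so it is then backed by a sealed
 * memfd; in-kernel vhost uses plain heap memory. */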
static struct vhost_log *vhost_log_alloc(uint64_t size, bool share)
{
    struct vhost_log *log;
    uint64_t logsize = size * sizeof(*(log->log));
    int fd = -1;

    log = g_new0(struct vhost_log, 1);
    if (share) {
        log->log = qemu_memfd_alloc("vhost-log", logsize,
                                    F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
                                    &fd);
        memset(log->log, 0, logsize);
    } else {
        log->log = g_malloc0(logsize);
    }

    log->size = size;
    log->refcnt = 1;
    log->fd = fd;

    return log;
}

static struct vhost_log *vhost_log_get(uint64_t size, bool share)
{
    struct vhost_log *log = share ? vhost_log_shm : vhost_log;

    if (!log || log->size != size) {
        log = vhost_log_alloc(size, share);
        if (share) {
            vhost_log_shm = log;
        } else {
            vhost_log = log;
        }
    } else {
        ++log->refcnt;
    }

    return log;
}

static void vhost_log_put(struct vhost_dev *dev, bool sync)
{
    struct vhost_log *log = dev->log;

    if (!log) {
        return;
    }

    --log->refcnt;
    if (log->refcnt == 0) {
        /* Sync only the range covered by the old log */
        if (dev->log_size && sync) {
            vhost_log_sync_range(dev, 0, dev->log_size * VHOST_LOG_CHUNK - 1);
        }

        if (vhost_log == log) {
            g_free(log->log);
            vhost_log = NULL;
        } else if (vhost_log_shm == log) {
            qemu_memfd_free(log->log, log->size * sizeof(*(log->log)),
                            log->fd);
            vhost_log_shm = NULL;
        }

        g_free(log);
    }

    /* Clear these only after the final sync above, which still needs
     * dev->log_size to know the range covered by the old log. */
    dev->log = NULL;
    dev->log_size = 0;
}

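/* Switch the device to a log of the given size, telling the backend
 * about the new log before dropping the reference to the old one so
 * that no write goes unlogged during the switch. */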
static bool vhost_dev_log_is_shared(struct vhost_dev *dev)
{
    return dev->vhost_ops->vhost_requires_shm_log &&
           dev->vhost_ops->vhost_requires_shm_log(dev);
}

static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
{
    struct vhost_log *log = vhost_log_get(size, vhost_dev_log_is_shared(dev));
    uint64_t log_base = (uintptr_t)log->log;
    int r;

    /* Inform the backend of the log switch before releasing the current
     * log, to ensure no logging is lost. */
    r = dev->vhost_ops->vhost_set_log_base(dev, log_base, log);
    assert(r >= 0);
    vhost_log_put(dev, true);
    dev->log = log;
    dev->log_size = size;
}

static int vhost_verify_ring_mappings(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int i;
    int r = 0;

    for (i = 0; !r && i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        hwaddr l;
        void *p;

        if (!ranges_overlap(start_addr, size, vq->ring_phys, vq->ring_size)) {
            continue;
        }
        l = vq->ring_size;
        p = cpu_physical_memory_map(vq->ring_phys, &l, 1);
        if (!p || l != vq->ring_size) {
            fprintf(stderr, "Unable to map ring buffer for ring %d\n", i);
            r = -ENOMEM;
        }
        if (p != vq->ring) {
            fprintf(stderr, "Ring buffer relocated for ring %d\n", i);
            r = -EBUSY;
        }
        cpu_physical_memory_unmap(p, l, 0, 0);
    }
    return r;
}

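/* Find the tracked region, if any, that overlaps the guest physical
 * range [start_addr, start_addr + size - 1]. */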
static struct vhost_memory_region *vhost_dev_find_reg(struct vhost_dev *dev,
                                                      uint64_t start_addr,
                                                      uint64_t size)
{
    int i, n = dev->mem->nregions;
    for (i = 0; i < n; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        if (ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                           start_addr, size)) {
            return reg;
        }
    }
    return NULL;
}

static bool vhost_dev_cmp_memory(struct vhost_dev *dev,
                                 uint64_t start_addr,
                                 uint64_t size,
                                 uint64_t uaddr)
{
    struct vhost_memory_region *reg = vhost_dev_find_reg(dev, start_addr, size);
    uint64_t reglast;
    uint64_t memlast;

    if (!reg) {
        return true;
    }

    reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
    memlast = range_get_last(start_addr, size);

    /* Need to extend region? */
    if (start_addr < reg->guest_phys_addr || memlast > reglast) {
        return true;
    }
    /* userspace_addr changed? */
    return uaddr != reg->userspace_addr + start_addr - reg->guest_phys_addr;
}

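/* MemoryListener add/remove handler: translate a section change into an
 * update of the flat region table in dev->mem. A section whose memory
 * region has dirty logging enabled for anything other than migration
 * (e.g. VGA) is treated as a removal, since vhost dirty tracking only
 * feeds the migration bitmap. */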
static void vhost_set_memory(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool add)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    bool log_dirty =
        memory_region_get_dirty_log_mask(section->mr) & ~(1 << DIRTY_MEMORY_MIGRATION);
    int s = offsetof(struct vhost_memory, regions) +
        (dev->mem->nregions + 1) * sizeof dev->mem->regions[0];
    void *ram;

    dev->mem = g_realloc(dev->mem, s);

    if (log_dirty) {
        add = false;
    }

    assert(size);

    /* Optimize no-change case. At least cirrus_vga does this a lot at this time. */
    ram = memory_region_get_ram_ptr(section->mr) + section->offset_within_region;
    if (add) {
        if (!vhost_dev_cmp_memory(dev, start_addr, size, (uintptr_t)ram)) {
            /* Region exists with same address. Nothing to do. */
            return;
        }
    } else {
        if (!vhost_dev_find_reg(dev, start_addr, size)) {
            /* Removing region that we don't access. Nothing to do. */
            return;
        }
    }

    vhost_dev_unassign_memory(dev, start_addr, size);
    if (add) {
        /* Add given mapping, merging adjacent regions if any */
        vhost_dev_assign_memory(dev, start_addr, size, (uintptr_t)ram);
    } else {
        /* Remove old mapping for this memory, if any. */
        vhost_dev_unassign_memory(dev, start_addr, size);
    }
    dev->mem_changed_start_addr = MIN(dev->mem_changed_start_addr, start_addr);
    dev->mem_changed_end_addr = MAX(dev->mem_changed_end_addr, start_addr + size - 1);
    dev->memory_changed = true;
    used_memslots = dev->mem->nregions;
}

static bool vhost_section(MemoryRegionSection *section)
{
    return memory_region_is_ram(section->mr);
}

static void vhost_begin(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    dev->mem_changed_end_addr = 0;
    dev->mem_changed_start_addr = -1;
}

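/* Flush the accumulated section changes to the backend: re-verify the
 * ring mappings, push the new memory table, and grow or shrink the
 * dirty log as needed (grow before the table update, shrink only
 * after, so no page is ever logged out of bounds). */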
static void vhost_commit(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    hwaddr start_addr = 0;
    ram_addr_t size = 0;
    uint64_t log_size;
    int r;

    if (!dev->memory_changed) {
        return;
    }
    if (!dev->started) {
        return;
    }
    if (dev->mem_changed_start_addr > dev->mem_changed_end_addr) {
        return;
    }

    if (dev->started) {
        start_addr = dev->mem_changed_start_addr;
        size = dev->mem_changed_end_addr - dev->mem_changed_start_addr + 1;

        r = vhost_verify_ring_mappings(dev, start_addr, size);
        assert(r >= 0);
    }

    if (!dev->log_enabled) {
        r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
        assert(r >= 0);
        dev->memory_changed = false;
        return;
    }
    log_size = vhost_get_log_size(dev);
    /* We allocate an extra 4K bytes to log,
     * to reduce the number of reallocations. */
#define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
    /* To log more, must increase log size before table update. */
    if (dev->log_size < log_size) {
        vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
    }
    r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
    assert(r >= 0);
    /* To log less, can only decrease log size after table update. */
    if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
        vhost_dev_log_resize(dev, log_size);
    }
    dev->memory_changed = false;
}

static void vhost_region_add(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);

    if (!vhost_section(section)) {
        return;
    }

    ++dev->n_mem_sections;
    dev->mem_sections = g_renew(MemoryRegionSection, dev->mem_sections,
                                dev->n_mem_sections);
    dev->mem_sections[dev->n_mem_sections - 1] = *section;
    memory_region_ref(section->mr);
    vhost_set_memory(listener, section, true);
}

static void vhost_region_del(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int i;

    if (!vhost_section(section)) {
        return;
    }

    vhost_set_memory(listener, section, false);
    memory_region_unref(section->mr);
    for (i = 0; i < dev->n_mem_sections; ++i) {
        if (dev->mem_sections[i].offset_within_address_space
            == section->offset_within_address_space) {
            --dev->n_mem_sections;
            memmove(&dev->mem_sections[i], &dev->mem_sections[i+1],
                    (dev->n_mem_sections - i) * sizeof(*dev->mem_sections));
            break;
        }
    }
}

static void vhost_region_nop(MemoryListener *listener,
                             MemoryRegionSection *section)
{
}

static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx, bool enable_log)
{
    struct vhost_vring_addr addr = {
        .index = idx,
        .desc_user_addr = (uint64_t)(unsigned long)vq->desc,
        .avail_user_addr = (uint64_t)(unsigned long)vq->avail,
        .used_user_addr = (uint64_t)(unsigned long)vq->used,
        .log_guest_addr = vq->used_phys,
        .flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0,
    };
    int r = dev->vhost_ops->vhost_set_vring_addr(dev, &addr);
    if (r < 0) {
        return -errno;
    }
    return 0;
}

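/* Logging is toggled by negotiating VHOST_F_LOG_ALL on top of the
 * already acked features and reprogramming every virtqueue's address
 * flags; on failure the previous state is restored. */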
static int vhost_dev_set_features(struct vhost_dev *dev, bool enable_log)
{
    uint64_t features = dev->acked_features;
    int r;
    if (enable_log) {
        features |= 0x1ULL << VHOST_F_LOG_ALL;
    }
    r = dev->vhost_ops->vhost_set_features(dev, features);
    return r < 0 ? -errno : 0;
}

static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
{
    int r, t, i, idx;
    r = vhost_dev_set_features(dev, enable_log);
    if (r < 0) {
        goto err_features;
    }
    for (i = 0; i < dev->nvqs; ++i) {
        idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
        r = vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
                                     enable_log);
        if (r < 0) {
            goto err_vq;
        }
    }
    return 0;
err_vq:
    for (; i >= 0; --i) {
        idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
        t = vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
                                     dev->log_enabled);
        assert(t >= 0);
    }
    t = vhost_dev_set_features(dev, dev->log_enabled);
    assert(t >= 0);
err_features:
    return r;
}

static int vhost_migration_log(MemoryListener *listener, int enable)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int r;
    if (!!enable == dev->log_enabled) {
        return 0;
    }
    if (!dev->started) {
        dev->log_enabled = enable;
        return 0;
    }
    if (!enable) {
        r = vhost_dev_set_log(dev, false);
        if (r < 0) {
            return r;
        }
        vhost_log_put(dev, false);
    } else {
        vhost_dev_log_resize(dev, vhost_get_log_size(dev));
        r = vhost_dev_set_log(dev, true);
        if (r < 0) {
            return r;
        }
    }
    dev->log_enabled = enable;
    return 0;
}

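/* Dirty log lifecycle follows the MemoryListener log_global_start/stop
 * callbacks (i.e. migration): there is no way to report failure from
 * them, so abort on error. */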
static void vhost_log_global_start(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, true);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_global_stop(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, false);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_start(MemoryListener *listener,
                            MemoryRegionSection *section,
                            int old, int new)
{
    /* FIXME: implement */
}

static void vhost_log_stop(MemoryListener *listener,
                           MemoryRegionSection *section,
                           int old, int new)
{
    /* FIXME: implement */
}

/* The vhost driver natively knows how to handle the vrings of non
 * cross-endian legacy devices and modern devices. Only legacy devices
 * exposed to a bi-endian guest may require the vhost driver to use a
 * specific endianness.
 */
static inline bool vhost_needs_vring_endian(VirtIODevice *vdev)
{
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        return false;
    }
#ifdef HOST_WORDS_BIGENDIAN
    return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_LITTLE;
#else
    return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_BIG;
#endif
}

static int vhost_virtqueue_set_vring_endian_legacy(struct vhost_dev *dev,
                                                   bool is_big_endian,
                                                   int vhost_vq_index)
{
    struct vhost_vring_state s = {
        .index = vhost_vq_index,
        .num = is_big_endian
    };

    if (!dev->vhost_ops->vhost_set_vring_endian(dev, &s)) {
        return 0;
    }

    if (errno == ENOTTY) {
        error_report("vhost does not support cross-endian");
        return -ENOSYS;
    }

    return -errno;
}

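/* Bring one virtqueue up in the backend: program its size and base
 * index, fix up the endianness for legacy cross-endian setups, map the
 * descriptor/avail/used rings into our address space, hand the
 * addresses to the backend, and wire up the kick eventfd. */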
static int vhost_virtqueue_start(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
{
    hwaddr s, l, a;
    int r;
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
    struct vhost_vring_file file = {
        .index = vhost_vq_index
    };
    struct vhost_vring_state state = {
        .index = vhost_vq_index
    };
    struct VirtQueue *vvq = virtio_get_queue(vdev, idx);

    vq->num = state.num = virtio_queue_get_num(vdev, idx);
    r = dev->vhost_ops->vhost_set_vring_num(dev, &state);
    if (r) {
        return -errno;
    }

    state.num = virtio_queue_get_last_avail_idx(vdev, idx);
    r = dev->vhost_ops->vhost_set_vring_base(dev, &state);
    if (r) {
        return -errno;
    }

    if (vhost_needs_vring_endian(vdev)) {
        r = vhost_virtqueue_set_vring_endian_legacy(dev,
                                                    virtio_is_big_endian(vdev),
                                                    vhost_vq_index);
        if (r) {
            return -errno;
        }
    }

    s = l = virtio_queue_get_desc_size(vdev, idx);
    a = virtio_queue_get_desc_addr(vdev, idx);
    vq->desc = cpu_physical_memory_map(a, &l, 0);
    if (!vq->desc || l != s) {
        r = -ENOMEM;
        goto fail_alloc_desc;
    }
    s = l = virtio_queue_get_avail_size(vdev, idx);
    a = virtio_queue_get_avail_addr(vdev, idx);
    vq->avail = cpu_physical_memory_map(a, &l, 0);
    if (!vq->avail || l != s) {
        r = -ENOMEM;
        goto fail_alloc_avail;
    }
    vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
    vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
    vq->used = cpu_physical_memory_map(a, &l, 1);
    if (!vq->used || l != s) {
        r = -ENOMEM;
        goto fail_alloc_used;
    }

    vq->ring_size = s = l = virtio_queue_get_ring_size(vdev, idx);
    vq->ring_phys = a = virtio_queue_get_ring_addr(vdev, idx);
    vq->ring = cpu_physical_memory_map(a, &l, 1);
    if (!vq->ring || l != s) {
        r = -ENOMEM;
        goto fail_alloc_ring;
    }

    r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
    if (r < 0) {
        r = -errno;
        goto fail_alloc;
    }

    file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
    r = dev->vhost_ops->vhost_set_vring_kick(dev, &file);
    if (r) {
        r = -errno;
        goto fail_kick;
    }

    /* Clear and discard previous events if any. */
    event_notifier_test_and_clear(&vq->masked_notifier);

    /* Init vring in unmasked state, unless guest_notifier_mask
     * will do it later.
     */
    if (!vdev->use_guest_notifier_mask) {
        /* TODO: check and handle errors. */
        vhost_virtqueue_mask(dev, vdev, idx, false);
    }

    return 0;

fail_kick:
fail_alloc:
    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, 0);
fail_alloc_ring:
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              0, 0);
fail_alloc_used:
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, 0);
fail_alloc_avail:
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, 0);
fail_alloc_desc:
    return r;
}

static void vhost_virtqueue_stop(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
    struct vhost_vring_state state = {
        .index = vhost_vq_index,
    };
    int r;

    r = dev->vhost_ops->vhost_get_vring_base(dev, &state);
    if (r < 0) {
        fprintf(stderr, "vhost VQ %d ring restore failed: %d\n", idx, r);
        fflush(stderr);
    }
    virtio_queue_set_last_avail_idx(vdev, idx, state.num);
    virtio_queue_invalidate_signalled_used(vdev, idx);

    /* In the cross-endian case, we need to reset the vring endianness to
     * native, as legacy devices expect it by default.
     */
    if (vhost_needs_vring_endian(vdev)) {
        r = vhost_virtqueue_set_vring_endian_legacy(dev,
                                                    !virtio_is_big_endian(vdev),
                                                    vhost_vq_index);
        if (r < 0) {
            error_report("failed to reset vring endianness");
        }
    }

    assert (r >= 0);
    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, virtio_queue_get_ring_size(vdev, idx));
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              1, virtio_queue_get_used_size(vdev, idx));
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, virtio_queue_get_avail_size(vdev, idx));
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, virtio_queue_get_desc_size(vdev, idx));
}

static void vhost_eventfd_add(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static void vhost_eventfd_del(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static int vhost_virtqueue_set_busyloop_timeout(struct vhost_dev *dev,
                                                int n, uint32_t timeout)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
    struct vhost_vring_state state = {
        .index = vhost_vq_index,
        .num = timeout,
    };
    int r;

    if (!dev->vhost_ops->vhost_set_vring_busyloop_timeout) {
        return -EINVAL;
    }

    r = dev->vhost_ops->vhost_set_vring_busyloop_timeout(dev, &state);
    if (r) {
        return r;
    }

    return 0;
}

static int vhost_virtqueue_init(struct vhost_dev *dev,
                                struct vhost_virtqueue *vq, int n)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
    struct vhost_vring_file file = {
        .index = vhost_vq_index,
    };
    int r = event_notifier_init(&vq->masked_notifier, 0);
    if (r < 0) {
        return r;
    }

    file.fd = event_notifier_get_fd(&vq->masked_notifier);
    r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
    if (r) {
        r = -errno;
        goto fail_call;
    }
    return 0;
fail_call:
    event_notifier_cleanup(&vq->masked_notifier);
    return r;
}

static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
{
    event_notifier_cleanup(&vq->masked_notifier);
}

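/* Initialise a vhost device: bind the backend, check its memory slot
 * limit against the slots already in use, take ownership, query
 * features, set up per-virtqueue notifiers and optional busy-loop
 * timeouts, install the memory listener, and register a migration
 * blocker when dirty logging cannot work. */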
int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
                   VhostBackendType backend_type, uint32_t busyloop_timeout)
{
    uint64_t features;
    int i, r;

    hdev->migration_blocker = NULL;

    r = vhost_set_backend_type(hdev, backend_type);
    assert(r >= 0);

    r = hdev->vhost_ops->vhost_backend_init(hdev, opaque);
    if (r < 0) {
        goto fail;
    }

    if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
        fprintf(stderr, "vhost backend memory slots limit is less"
                " than current number of present memory slots\n");
        r = -1;
        goto fail;
    }

    r = hdev->vhost_ops->vhost_set_owner(hdev);
    if (r < 0) {
        goto fail;
    }

    r = hdev->vhost_ops->vhost_get_features(hdev, &features);
    if (r < 0) {
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_init(hdev, hdev->vqs + i, hdev->vq_index + i);
        if (r < 0) {
            goto fail_vq;
        }
    }

    if (busyloop_timeout) {
        for (i = 0; i < hdev->nvqs; ++i) {
            r = vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i,
                                                     busyloop_timeout);
            if (r < 0) {
                goto fail_busyloop;
            }
        }
    }

    hdev->features = features;

    hdev->memory_listener = (MemoryListener) {
        .begin = vhost_begin,
        .commit = vhost_commit,
        .region_add = vhost_region_add,
        .region_del = vhost_region_del,
        .region_nop = vhost_region_nop,
        .log_start = vhost_log_start,
        .log_stop = vhost_log_stop,
        .log_sync = vhost_log_sync,
        .log_global_start = vhost_log_global_start,
        .log_global_stop = vhost_log_global_stop,
        .eventfd_add = vhost_eventfd_add,
        .eventfd_del = vhost_eventfd_del,
        .priority = 10
    };

    if (hdev->migration_blocker == NULL) {
        if (!(hdev->features & (0x1ULL << VHOST_F_LOG_ALL))) {
            error_setg(&hdev->migration_blocker,
                       "Migration disabled: vhost lacks VHOST_F_LOG_ALL feature.");
        } else if (!qemu_memfd_check()) {
            error_setg(&hdev->migration_blocker,
                       "Migration disabled: failed to allocate shared memory");
        }
    }

    if (hdev->migration_blocker != NULL) {
        migrate_add_blocker(hdev->migration_blocker);
    }

    hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
    hdev->n_mem_sections = 0;
    hdev->mem_sections = NULL;
    hdev->log = NULL;
    hdev->log_size = 0;
    hdev->log_enabled = false;
    hdev->started = false;
    hdev->memory_changed = false;
    memory_listener_register(&hdev->memory_listener, &address_space_memory);
    QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
    return 0;

fail_busyloop:
    while (--i >= 0) {
        vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i, 0);
    }
    i = hdev->nvqs;
fail_vq:
    while (--i >= 0) {
        vhost_virtqueue_cleanup(hdev->vqs + i);
    }
fail:
    /* The device is only added to vhost_devices on success, so there
     * is nothing to remove from the list here. */
    r = -errno;
    hdev->vhost_ops->vhost_backend_cleanup(hdev);
    return r;
}

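/* Tear down a device. This must also be safe on a device whose
 * vhost_dev_init() failed part-way: the memory listener and the
 * vhost_devices entry only exist after a fully successful init, which
 * is what the hdev->mem check stands for. */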
void vhost_dev_cleanup(struct vhost_dev *hdev)
{
    int i;
    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_cleanup(hdev->vqs + i);
    }
    if (hdev->mem) {
        /* those are only safe after successful init */
        memory_listener_unregister(&hdev->memory_listener);
        QLIST_REMOVE(hdev, entry);
    }
    if (hdev->migration_blocker) {
        migrate_del_blocker(hdev->migration_blocker);
        error_free(hdev->migration_blocker);
    }
    g_free(hdev->mem);
    g_free(hdev->mem_sections);
    hdev->vhost_ops->vhost_backend_cleanup(hdev);
    assert(!hdev->log);
}

/* Stop processing guest IO notifications in qemu.
 * Start processing them in vhost in kernel.
 */
int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    int i, r, e;
    if (!k->ioeventfd_started) {
        fprintf(stderr, "binding does not support host notifiers\n");
        r = -ENOSYS;
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i) {
        r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
                                         true);
        if (r < 0) {
            fprintf(stderr, "vhost VQ %d notifier binding failed: %d\n", i, -r);
            goto fail_vq;
        }
    }

    return 0;
fail_vq:
    while (--i >= 0) {
        e = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
                                         false);
        if (e < 0) {
            fprintf(stderr, "vhost VQ %d notifier cleanup error: %d\n", i, -e);
            fflush(stderr);
        }
        assert (e >= 0);
    }
fail:
    return r;
}

/* Stop processing guest IO notifications in vhost.
 * Start processing them in qemu.
 * This might actually run the qemu handlers right away,
 * so virtio in qemu must be completely set up when this is called.
 */
void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    int i, r;

    for (i = 0; i < hdev->nvqs; ++i) {
        r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
                                         false);
        if (r < 0) {
            fprintf(stderr, "vhost VQ %d notifier cleanup failed: %d\n", i, -r);
            fflush(stderr);
        }
        assert (r >= 0);
    }
}

/* Test and clear event pending status.
 * Should be called after unmask to avoid losing events.
 */
bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
{
    struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index;
    assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);
    return event_notifier_test_and_clear(&vq->masked_notifier);
}

/* Mask/unmask events from this vq. */
void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
                          bool mask)
{
    struct VirtQueue *vvq = virtio_get_queue(vdev, n);
    int r, index = n - hdev->vq_index;
    struct vhost_vring_file file;

    if (mask) {
        assert(vdev->use_guest_notifier_mask);
        file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier);
    } else {
        file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
    }

    file.index = hdev->vhost_ops->vhost_get_vq_index(hdev, n);
    r = hdev->vhost_ops->vhost_set_vring_call(hdev, &file);
    assert(r >= 0);
}

uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
                            uint64_t features)
{
    const int *bit = feature_bits;
    while (*bit != VHOST_INVALID_FEATURE_BIT) {
        uint64_t bit_mask = (1ULL << *bit);
        if (!(hdev->features & bit_mask)) {
            features &= ~bit_mask;
        }
        bit++;
    }
    return features;
}

void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
                        uint64_t features)
{
    const int *bit = feature_bits;
    while (*bit != VHOST_INVALID_FEATURE_BIT) {
        uint64_t bit_mask = (1ULL << *bit);
        if (features & bit_mask) {
            hdev->acked_features |= bit_mask;
        }
        bit++;
    }
}

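/* Start sequence: negotiate features, push the memory table, start
 * every virtqueue, then (if dirty logging is on) size and install the
 * log. The error path unwinds in reverse order. */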
/* Host notifiers must be enabled at this point. */
int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;

    hdev->started = true;

    r = vhost_dev_set_features(hdev, hdev->log_enabled);
    if (r < 0) {
        goto fail_features;
    }
    r = hdev->vhost_ops->vhost_set_mem_table(hdev, hdev->mem);
    if (r < 0) {
        r = -errno;
        goto fail_mem;
    }
    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_start(hdev,
                                  vdev,
                                  hdev->vqs + i,
                                  hdev->vq_index + i);
        if (r < 0) {
            goto fail_vq;
        }
    }

    if (hdev->log_enabled) {
        uint64_t log_base;

        hdev->log_size = vhost_get_log_size(hdev);
        hdev->log = vhost_log_get(hdev->log_size,
                                  vhost_dev_log_is_shared(hdev));
        log_base = (uintptr_t)hdev->log->log;
        r = hdev->vhost_ops->vhost_set_log_base(hdev,
                                                hdev->log_size ? log_base : 0,
                                                hdev->log);
        if (r < 0) {
            r = -errno;
            goto fail_log;
        }
    }

    return 0;
fail_log:
    vhost_log_put(hdev, false);
fail_vq:
    while (--i >= 0) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }
    i = hdev->nvqs;
fail_mem:
fail_features:

    hdev->started = false;
    return r;
}

/* Host notifiers must be enabled at this point. */
void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i;

    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }

    vhost_log_put(hdev, true);
    hdev->started = false;
}