/*
 * vhost support
 *
 * Copyright Red Hat, Inc. 2010
 *
 * Authors:
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include <sys/ioctl.h>
#include "vhost.h"
#include "hw/hw.h"
#include "range.h"
#include <linux/vhost.h>

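/* Mark pages dirty for the intersection of the memory range
 * [mfirst, mlast] and the log range [rfirst, rlast]: each set bit in
 * the vhost dirty log covers one VHOST_LOG_PAGE sized page. */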
static void vhost_dev_sync_region(struct vhost_dev *dev,
                                  uint64_t mfirst, uint64_t mlast,
                                  uint64_t rfirst, uint64_t rlast)
{
    uint64_t start = MAX(mfirst, rfirst);
    uint64_t end = MIN(mlast, rlast);
    vhost_log_chunk_t *from = dev->log + start / VHOST_LOG_CHUNK;
    vhost_log_chunk_t *to = dev->log + end / VHOST_LOG_CHUNK + 1;
    uint64_t addr = (start / VHOST_LOG_CHUNK) * VHOST_LOG_CHUNK;

    /* Disjoint ranges must not trip the bounds asserts below. */
    if (end < start) {
        return;
    }
    assert(end / VHOST_LOG_CHUNK < dev->log_size);
    assert(start / VHOST_LOG_CHUNK < dev->log_size);
    for (; from < to; ++from) {
        vhost_log_chunk_t log;
        int bit;
        /* We first check with non-atomic: much cheaper,
         * and we expect non-dirty to be the common case. */
        if (!*from) {
            addr += VHOST_LOG_CHUNK;
            continue;
        }
        /* Data must be read atomically. We don't really
         * need the barrier semantics of __sync
         * builtins, but it's easier to use them than
         * to roll our own. */
        log = __sync_fetch_and_and(from, 0);
        while ((bit = sizeof(log) > sizeof(int) ?
                ffsll(log) : ffs(log))) {
            ram_addr_t ram_addr;
            bit -= 1;
            ram_addr = cpu_get_physical_page_desc(addr + bit * VHOST_LOG_PAGE);
            cpu_physical_memory_set_dirty(ram_addr);
            log &= ~(0x1ull << bit);
        }
        addr += VHOST_LOG_CHUNK;
    }
}

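/* Sync dirty pages from the kernel's log into QEMU's bitmap for every
 * vhost memory region and used ring that intersects the given range. */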
static int vhost_client_sync_dirty_bitmap(CPUPhysMemoryClient *client,
                                          target_phys_addr_t start_addr,
                                          target_phys_addr_t end_addr)
{
    struct vhost_dev *dev = container_of(client, struct vhost_dev, client);
    int i;
    if (!dev->log_enabled || !dev->started) {
        return 0;
    }
    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        vhost_dev_sync_region(dev, start_addr, end_addr,
                              reg->guest_phys_addr,
                              range_get_last(reg->guest_phys_addr,
                                             reg->memory_size));
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        vhost_dev_sync_region(dev, start_addr, end_addr, vq->used_phys,
                              range_get_last(vq->used_phys, vq->used_size));
    }
    return 0;
}

/* Assign/unassign. Keep an unsorted array of non-overlapping
 * memory regions in dev->mem. */
static void vhost_dev_unassign_memory(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int from, to, n = dev->mem->nregions;
    /* Track overlapping/split regions for sanity checking. */
    int overlap_start = 0, overlap_end = 0, overlap_middle = 0, split = 0;

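    /* Each existing region either does not overlap the range at all,
     * is removed entirely, is shrunk at its end, is shifted at its
     * start, or is split in two around the range. */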
    for (from = 0, to = 0; from < n; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t reglast;
        uint64_t memlast;
        uint64_t change;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }

        /* No overlap is simple */
        if (!ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                            start_addr, size)) {
            continue;
        }

        /* Split only happens if the supplied region
         * is in the middle of an existing one. Thus it cannot
         * overlap with any other existing region. */
        assert(!split);

        reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        memlast = range_get_last(start_addr, size);

        /* Remove whole region */
        if (start_addr <= reg->guest_phys_addr && memlast >= reglast) {
            --dev->mem->nregions;
            --to;
            assert(to >= 0);
            ++overlap_middle;
            continue;
        }

        /* Shrink region */
        if (memlast >= reglast) {
            reg->memory_size = start_addr - reg->guest_phys_addr;
            assert(reg->memory_size);
            assert(!overlap_end);
            ++overlap_end;
            continue;
        }

        /* Shift region */
        if (start_addr <= reg->guest_phys_addr) {
            change = memlast + 1 - reg->guest_phys_addr;
            reg->memory_size -= change;
            reg->guest_phys_addr += change;
            reg->userspace_addr += change;
            assert(reg->memory_size);
            assert(!overlap_start);
            ++overlap_start;
            continue;
        }

        /* This only happens if the supplied region
         * is in the middle of an existing one. Thus it cannot
         * overlap with any other existing region. */
        assert(!overlap_start);
        assert(!overlap_end);
        assert(!overlap_middle);
        /* Split region: shrink first part, shift second part. */
        memcpy(dev->mem->regions + n, reg, sizeof *reg);
        reg->memory_size = start_addr - reg->guest_phys_addr;
        assert(reg->memory_size);
        change = memlast + 1 - reg->guest_phys_addr;
        reg = dev->mem->regions + n;
        reg->memory_size -= change;
        assert(reg->memory_size);
        reg->guest_phys_addr += change;
        reg->userspace_addr += change;
        /* Never add more than 1 region */
        assert(dev->mem->nregions == n);
        ++dev->mem->nregions;
        ++split;
    }
}

/* Called after unassign, so no regions overlap the given range. */
static void vhost_dev_assign_memory(struct vhost_dev *dev,
                                    uint64_t start_addr,
                                    uint64_t size,
                                    uint64_t uaddr)
{
    int from, to;
    struct vhost_memory_region *merged = NULL;
    for (from = 0, to = 0; from < dev->mem->nregions; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t prlast, urlast;
        uint64_t pmlast, umlast;
        uint64_t s, e, u;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }
        prlast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        pmlast = range_get_last(start_addr, size);
        urlast = range_get_last(reg->userspace_addr, reg->memory_size);
        umlast = range_get_last(uaddr, size);

        /* check for overlapping regions: should never happen. */
        assert(prlast < start_addr || pmlast < reg->guest_phys_addr);
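        /* Regions can only be merged if they are adjacent in both the
         * guest-physical and the userspace mappings; otherwise the two
         * mappings have different offsets and must stay separate. */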
        /* Not an adjacent or overlapping region - do not merge. */
        if ((prlast + 1 != start_addr || urlast + 1 != uaddr) &&
            (pmlast + 1 != reg->guest_phys_addr ||
             umlast + 1 != reg->userspace_addr)) {
            continue;
        }

        if (merged) {
            --to;
            assert(to >= 0);
        } else {
            merged = reg;
        }
        u = MIN(uaddr, reg->userspace_addr);
        s = MIN(start_addr, reg->guest_phys_addr);
        e = MAX(pmlast, prlast);
        uaddr = merged->userspace_addr = u;
        start_addr = merged->guest_phys_addr = s;
        size = merged->memory_size = e - s + 1;
        assert(merged->memory_size);
    }

    if (!merged) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        memset(reg, 0, sizeof *reg);
        reg->memory_size = size;
        assert(reg->memory_size);
        reg->guest_phys_addr = start_addr;
        reg->userspace_addr = uaddr;
        ++to;
    }
    assert(to <= dev->mem->nregions + 1);
    dev->mem->nregions = to;
}

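/* Compute the log size needed to cover the highest guest-physical
 * address written through any memory region or used ring, in
 * VHOST_LOG_CHUNK units. */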
static uint64_t vhost_get_log_size(struct vhost_dev *dev)
{
    uint64_t log_size = 0;
    int i;
    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        uint64_t last = range_get_last(reg->guest_phys_addr,
                                       reg->memory_size);
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        uint64_t last = vq->used_phys + vq->used_size - 1;
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    return log_size;
}

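/* Switch the kernel to a new dirty log: point it at the new buffer
 * first, then sync and free the old one, so that no dirty bit can be
 * lost in the window between the two logs. */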
static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
{
    vhost_log_chunk_t *log;
    uint64_t log_base;
    int r;
    if (size) {
        log = qemu_mallocz(size * sizeof *log);
    } else {
        log = NULL;
    }
    log_base = (uint64_t)(unsigned long)log;
    r = ioctl(dev->control, VHOST_SET_LOG_BASE, &log_base);
    assert(r >= 0);
    vhost_client_sync_dirty_bitmap(&dev->client, 0,
                                   (target_phys_addr_t)~0x0ull);
    if (dev->log) {
        qemu_free(dev->log);
    }
    dev->log = log;
    dev->log_size = size;
}

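/* Verify that any virtqueue ring within the changed range still maps
 * to the same host addresses; rings must not move while the device is
 * running. */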
static int vhost_verify_ring_mappings(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int i;
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        target_phys_addr_t l;
        void *p;

        if (!ranges_overlap(start_addr, size, vq->ring_phys, vq->ring_size)) {
            continue;
        }
        l = vq->ring_size;
        p = cpu_physical_memory_map(vq->ring_phys, &l, 1);
        if (!p || l != vq->ring_size) {
            fprintf(stderr, "Unable to map ring buffer for ring %d\n", i);
            return -ENOMEM;
        }
        if (p != vq->ring) {
            fprintf(stderr, "Ring buffer relocated for ring %d\n", i);
            return -EBUSY;
        }
        cpu_physical_memory_unmap(p, l, 0, 0);
    }
    return 0;
}

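/* Memory client callback: keep dev->mem in sync with QEMU's physical
 * memory map, push the updated table to the kernel, and grow or shrink
 * the dirty log as the address range it must cover changes. */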
static void vhost_client_set_memory(CPUPhysMemoryClient *client,
                                    target_phys_addr_t start_addr,
                                    ram_addr_t size,
                                    ram_addr_t phys_offset,
                                    bool log_dirty)
{
    struct vhost_dev *dev = container_of(client, struct vhost_dev, client);
    ram_addr_t flags = phys_offset & ~TARGET_PAGE_MASK;
    int s = offsetof(struct vhost_memory, regions) +
        (dev->mem->nregions + 1) * sizeof dev->mem->regions[0];
    uint64_t log_size;
    int r;
    dev->mem = qemu_realloc(dev->mem, s);

    assert(size);

    /* Remove any old mapping for this range; for RAM, add the new one,
     * merging adjacent regions if any. */
    vhost_dev_unassign_memory(dev, start_addr, size);
    if (flags == IO_MEM_RAM) {
        vhost_dev_assign_memory(dev, start_addr, size,
                                (uintptr_t)qemu_get_ram_ptr(phys_offset));
    }

    if (!dev->started) {
        return;
    }

    r = vhost_verify_ring_mappings(dev, start_addr, size);
    assert(r >= 0);

    if (!dev->log_enabled) {
        r = ioctl(dev->control, VHOST_SET_MEM_TABLE, dev->mem);
        assert(r >= 0);
        return;
    }
    log_size = vhost_get_log_size(dev);
    /* We allocate an extra 4K bytes of log
     * to reduce the number of reallocations. */
#define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
    /* To log more, must increase log size before table update. */
    if (dev->log_size < log_size) {
        vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
    }
    r = ioctl(dev->control, VHOST_SET_MEM_TABLE, dev->mem);
    assert(r >= 0);
    /* To log less, can only decrease log size after table update. */
    if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
        vhost_dev_log_resize(dev, log_size);
    }
}

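/* Tell the kernel where the descriptor, available and used rings of a
 * virtqueue live in our address space, and whether writes to the used
 * ring must be logged. */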
static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx, bool enable_log)
{
    struct vhost_vring_addr addr = {
        .index = idx,
        .desc_user_addr = (uint64_t)(unsigned long)vq->desc,
        .avail_user_addr = (uint64_t)(unsigned long)vq->avail,
        .used_user_addr = (uint64_t)(unsigned long)vq->used,
        .log_guest_addr = vq->used_phys,
        .flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0,
    };
    int r = ioctl(dev->control, VHOST_SET_VRING_ADDR, &addr);
    if (r < 0) {
        return -errno;
    }
    return 0;
}

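/* Ack the features we use, adding VHOST_F_LOG_ALL when dirty logging
 * is required. */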
static int vhost_dev_set_features(struct vhost_dev *dev, bool enable_log)
{
    uint64_t features = dev->acked_features;
    int r;
    if (enable_log) {
        features |= 0x1 << VHOST_F_LOG_ALL;
    }
    r = ioctl(dev->control, VHOST_SET_FEATURES, &features);
    return r < 0 ? -errno : 0;
}

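/* Enable or disable logging for the whole device: features first, then
 * every virtqueue. On failure, roll the virtqueues already changed
 * back to the current dev->log_enabled setting. */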
static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
{
    int r, t, i;
    r = vhost_dev_set_features(dev, enable_log);
    if (r < 0) {
        goto err_features;
    }
    for (i = 0; i < dev->nvqs; ++i) {
        r = vhost_virtqueue_set_addr(dev, dev->vqs + i, i,
                                     enable_log);
        if (r < 0) {
            goto err_vq;
        }
    }
    return 0;
err_vq:
    for (; i >= 0; --i) {
        t = vhost_virtqueue_set_addr(dev, dev->vqs + i, i,
                                     dev->log_enabled);
        assert(t >= 0);
    }
    t = vhost_dev_set_features(dev, dev->log_enabled);
    assert(t >= 0);
err_features:
    return r;
}

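/* Memory client callback invoked when migration starts or stops dirty
 * logging: allocate or free the log and reprogram the device
 * accordingly. */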
static int vhost_client_migration_log(CPUPhysMemoryClient *client,
                                      int enable)
{
    struct vhost_dev *dev = container_of(client, struct vhost_dev, client);
    int r;
    if (!!enable == dev->log_enabled) {
        return 0;
    }
    if (!dev->started) {
        dev->log_enabled = enable;
        return 0;
    }
    if (!enable) {
        r = vhost_dev_set_log(dev, false);
        if (r < 0) {
            return r;
        }
        if (dev->log) {
            qemu_free(dev->log);
        }
        dev->log = NULL;
        dev->log_size = 0;
    } else {
        vhost_dev_log_resize(dev, vhost_get_log_size(dev));
        r = vhost_dev_set_log(dev, true);
        if (r < 0) {
            return r;
        }
    }
    dev->log_enabled = enable;
    return 0;
}

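/* Map a virtqueue's rings into our address space, hand their addresses
 * and the kick/call eventfds to the kernel, and start the queue from
 * the guest's last avail index. */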
static int vhost_virtqueue_init(struct vhost_dev *dev,
                                struct VirtIODevice *vdev,
                                struct vhost_virtqueue *vq,
                                unsigned idx)
{
    target_phys_addr_t s, l, a;
    int r;
    struct vhost_vring_file file = {
        .index = idx,
    };
    struct vhost_vring_state state = {
        .index = idx,
    };
    struct VirtQueue *vvq = virtio_get_queue(vdev, idx);

    if (!vdev->binding->set_host_notifier) {
        fprintf(stderr, "binding does not support host notifiers\n");
        return -ENOSYS;
    }

    vq->num = state.num = virtio_queue_get_num(vdev, idx);
    r = ioctl(dev->control, VHOST_SET_VRING_NUM, &state);
    if (r) {
        return -errno;
    }

    state.num = virtio_queue_get_last_avail_idx(vdev, idx);
    r = ioctl(dev->control, VHOST_SET_VRING_BASE, &state);
    if (r) {
        return -errno;
    }

    s = l = virtio_queue_get_desc_size(vdev, idx);
    a = virtio_queue_get_desc_addr(vdev, idx);
    vq->desc = cpu_physical_memory_map(a, &l, 0);
    if (!vq->desc || l != s) {
        r = -ENOMEM;
        goto fail_alloc_desc;
    }
    s = l = virtio_queue_get_avail_size(vdev, idx);
    a = virtio_queue_get_avail_addr(vdev, idx);
    vq->avail = cpu_physical_memory_map(a, &l, 0);
    if (!vq->avail || l != s) {
        r = -ENOMEM;
        goto fail_alloc_avail;
    }
    vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
    vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
    vq->used = cpu_physical_memory_map(a, &l, 1);
    if (!vq->used || l != s) {
        r = -ENOMEM;
        goto fail_alloc_used;
    }

    vq->ring_size = s = l = virtio_queue_get_ring_size(vdev, idx);
    vq->ring_phys = a = virtio_queue_get_ring_addr(vdev, idx);
    vq->ring = cpu_physical_memory_map(a, &l, 1);
    if (!vq->ring || l != s) {
        r = -ENOMEM;
        goto fail_alloc_ring;
    }

    r = vhost_virtqueue_set_addr(dev, vq, idx, dev->log_enabled);
    if (r < 0) {
        r = -errno;
        goto fail_alloc;
    }
    r = vdev->binding->set_host_notifier(vdev->binding_opaque, idx, true);
    if (r < 0) {
        fprintf(stderr, "Error binding host notifier: %d\n", -r);
        goto fail_host_notifier;
    }

    file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
    r = ioctl(dev->control, VHOST_SET_VRING_KICK, &file);
    if (r) {
        r = -errno;
        goto fail_kick;
    }

    file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
    r = ioctl(dev->control, VHOST_SET_VRING_CALL, &file);
    if (r) {
        r = -errno;
        goto fail_call;
    }

    return 0;

fail_call:
fail_kick:
    vdev->binding->set_host_notifier(vdev->binding_opaque, idx, false);
fail_host_notifier:
fail_alloc:
    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, 0);
fail_alloc_ring:
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              0, 0);
fail_alloc_used:
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, 0);
fail_alloc_avail:
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, 0);
fail_alloc_desc:
    return r;
}

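/* Stop a virtqueue: detach the host notifier, read back the last avail
 * index processed by the kernel so the guest-visible virtio state stays
 * consistent, and unmap the rings. */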
static void vhost_virtqueue_cleanup(struct vhost_dev *dev,
                                    struct VirtIODevice *vdev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx)
{
    struct vhost_vring_state state = {
        .index = idx,
    };
    int r;
    r = vdev->binding->set_host_notifier(vdev->binding_opaque, idx, false);
    if (r < 0) {
        fprintf(stderr, "vhost VQ %d host cleanup failed: %d\n", idx, r);
        fflush(stderr);
    }
    assert(r >= 0);
    r = ioctl(dev->control, VHOST_GET_VRING_BASE, &state);
    if (r < 0) {
        fprintf(stderr, "vhost VQ %d ring restore failed: %d\n", idx, r);
        fflush(stderr);
    }
    virtio_queue_set_last_avail_idx(vdev, idx, state.num);
    assert(r >= 0);
    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, virtio_queue_get_ring_size(vdev, idx));
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              1, virtio_queue_get_used_size(vdev, idx));
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, virtio_queue_get_avail_size(vdev, idx));
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, virtio_queue_get_desc_size(vdev, idx));
}

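/* Open (or adopt) the vhost control fd, take ownership, query the
 * kernel's feature set, and register as a physical memory client so we
 * start receiving memory map updates. */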
int vhost_dev_init(struct vhost_dev *hdev, int devfd, bool force)
{
    uint64_t features;
    int r;
    if (devfd >= 0) {
        hdev->control = devfd;
    } else {
        hdev->control = open("/dev/vhost-net", O_RDWR);
        if (hdev->control < 0) {
            return -errno;
        }
    }
    r = ioctl(hdev->control, VHOST_SET_OWNER, NULL);
    if (r < 0) {
        goto fail;
    }

    r = ioctl(hdev->control, VHOST_GET_FEATURES, &features);
    if (r < 0) {
        goto fail;
    }
    hdev->features = features;

    hdev->client.set_memory = vhost_client_set_memory;
    hdev->client.sync_dirty_bitmap = vhost_client_sync_dirty_bitmap;
    hdev->client.migration_log = vhost_client_migration_log;
    hdev->client.log_start = NULL;
    hdev->client.log_stop = NULL;
    hdev->mem = qemu_mallocz(offsetof(struct vhost_memory, regions));
    hdev->log = NULL;
    hdev->log_size = 0;
    hdev->log_enabled = false;
    hdev->started = false;
    cpu_register_phys_memory_client(&hdev->client);
    hdev->force = force;
    return 0;
fail:
    r = -errno;
    close(hdev->control);
    return r;
}

void vhost_dev_cleanup(struct vhost_dev *hdev)
{
    cpu_unregister_phys_memory_client(&hdev->client);
    qemu_free(hdev->mem);
    close(hdev->control);
}

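/* Decide whether vhost should be used: yes if the binding cannot
 * report guest notifier support, if it reports notifiers as available,
 * or if the user forced vhost on. */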
bool vhost_dev_query(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    return !vdev->binding->query_guest_notifiers ||
        vdev->binding->query_guest_notifiers(vdev->binding_opaque) ||
        hdev->force;
}

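/* Bring the device up: bind guest notifiers, push features and the
 * memory table, initialize every virtqueue, and hand the kernel a
 * dirty log if logging is already enabled. */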
int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    uint64_t log_base;
    int i, r;
    if (!vdev->binding->set_guest_notifiers) {
        fprintf(stderr, "binding does not support guest notifiers\n");
        r = -ENOSYS;
        goto fail;
    }

    r = vdev->binding->set_guest_notifiers(vdev->binding_opaque, true);
    if (r < 0) {
        fprintf(stderr, "Error binding guest notifier: %d\n", -r);
        goto fail_notifiers;
    }

    r = vhost_dev_set_features(hdev, hdev->log_enabled);
    if (r < 0) {
        goto fail_features;
    }
    r = ioctl(hdev->control, VHOST_SET_MEM_TABLE, hdev->mem);
    if (r < 0) {
        r = -errno;
        goto fail_mem;
    }
    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_init(hdev,
                                 vdev,
                                 hdev->vqs + i,
                                 i);
        if (r < 0) {
            goto fail_vq;
        }
    }

    if (hdev->log_enabled) {
        hdev->log_size = vhost_get_log_size(hdev);
        hdev->log = hdev->log_size ?
            qemu_mallocz(hdev->log_size * sizeof *hdev->log) : NULL;
        /* The ioctl takes a pointer to the 64-bit base, as in
         * vhost_dev_log_resize(). */
        log_base = (uint64_t)(unsigned long)hdev->log;
        r = ioctl(hdev->control, VHOST_SET_LOG_BASE, &log_base);
        if (r < 0) {
            r = -errno;
            goto fail_log;
        }
    }

    hdev->started = true;

    return 0;
fail_log:
    qemu_free(hdev->log);
fail_vq:
    while (--i >= 0) {
        vhost_virtqueue_cleanup(hdev,
                                vdev,
                                hdev->vqs + i,
                                i);
    }
fail_mem:
fail_features:
    vdev->binding->set_guest_notifiers(vdev->binding_opaque, false);
fail_notifiers:
fail:
    return r;
}

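/* Stop the device: tear down every virtqueue, pull any remaining dirty
 * bits out of the log, and release the guest notifiers. */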
void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;

    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_cleanup(hdev,
                                vdev,
                                hdev->vqs + i,
                                i);
    }
    vhost_client_sync_dirty_bitmap(&hdev->client, 0,
                                   (target_phys_addr_t)~0x0ull);
    r = vdev->binding->set_guest_notifiers(vdev->binding_opaque, false);
    if (r < 0) {
        fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", r);
        fflush(stderr);
    }
    assert(r >= 0);

    hdev->started = false;
    qemu_free(hdev->log);
    hdev->log = NULL;
    hdev->log_size = 0;
}