/*
 * vhost support
 *
 * Copyright Red Hat, Inc. 2010
 *
 * Authors:
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include <sys/ioctl.h>
#include "vhost.h"
#include "hw/hw.h"
#include "range.h"
#include <linux/vhost.h>
#include "exec-memory.h"

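/* The dirty log shared with the kernel is an array of vhost_log_chunk_t
 * words; each bit covers one VHOST_LOG_PAGE (4K) of guest physical
 * memory, so one chunk word covers VHOST_LOG_CHUNK bytes.  Roughly, for
 * a guest physical address gpa, the lookup is:
 *
 *     chunk = dev->log[gpa / VHOST_LOG_CHUNK];
 *     dirty = chunk & (1ull << ((gpa % VHOST_LOG_CHUNK) / VHOST_LOG_PAGE));
 *
 * vhost_dev_sync_region() walks the chunks that intersect both the
 * synced section [mfirst, mlast] and a logged range [rfirst, rlast],
 * clears each chunk atomically, and forwards every set bit to the
 * memory API as a dirty page.
 */
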
static void vhost_dev_sync_region(struct vhost_dev *dev,
                                  MemoryRegionSection *section,
                                  uint64_t mfirst, uint64_t mlast,
                                  uint64_t rfirst, uint64_t rlast)
{
    uint64_t start = MAX(mfirst, rfirst);
    uint64_t end = MIN(mlast, rlast);
    vhost_log_chunk_t *from = dev->log + start / VHOST_LOG_CHUNK;
    vhost_log_chunk_t *to = dev->log + end / VHOST_LOG_CHUNK + 1;
    uint64_t addr = (start / VHOST_LOG_CHUNK) * VHOST_LOG_CHUNK;

    if (end < start) {
        return;
    }
    assert(end / VHOST_LOG_CHUNK < dev->log_size);
    assert(start / VHOST_LOG_CHUNK < dev->log_size);

    for (; from < to; ++from) {
        vhost_log_chunk_t log;
        int bit;
        /* We first check with non-atomic: much cheaper,
         * and we expect non-dirty to be the common case. */
        if (!*from) {
            addr += VHOST_LOG_CHUNK;
            continue;
        }
        /* Data must be read atomically. We don't really
         * need the barrier semantics of __sync
         * builtins, but it's easier to use them than
         * roll our own. */
        log = __sync_fetch_and_and(from, 0);
        while ((bit = sizeof(log) > sizeof(int) ?
                ffsll(log) : ffs(log))) {
            ram_addr_t ram_addr;
            bit -= 1;
            /* Translate the dirty guest physical page into an offset
             * within the section's memory region. */
            ram_addr = addr + bit * VHOST_LOG_PAGE
                - section->offset_within_address_space
                + section->offset_within_region;
            memory_region_set_dirty(section->mr, ram_addr, VHOST_LOG_PAGE);
            log &= ~(0x1ull << bit);
        }
        addr += VHOST_LOG_CHUNK;
    }
}

static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
                                   MemoryRegionSection *section,
                                   target_phys_addr_t start_addr,
                                   target_phys_addr_t end_addr)
{
    int i;

    if (!dev->log_enabled || !dev->started) {
        return 0;
    }
    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr,
                              reg->guest_phys_addr,
                              range_get_last(reg->guest_phys_addr,
                                             reg->memory_size));
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys,
                              range_get_last(vq->used_phys, vq->used_size));
    }
    return 0;
}
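
/* Note that both loops above feed the same sync primitive: the memory
 * regions cover guest RAM that the device may DMA into, while the used
 * rings are listed separately because the kernel logs its own writes to
 * them through each vring's log_guest_addr (see
 * vhost_virtqueue_set_addr() below). */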

static void vhost_log_sync(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    target_phys_addr_t start_addr = section->offset_within_address_space;
    /* vhost_sync_dirty_bitmap() takes an inclusive last address. */
    target_phys_addr_t end_addr = start_addr + section->size - 1;

    vhost_sync_dirty_bitmap(dev, section, start_addr, end_addr);
}

/* Assign/unassign. Keep an unsorted array of non-overlapping
 * memory regions in dev->mem. */
static void vhost_dev_unassign_memory(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int from, to, n = dev->mem->nregions;
    /* Track overlapping/split regions for sanity checking. */
    int overlap_start = 0, overlap_end = 0, overlap_middle = 0, split = 0;

    for (from = 0, to = 0; from < n; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t reglast;
        uint64_t memlast;
        uint64_t change;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }

        /* No overlap is simple */
        if (!ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                            start_addr, size)) {
            continue;
        }

        /* Split only happens if supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!split);

        reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        memlast = range_get_last(start_addr, size);

        /* Remove whole region */
        if (start_addr <= reg->guest_phys_addr && memlast >= reglast) {
            --dev->mem->nregions;
            --to;
            ++overlap_middle;
            continue;
        }

        /* Shrink region */
        if (memlast >= reglast) {
            reg->memory_size = start_addr - reg->guest_phys_addr;
            assert(reg->memory_size);
            assert(!overlap_end);
            ++overlap_end;
            continue;
        }

        /* Shift region */
        if (start_addr <= reg->guest_phys_addr) {
            change = memlast + 1 - reg->guest_phys_addr;
            reg->memory_size -= change;
            reg->guest_phys_addr += change;
            reg->userspace_addr += change;
            assert(reg->memory_size);
            assert(!overlap_start);
            ++overlap_start;
            continue;
        }

        /* This only happens if supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!overlap_start);
        assert(!overlap_end);
        assert(!overlap_middle);
        /* Split region: shrink first part, shift second part. */
        memcpy(dev->mem->regions + n, reg, sizeof *reg);
        reg->memory_size = start_addr - reg->guest_phys_addr;
        assert(reg->memory_size);
        change = memlast + 1 - reg->guest_phys_addr;
        reg = dev->mem->regions + n;
        reg->memory_size -= change;
        assert(reg->memory_size);
        reg->guest_phys_addr += change;
        reg->userspace_addr += change;
        /* Never add more than 1 region */
        assert(dev->mem->nregions == n);
        ++dev->mem->nregions;
        ++split;
    }
}
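
/* The unassign loop above distinguishes four ways the supplied range can
 * intersect an existing region [a, b]:
 *
 *   range covers [a, b] entirely     -> remove the region
 *   range covers the tail of [a, b]  -> shrink: keep [a, start_addr)
 *   range covers the head of [a, b]  -> shift: keep (memlast, b]
 *   range strictly inside (a, b)     -> split into a head and a tail part
 *
 * The overlap_* and split counters exist only for the asserts, which
 * check that combinations impossible for non-overlapping regions never
 * occur within a single call. */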

/* Called after unassign, so no regions overlap the given range. */
static void vhost_dev_assign_memory(struct vhost_dev *dev,
                                    uint64_t start_addr,
                                    uint64_t size,
                                    uint64_t uaddr)
{
    int from, to;
    struct vhost_memory_region *merged = NULL;
    for (from = 0, to = 0; from < dev->mem->nregions; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t prlast, urlast;
        uint64_t pmlast, umlast;
        uint64_t s, e, u;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }
        prlast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        pmlast = range_get_last(start_addr, size);
        urlast = range_get_last(reg->userspace_addr, reg->memory_size);
        umlast = range_get_last(uaddr, size);

        /* check for overlapping regions: should never happen. */
        assert(prlast < start_addr || pmlast < reg->guest_phys_addr);
        /* Not an adjacent or overlapping region - do not merge. */
        if ((prlast + 1 != start_addr || urlast + 1 != uaddr) &&
            (pmlast + 1 != reg->guest_phys_addr ||
             umlast + 1 != reg->userspace_addr)) {
            continue;
        }

        if (merged) {
            --to;
            assert(to >= 0);
        } else {
            merged = reg;
        }
        u = MIN(uaddr, reg->userspace_addr);
        s = MIN(start_addr, reg->guest_phys_addr);
        e = MAX(pmlast, prlast);
        uaddr = merged->userspace_addr = u;
        start_addr = merged->guest_phys_addr = s;
        size = merged->memory_size = e - s + 1;
        assert(merged->memory_size);
    }

    if (!merged) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        memset(reg, 0, sizeof *reg);
        reg->memory_size = size;
        assert(reg->memory_size);
        reg->guest_phys_addr = start_addr;
        reg->userspace_addr = uaddr;
        ++to;
    }
    assert(to <= dev->mem->nregions + 1);
    dev->mem->nregions = to;
}
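
/* Mappings are merged only when they are contiguous in guest physical
 * space and in userspace at the same time, because the kernel describes
 * each region by one linear offset (userspace_addr - guest_phys_addr).
 * For example, [0x0, 0x100000) at uaddr u and [0x100000, 0x200000) at
 * uaddr u + 0x100000 collapse into a single vhost_memory_region,
 * shortening the table the kernel must search on every access. */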

static uint64_t vhost_get_log_size(struct vhost_dev *dev)
{
    uint64_t log_size = 0;
    int i;
    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        uint64_t last = range_get_last(reg->guest_phys_addr,
                                       reg->memory_size);
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        uint64_t last = vq->used_phys + vq->used_size - 1;
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    return log_size;
}
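
/* Worked example, on a 64-bit host: VHOST_LOG_PAGE is 0x1000 and a chunk
 * holds 64 bits, so VHOST_LOG_CHUNK is 0x40000.  A region covering guest
 * physical [0, 0x100000) has last = 0xfffff and thus needs
 * 0xfffff / 0x40000 + 1 = 4 chunks of log.  The overall log size is the
 * maximum over all regions and used rings, since the log is indexed by
 * absolute guest physical address. */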

static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
{
    vhost_log_chunk_t *log;
    uint64_t log_base;
    int r, i;
    if (size) {
        log = g_malloc0(size * sizeof *log);
    } else {
        log = NULL;
    }
    log_base = (uint64_t)(unsigned long)log;
    r = ioctl(dev->control, VHOST_SET_LOG_BASE, &log_base);
    assert(r >= 0);
    for (i = 0; i < dev->n_mem_sections; ++i) {
        vhost_sync_dirty_bitmap(dev, &dev->mem_sections[i],
                                0, (target_phys_addr_t)~0x0ull);
    }
    if (dev->log) {
        g_free(dev->log);
    }
    dev->log = log;
    dev->log_size = size;
}
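
/* The sync loop in vhost_dev_log_resize() runs after VHOST_SET_LOG_BASE
 * on purpose: once the kernel has switched to the new (zeroed) buffer,
 * the old one can no longer change, so any bits still set in it can be
 * flushed to the memory API before it is freed.  Writes racing with the
 * switch land in one buffer or the other and are never lost. */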

static int vhost_verify_ring_mappings(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int i;
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        target_phys_addr_t l;
        void *p;

        if (!ranges_overlap(start_addr, size, vq->ring_phys, vq->ring_size)) {
            continue;
        }
        l = vq->ring_size;
        p = cpu_physical_memory_map(vq->ring_phys, &l, 1);
        if (!p || l != vq->ring_size) {
            fprintf(stderr, "Unable to map ring buffer for ring %d\n", i);
            return -ENOMEM;
        }
        if (p != vq->ring) {
            fprintf(stderr, "Ring buffer relocated for ring %d\n", i);
            return -EBUSY;
        }
        cpu_physical_memory_unmap(p, l, 0, 0);
    }
    return 0;
}

static struct vhost_memory_region *vhost_dev_find_reg(struct vhost_dev *dev,
                                                      uint64_t start_addr,
                                                      uint64_t size)
{
    int i, n = dev->mem->nregions;
    for (i = 0; i < n; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        if (ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                           start_addr, size)) {
            return reg;
        }
    }
    return NULL;
}

static bool vhost_dev_cmp_memory(struct vhost_dev *dev,
                                 uint64_t start_addr,
                                 uint64_t size,
                                 uint64_t uaddr)
{
    struct vhost_memory_region *reg = vhost_dev_find_reg(dev, start_addr, size);
    uint64_t reglast;
    uint64_t memlast;

    if (!reg) {
        return true;
    }

    reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
    memlast = range_get_last(start_addr, size);

    /* Need to extend region? */
    if (start_addr < reg->guest_phys_addr || memlast > reglast) {
        return true;
    }
    /* userspace_addr changed? */
    return uaddr != reg->userspace_addr + start_addr - reg->guest_phys_addr;
}

static void vhost_set_memory(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool add)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    target_phys_addr_t start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    bool log_dirty = memory_region_is_logging(section->mr);
    int s = offsetof(struct vhost_memory, regions) +
        (dev->mem->nregions + 1) * sizeof dev->mem->regions[0];
    uint64_t log_size;
    int r;
    void *ram;

    dev->mem = g_realloc(dev->mem, s);

    if (log_dirty) {
        add = false;
    }

    assert(size);

    /* Optimize no-change case. At least cirrus_vga does this a lot at this time. */
    ram = memory_region_get_ram_ptr(section->mr) + section->offset_within_region;
    if (add) {
        if (!vhost_dev_cmp_memory(dev, start_addr, size, (uintptr_t)ram)) {
            /* Region exists with same address. Nothing to do. */
            return;
        }
    } else {
        if (!vhost_dev_find_reg(dev, start_addr, size)) {
            /* Removing region that we don't access. Nothing to do. */
            return;
        }
    }

    /* Remove old mapping for this memory, if any. */
    vhost_dev_unassign_memory(dev, start_addr, size);
    if (add) {
        /* Add given mapping, merging adjacent regions if any */
        vhost_dev_assign_memory(dev, start_addr, size, (uintptr_t)ram);
    }

    if (!dev->started) {
        return;
    }

    r = vhost_verify_ring_mappings(dev, start_addr, size);
    assert(r >= 0);

    if (!dev->log_enabled) {
        r = ioctl(dev->control, VHOST_SET_MEM_TABLE, dev->mem);
        assert(r >= 0);
        return;
    }
    log_size = vhost_get_log_size(dev);
    /* We allocate an extra 4K bytes to log,
     * to reduce the number of reallocations. */
#define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
    /* To log more, must increase log size before table update. */
    if (dev->log_size < log_size) {
        vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
    }
    r = ioctl(dev->control, VHOST_SET_MEM_TABLE, dev->mem);
    assert(r >= 0);
    /* To log less, can only decrease log size after table update. */
    if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
        vhost_dev_log_resize(dev, log_size);
    }
}
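
/* The grow/shrink asymmetry above keeps every page the kernel might log
 * inside the allocated buffer at all times: growing happens before
 * VHOST_SET_MEM_TABLE so the new table can never log past the end, and
 * shrinking happens after it, once the kernel has stopped logging the
 * high addresses.  The VHOST_LOG_BUFFER slack (4K worth of chunks) adds
 * hysteresis so alternating add/remove callbacks do not reallocate the
 * log on every change. */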

static bool vhost_section(MemoryRegionSection *section)
{
    return section->address_space == get_system_memory()
        && memory_region_is_ram(section->mr);
}

static void vhost_begin(MemoryListener *listener)
{
}

static void vhost_commit(MemoryListener *listener)
{
}

static void vhost_region_add(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);

    if (!vhost_section(section)) {
        return;
    }

    ++dev->n_mem_sections;
    dev->mem_sections = g_renew(MemoryRegionSection, dev->mem_sections,
                                dev->n_mem_sections);
    dev->mem_sections[dev->n_mem_sections - 1] = *section;
    vhost_set_memory(listener, section, true);
}

static void vhost_region_del(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int i;

    if (!vhost_section(section)) {
        return;
    }

    vhost_set_memory(listener, section, false);
    for (i = 0; i < dev->n_mem_sections; ++i) {
        if (dev->mem_sections[i].offset_within_address_space
            == section->offset_within_address_space) {
            --dev->n_mem_sections;
            memmove(&dev->mem_sections[i], &dev->mem_sections[i + 1],
                    (dev->n_mem_sections - i) * sizeof(*dev->mem_sections));
            break;
        }
    }
}

static void vhost_region_nop(MemoryListener *listener,
                             MemoryRegionSection *section)
{
}

static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx, bool enable_log)
{
    struct vhost_vring_addr addr = {
        .index = idx,
        .desc_user_addr = (uint64_t)(unsigned long)vq->desc,
        .avail_user_addr = (uint64_t)(unsigned long)vq->avail,
        .used_user_addr = (uint64_t)(unsigned long)vq->used,
        .log_guest_addr = vq->used_phys,
        .flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0,
    };
    int r = ioctl(dev->control, VHOST_SET_VRING_ADDR, &addr);
    if (r < 0) {
        return -errno;
    }
    return 0;
}

static int vhost_dev_set_features(struct vhost_dev *dev, bool enable_log)
{
    uint64_t features = dev->acked_features;
    int r;
    if (enable_log) {
        features |= 0x1 << VHOST_F_LOG_ALL;
    }
    r = ioctl(dev->control, VHOST_SET_FEATURES, &features);
    return r < 0 ? -errno : 0;
}

static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
{
    int r, t, i;
    r = vhost_dev_set_features(dev, enable_log);
    if (r < 0) {
        goto err_features;
    }
    for (i = 0; i < dev->nvqs; ++i) {
        r = vhost_virtqueue_set_addr(dev, dev->vqs + i, i,
                                     enable_log);
        if (r < 0) {
            goto err_vq;
        }
    }
    return 0;
err_vq:
    for (; i >= 0; --i) {
        t = vhost_virtqueue_set_addr(dev, dev->vqs + i, i,
                                     dev->log_enabled);
        assert(t >= 0);
    }
    t = vhost_dev_set_features(dev, dev->log_enabled);
    assert(t >= 0);
err_features:
    return r;
}

static int vhost_migration_log(MemoryListener *listener, int enable)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int r;
    if (!!enable == dev->log_enabled) {
        return 0;
    }
    if (!dev->started) {
        dev->log_enabled = enable;
        return 0;
    }
    if (!enable) {
        r = vhost_dev_set_log(dev, false);
        if (r < 0) {
            return r;
        }
        if (dev->log) {
            g_free(dev->log);
        }
        dev->log = NULL;
        dev->log_size = 0;
    } else {
        vhost_dev_log_resize(dev, vhost_get_log_size(dev));
        r = vhost_dev_set_log(dev, true);
        if (r < 0) {
            return r;
        }
    }
    dev->log_enabled = enable;
    return 0;
}
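
/* Ordering matters in both directions here: when enabling, the log is
 * allocated and registered (vhost_dev_log_resize) before the kernel is
 * asked to log (vhost_dev_set_log), so no write can be logged into a
 * missing buffer; when disabling, logging is switched off first and the
 * buffer is freed only afterwards. */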

static void vhost_log_global_start(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, true);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_global_stop(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, false);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_start(MemoryListener *listener,
                            MemoryRegionSection *section)
{
    /* FIXME: implement */
}

static void vhost_log_stop(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    /* FIXME: implement */
}

static int vhost_virtqueue_init(struct vhost_dev *dev,
                                struct VirtIODevice *vdev,
                                struct vhost_virtqueue *vq,
                                unsigned idx)
{
    target_phys_addr_t s, l, a;
    int r;
    struct vhost_vring_file file = {
        .index = idx,
    };
    struct vhost_vring_state state = {
        .index = idx,
    };
    struct VirtQueue *vvq = virtio_get_queue(vdev, idx);

    vq->num = state.num = virtio_queue_get_num(vdev, idx);
    r = ioctl(dev->control, VHOST_SET_VRING_NUM, &state);
    if (r) {
        return -errno;
    }

    state.num = virtio_queue_get_last_avail_idx(vdev, idx);
    r = ioctl(dev->control, VHOST_SET_VRING_BASE, &state);
    if (r) {
        return -errno;
    }

    s = l = virtio_queue_get_desc_size(vdev, idx);
    a = virtio_queue_get_desc_addr(vdev, idx);
    vq->desc = cpu_physical_memory_map(a, &l, 0);
    if (!vq->desc || l != s) {
        r = -ENOMEM;
        goto fail_alloc_desc;
    }
    s = l = virtio_queue_get_avail_size(vdev, idx);
    a = virtio_queue_get_avail_addr(vdev, idx);
    vq->avail = cpu_physical_memory_map(a, &l, 0);
    if (!vq->avail || l != s) {
        r = -ENOMEM;
        goto fail_alloc_avail;
    }
    vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
    vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
    vq->used = cpu_physical_memory_map(a, &l, 1);
    if (!vq->used || l != s) {
        r = -ENOMEM;
        goto fail_alloc_used;
    }

    vq->ring_size = s = l = virtio_queue_get_ring_size(vdev, idx);
    vq->ring_phys = a = virtio_queue_get_ring_addr(vdev, idx);
    vq->ring = cpu_physical_memory_map(a, &l, 1);
    if (!vq->ring || l != s) {
        r = -ENOMEM;
        goto fail_alloc_ring;
    }

    r = vhost_virtqueue_set_addr(dev, vq, idx, dev->log_enabled);
    if (r < 0) {
        r = -errno;
        goto fail_alloc;
    }
    file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
    r = ioctl(dev->control, VHOST_SET_VRING_KICK, &file);
    if (r) {
        r = -errno;
        goto fail_kick;
    }

    file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
    r = ioctl(dev->control, VHOST_SET_VRING_CALL, &file);
    if (r) {
        r = -errno;
        goto fail_call;
    }

    return 0;

fail_call:
fail_kick:
fail_alloc:
    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, 0);
fail_alloc_ring:
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              0, 0);
fail_alloc_used:
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, 0);
fail_alloc_avail:
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, 0);
fail_alloc_desc:
    return r;
}
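
/* The error labels above unwind in strict reverse order of setup: a
 * failure at any step unmaps exactly the rings mapped so far.  The
 * unmaps pass access_len 0 because nothing has been written back to
 * guest memory at this point. */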

static void vhost_virtqueue_cleanup(struct vhost_dev *dev,
                                    struct VirtIODevice *vdev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx)
{
    struct vhost_vring_state state = {
        .index = idx,
    };
    int r;
    r = ioctl(dev->control, VHOST_GET_VRING_BASE, &state);
    if (r < 0) {
        fprintf(stderr, "vhost VQ %d ring restore failed: %d\n", idx, r);
        fflush(stderr);
    }
    virtio_queue_set_last_avail_idx(vdev, idx, state.num);
    assert(r >= 0);
    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, virtio_queue_get_ring_size(vdev, idx));
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              1, virtio_queue_get_used_size(vdev, idx));
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, virtio_queue_get_avail_size(vdev, idx));
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, virtio_queue_get_desc_size(vdev, idx));
}

static void vhost_eventfd_add(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, int fd)
{
}

static void vhost_eventfd_del(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, int fd)
{
}

int vhost_dev_init(struct vhost_dev *hdev, int devfd, bool force)
{
    uint64_t features;
    int r;
    if (devfd >= 0) {
        hdev->control = devfd;
    } else {
        hdev->control = open("/dev/vhost-net", O_RDWR);
        if (hdev->control < 0) {
            return -errno;
        }
    }
    r = ioctl(hdev->control, VHOST_SET_OWNER, NULL);
    if (r < 0) {
        goto fail;
    }

    r = ioctl(hdev->control, VHOST_GET_FEATURES, &features);
    if (r < 0) {
        goto fail;
    }
    hdev->features = features;

    hdev->memory_listener = (MemoryListener) {
        .begin = vhost_begin,
        .commit = vhost_commit,
        .region_add = vhost_region_add,
        .region_del = vhost_region_del,
        .region_nop = vhost_region_nop,
        .log_start = vhost_log_start,
        .log_stop = vhost_log_stop,
        .log_sync = vhost_log_sync,
        .log_global_start = vhost_log_global_start,
        .log_global_stop = vhost_log_global_stop,
        .eventfd_add = vhost_eventfd_add,
        .eventfd_del = vhost_eventfd_del,
        .priority = 10
    };
    hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
    hdev->n_mem_sections = 0;
    hdev->mem_sections = NULL;
    hdev->log = NULL;
    hdev->log_size = 0;
    hdev->log_enabled = false;
    hdev->started = false;
    memory_listener_register(&hdev->memory_listener, NULL);
    hdev->force = force;
    return 0;
fail:
    r = -errno;
    close(hdev->control);
    return r;
}

void vhost_dev_cleanup(struct vhost_dev *hdev)
{
    memory_listener_unregister(&hdev->memory_listener);
    g_free(hdev->mem);
    g_free(hdev->mem_sections);
    close(hdev->control);
}

bool vhost_dev_query(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    return !vdev->binding->query_guest_notifiers ||
        vdev->binding->query_guest_notifiers(vdev->binding_opaque) ||
        hdev->force;
}

/* Stop processing guest IO notifications in qemu.
 * Start processing them in vhost in kernel.
 */
int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;
    if (!vdev->binding->set_host_notifier) {
        fprintf(stderr, "binding does not support host notifiers\n");
        r = -ENOSYS;
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i) {
        r = vdev->binding->set_host_notifier(vdev->binding_opaque, i, true);
        if (r < 0) {
            fprintf(stderr, "vhost VQ %d notifier binding failed: %d\n", i, -r);
            goto fail_vq;
        }
    }

    return 0;
fail_vq:
    while (--i >= 0) {
        r = vdev->binding->set_host_notifier(vdev->binding_opaque, i, false);
        if (r < 0) {
            fprintf(stderr, "vhost VQ %d notifier cleanup error: %d\n", i, -r);
            fflush(stderr);
        }
        assert(r >= 0);
    }
fail:
    return r;
}

/* Stop processing guest IO notifications in vhost.
 * Start processing them in qemu.
 * This might actually run the qemu handlers right away,
 * so virtio in qemu must be completely setup when this is called.
 */
void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;

    for (i = 0; i < hdev->nvqs; ++i) {
        r = vdev->binding->set_host_notifier(vdev->binding_opaque, i, false);
        if (r < 0) {
            fprintf(stderr, "vhost VQ %d notifier cleanup failed: %d\n", i, -r);
            fflush(stderr);
        }
        assert(r >= 0);
    }
}

/* Host notifiers must be enabled at this point. */
int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;
    if (!vdev->binding->set_guest_notifiers) {
        fprintf(stderr, "binding does not support guest notifiers\n");
        r = -ENOSYS;
        goto fail;
    }

    r = vdev->binding->set_guest_notifiers(vdev->binding_opaque, true);
    if (r < 0) {
        fprintf(stderr, "Error binding guest notifier: %d\n", -r);
        goto fail_notifiers;
    }

    r = vhost_dev_set_features(hdev, hdev->log_enabled);
    if (r < 0) {
        goto fail_features;
    }
    r = ioctl(hdev->control, VHOST_SET_MEM_TABLE, hdev->mem);
    if (r < 0) {
        r = -errno;
        goto fail_mem;
    }
    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_init(hdev,
                                 vdev,
                                 hdev->vqs + i,
                                 i);
        if (r < 0) {
            goto fail_vq;
        }
    }

    if (hdev->log_enabled) {
        uint64_t log_base;

        hdev->log_size = vhost_get_log_size(hdev);
        hdev->log = hdev->log_size ?
            g_malloc0(hdev->log_size * sizeof *hdev->log) : NULL;
        /* VHOST_SET_LOG_BASE takes a pointer to the 64-bit address. */
        log_base = (uint64_t)(unsigned long)hdev->log;
        r = ioctl(hdev->control, VHOST_SET_LOG_BASE, &log_base);
        if (r < 0) {
            r = -errno;
            goto fail_log;
        }
    }

    hdev->started = true;

    return 0;
fail_log:
fail_vq:
    while (--i >= 0) {
        vhost_virtqueue_cleanup(hdev,
                                vdev,
                                hdev->vqs + i,
                                i);
    }
fail_mem:
fail_features:
    vdev->binding->set_guest_notifiers(vdev->binding_opaque, false);
fail_notifiers:
fail:
    return r;
}
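
/* The start-up sequence above follows the order the kernel expects:
 * guest notifiers first, then features (including VHOST_F_LOG_ALL when a
 * migration is in progress), then the memory table, then each virtqueue,
 * and finally the dirty log, whose required size is only known once the
 * final memory table is in place. */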

/* Host notifiers must be enabled at this point. */
void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;

    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_cleanup(hdev,
                                vdev,
                                hdev->vqs + i,
                                i);
    }
    for (i = 0; i < hdev->n_mem_sections; ++i) {
        vhost_sync_dirty_bitmap(hdev, &hdev->mem_sections[i],
                                0, (target_phys_addr_t)~0x0ull);
    }
    r = vdev->binding->set_guest_notifiers(vdev->binding_opaque, false);
    if (r < 0) {
        fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", r);
        fflush(stderr);
    }
    assert(r >= 0);

    hdev->started = false;
    g_free(hdev->log);
    hdev->log = NULL;
    hdev->log_size = 0;
}