/*
 * vhost support
 *
 * Copyright Red Hat, Inc. 2010
 *
 * Authors:
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include <sys/ioctl.h>
#include "hw/vhost.h"
#include "hw/hw.h"
#include "qemu/range.h"
#include <linux/vhost.h>
#include "exec/address-spaces.h"

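/* The vhost kernel backend tracks guest writes in a shared dirty log:
 * an array of vhost_log_chunk_t words in which each bit covers one
 * VHOST_LOG_PAGE sized page of guest memory.  The helpers below sync
 * that log into QEMU's dirty bitmap and keep the backend's view of
 * guest memory (dev->mem) in step with the MemoryListener callbacks. */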
static void vhost_dev_sync_region(struct vhost_dev *dev,
                                  MemoryRegionSection *section,
                                  uint64_t mfirst, uint64_t mlast,
                                  uint64_t rfirst, uint64_t rlast)
{
    uint64_t start = MAX(mfirst, rfirst);
    uint64_t end = MIN(mlast, rlast);
    vhost_log_chunk_t *from = dev->log + start / VHOST_LOG_CHUNK;
    vhost_log_chunk_t *to = dev->log + end / VHOST_LOG_CHUNK + 1;
    uint64_t addr = (start / VHOST_LOG_CHUNK) * VHOST_LOG_CHUNK;

    if (end < start) {
        return;
    }
    assert(end / VHOST_LOG_CHUNK < dev->log_size);
    assert(start / VHOST_LOG_CHUNK < dev->log_size);

    for (; from < to; ++from) {
        vhost_log_chunk_t log;
        int bit;
        /* We first check with non-atomic: much cheaper,
         * and we expect non-dirty to be the common case. */
        if (!*from) {
            addr += VHOST_LOG_CHUNK;
            continue;
        }
        /* Data must be read atomically.  We don't really need the
         * barrier semantics of the __sync builtins, but it's easier
         * to use them than to roll our own. */
        log = __sync_fetch_and_and(from, 0);
        while ((bit = sizeof(log) > sizeof(int) ?
                ffsll(log) : ffs(log))) {
            ram_addr_t ram_addr;
            bit -= 1;
            ram_addr = section->offset_within_region + bit * VHOST_LOG_PAGE;
            memory_region_set_dirty(section->mr, ram_addr, VHOST_LOG_PAGE);
            log &= ~(0x1ull << bit);
        }
        addr += VHOST_LOG_CHUNK;
    }
}

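/* Sync the dirty log for one MemoryRegionSection, clamped to
 * [start_addr, end_addr].  Both the guest memory regions known to the
 * backend and the used rings, which the kernel also writes to, are
 * covered. */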
static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
                                   MemoryRegionSection *section,
                                   hwaddr start_addr,
                                   hwaddr end_addr)
{
    int i;

    if (!dev->log_enabled || !dev->started) {
        return 0;
    }
    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr,
                              reg->guest_phys_addr,
                              range_get_last(reg->guest_phys_addr,
                                             reg->memory_size));
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys,
                              range_get_last(vq->used_phys, vq->used_size));
    }
    return 0;
}

static void vhost_log_sync(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    hwaddr start_addr = section->offset_within_address_space;
    /* end_addr is inclusive, matching range_get_last() elsewhere. */
    hwaddr end_addr = start_addr + section->size - 1;

    vhost_sync_dirty_bitmap(dev, section, start_addr, end_addr);
}

/* Assign/unassign.  Keep an unsorted array of non-overlapping
 * memory regions in dev->mem. */
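/* Removing a range from an existing region has four cases, handled in
 * order below: the range covers the whole region (drop it), covers its
 * tail (shrink it), covers its head (shift start and userspace_addr up),
 * or sits strictly inside it (split it in two). */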
static void vhost_dev_unassign_memory(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int from, to, n = dev->mem->nregions;
    /* Track overlapping/split regions for sanity checking. */
    int overlap_start = 0, overlap_end = 0, overlap_middle = 0, split = 0;

    for (from = 0, to = 0; from < n; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t reglast;
        uint64_t memlast;
        uint64_t change;

        /* Clone old region. */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }

        /* No overlap is simple. */
        if (!ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                            start_addr, size)) {
            continue;
        }

        /* A split only happens if the supplied range is in the middle
         * of an existing region, so it cannot overlap with any other
         * existing region. */
        assert(!split);

        reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        memlast = range_get_last(start_addr, size);

        /* Remove whole region. */
        if (start_addr <= reg->guest_phys_addr && memlast >= reglast) {
            --dev->mem->nregions;
            --to;
            ++overlap_middle;
            continue;
        }

        /* Shrink region. */
        if (memlast >= reglast) {
            reg->memory_size = start_addr - reg->guest_phys_addr;
            assert(reg->memory_size);
            assert(!overlap_end);
            ++overlap_end;
            continue;
        }

        /* Shift region. */
        if (start_addr <= reg->guest_phys_addr) {
            change = memlast + 1 - reg->guest_phys_addr;
            reg->memory_size -= change;
            reg->guest_phys_addr += change;
            reg->userspace_addr += change;
            assert(reg->memory_size);
            assert(!overlap_start);
            ++overlap_start;
            continue;
        }

        /* This only happens if the supplied range is in the middle
         * of an existing region, so it cannot overlap with any other
         * existing region. */
        assert(!overlap_start);
        assert(!overlap_end);
        assert(!overlap_middle);
        /* Split region: shrink first part, shift second part. */
        memcpy(dev->mem->regions + n, reg, sizeof *reg);
        reg->memory_size = start_addr - reg->guest_phys_addr;
        assert(reg->memory_size);
        change = memlast + 1 - reg->guest_phys_addr;
        reg = dev->mem->regions + n;
        reg->memory_size -= change;
        assert(reg->memory_size);
        reg->guest_phys_addr += change;
        reg->userspace_addr += change;
        /* Never add more than 1 region. */
        assert(dev->mem->nregions == n);
        ++dev->mem->nregions;
        ++split;
    }
}

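/* For example (a sketch with made-up addresses): assigning guest range
 * [0x1000, 0x1fff] directly after an existing region [0x0, 0xfff] whose
 * userspace mapping is also contiguous merges the two into one region
 * [0x0, 0x1fff]; if the userspace addresses do not line up, the new
 * range is kept as a separate region. */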
/* Called after unassign, so no regions overlap the given range. */
static void vhost_dev_assign_memory(struct vhost_dev *dev,
                                    uint64_t start_addr,
                                    uint64_t size,
                                    uint64_t uaddr)
{
    int from, to;
    struct vhost_memory_region *merged = NULL;
    for (from = 0, to = 0; from < dev->mem->nregions; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t prlast, urlast;
        uint64_t pmlast, umlast;
        uint64_t s, e, u;

        /* Clone old region. */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }
        prlast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        pmlast = range_get_last(start_addr, size);
        urlast = range_get_last(reg->userspace_addr, reg->memory_size);
        umlast = range_get_last(uaddr, size);

        /* Check for overlapping regions: should never happen. */
        assert(prlast < start_addr || pmlast < reg->guest_phys_addr);
        /* Not an adjacent or overlapping region - do not merge. */
        if ((prlast + 1 != start_addr || urlast + 1 != uaddr) &&
            (pmlast + 1 != reg->guest_phys_addr ||
             umlast + 1 != reg->userspace_addr)) {
            continue;
        }

        if (merged) {
            --to;
            assert(to >= 0);
        } else {
            merged = reg;
        }
        u = MIN(uaddr, reg->userspace_addr);
        s = MIN(start_addr, reg->guest_phys_addr);
        e = MAX(pmlast, prlast);
        uaddr = merged->userspace_addr = u;
        start_addr = merged->guest_phys_addr = s;
        size = merged->memory_size = e - s + 1;
        assert(merged->memory_size);
    }

    if (!merged) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        memset(reg, 0, sizeof *reg);
        reg->memory_size = size;
        assert(reg->memory_size);
        reg->guest_phys_addr = start_addr;
        reg->userspace_addr = uaddr;
        ++to;
    }
    assert(to <= dev->mem->nregions + 1);
    dev->mem->nregions = to;
}

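/* The log must be large enough to cover the highest page that can be
 * dirtied: the last byte of any memory region or used ring, expressed
 * in VHOST_LOG_CHUNK units. */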
static uint64_t vhost_get_log_size(struct vhost_dev *dev)
{
    uint64_t log_size = 0;
    int i;
    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        uint64_t last = range_get_last(reg->guest_phys_addr,
                                       reg->memory_size);
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        uint64_t last = vq->used_phys + vq->used_size - 1;
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    return log_size;
}

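/* Resize the dirty log without losing updates: point the kernel at the
 * new (zeroed) log first, then flush whatever the old log still holds
 * into QEMU's dirty bitmap before freeing it. */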
static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
{
    vhost_log_chunk_t *log;
    uint64_t log_base;
    int r, i;

    log = g_malloc0(size * sizeof *log);
    log_base = (uint64_t)(unsigned long)log;
    r = ioctl(dev->control, VHOST_SET_LOG_BASE, &log_base);
    assert(r >= 0);
    for (i = 0; i < dev->n_mem_sections; ++i) {
        /* Sync only the range covered by the old log. */
        vhost_sync_dirty_bitmap(dev, &dev->mem_sections[i], 0,
                                dev->log_size * VHOST_LOG_CHUNK - 1);
    }
    g_free(dev->log);
    dev->log = log;
    dev->log_size = size;
}

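/* The kernel backend keeps raw userspace pointers to the rings, so a
 * memory table update may not move a ring that is currently in use.
 * Check that any ring overlapping the changed range still maps to the
 * same host address. */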
static int vhost_verify_ring_mappings(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int i;
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        hwaddr l;
        void *p;

        if (!ranges_overlap(start_addr, size, vq->ring_phys, vq->ring_size)) {
            continue;
        }
        l = vq->ring_size;
        p = cpu_physical_memory_map(vq->ring_phys, &l, 1);
        if (!p || l != vq->ring_size) {
            fprintf(stderr, "Unable to map ring buffer for ring %d\n", i);
            return -ENOMEM;
        }
        if (p != vq->ring) {
            fprintf(stderr, "Ring buffer relocated for ring %d\n", i);
            return -EBUSY;
        }
        cpu_physical_memory_unmap(p, l, 0, 0);
    }
    return 0;
}

static struct vhost_memory_region *vhost_dev_find_reg(struct vhost_dev *dev,
                                                      uint64_t start_addr,
                                                      uint64_t size)
{
    int i, n = dev->mem->nregions;
    for (i = 0; i < n; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        if (ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                           start_addr, size)) {
            return reg;
        }
    }
    return NULL;
}

static bool vhost_dev_cmp_memory(struct vhost_dev *dev,
                                 uint64_t start_addr,
                                 uint64_t size,
                                 uint64_t uaddr)
{
    struct vhost_memory_region *reg = vhost_dev_find_reg(dev, start_addr, size);
    uint64_t reglast;
    uint64_t memlast;

    if (!reg) {
        return true;
    }

    reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
    memlast = range_get_last(start_addr, size);

    /* Need to extend region? */
    if (start_addr < reg->guest_phys_addr || memlast > reglast) {
        return true;
    }
    /* userspace_addr changed? */
    return uaddr != reg->userspace_addr + start_addr - reg->guest_phys_addr;
}

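/* Apply one MemoryRegionSection change to the backend's memory table:
 * drop any old mapping for the range, insert the new one if this is an
 * add, and, when the device is running, push the updated table to the
 * kernel with VHOST_SET_MEM_TABLE, growing the dirty log first if the
 * table now reaches higher addresses. */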
static void vhost_set_memory(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool add)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    bool log_dirty = memory_region_is_logging(section->mr);
    int s = offsetof(struct vhost_memory, regions) +
        (dev->mem->nregions + 1) * sizeof dev->mem->regions[0];
    uint64_t log_size;
    int r;
    void *ram;

    dev->mem = g_realloc(dev->mem, s);

    /* vhost can only log dirty pages for the device as a whole, not per
     * region, so keep regions that qemu is dirty-logging itself (e.g.
     * video memory) out of the backend's table. */
    if (log_dirty) {
        add = false;
    }

    assert(size);

    /* Optimize no-change case.  At least cirrus_vga does this a lot
     * at this time. */
    ram = memory_region_get_ram_ptr(section->mr) + section->offset_within_region;
    if (add) {
        if (!vhost_dev_cmp_memory(dev, start_addr, size, (uintptr_t)ram)) {
            /* Region exists with same address.  Nothing to do. */
            return;
        }
    } else {
        if (!vhost_dev_find_reg(dev, start_addr, size)) {
            /* Removing a region that we don't access.  Nothing to do. */
            return;
        }
    }

    vhost_dev_unassign_memory(dev, start_addr, size);
    if (add) {
        /* Add the given mapping, merging adjacent regions if any. */
        vhost_dev_assign_memory(dev, start_addr, size, (uintptr_t)ram);
    } else {
        /* Remove the old mapping for this memory, if any. */
        vhost_dev_unassign_memory(dev, start_addr, size);
    }

    if (!dev->started) {
        return;
    }

    r = vhost_verify_ring_mappings(dev, start_addr, size);
    assert(r >= 0);

    if (!dev->log_enabled) {
        r = ioctl(dev->control, VHOST_SET_MEM_TABLE, dev->mem);
        assert(r >= 0);
        return;
    }
    log_size = vhost_get_log_size(dev);
    /* We allocate an extra 4K bytes of log to reduce the
     * number of reallocations. */
#define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
    /* To log more, must increase log size before table update. */
    if (dev->log_size < log_size) {
        vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
    }
    r = ioctl(dev->control, VHOST_SET_MEM_TABLE, dev->mem);
    assert(r >= 0);
    /* To log less, can only decrease log size after table update. */
    if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
        vhost_dev_log_resize(dev, log_size);
    }
}

static bool vhost_section(MemoryRegionSection *section)
{
    return memory_region_is_ram(section->mr);
}

static void vhost_begin(MemoryListener *listener)
{
}

static void vhost_commit(MemoryListener *listener)
{
}

static void vhost_region_add(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);

    if (!vhost_section(section)) {
        return;
    }

    ++dev->n_mem_sections;
    dev->mem_sections = g_renew(MemoryRegionSection, dev->mem_sections,
                                dev->n_mem_sections);
    dev->mem_sections[dev->n_mem_sections - 1] = *section;
    vhost_set_memory(listener, section, true);
}

static void vhost_region_del(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int i;

    if (!vhost_section(section)) {
        return;
    }

    vhost_set_memory(listener, section, false);
    for (i = 0; i < dev->n_mem_sections; ++i) {
        if (dev->mem_sections[i].offset_within_address_space
            == section->offset_within_address_space) {
            --dev->n_mem_sections;
            memmove(&dev->mem_sections[i], &dev->mem_sections[i + 1],
                    (dev->n_mem_sections - i) * sizeof(*dev->mem_sections));
            break;
        }
    }
}

static void vhost_region_nop(MemoryListener *listener,
                             MemoryRegionSection *section)
{
}

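/* Tell the kernel where a virtqueue's descriptor, avail and used rings
 * live in the qemu process, and which guest physical address to use
 * when logging writes to the used ring. */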
static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx, bool enable_log)
{
    struct vhost_vring_addr addr = {
        .index = idx,
        .desc_user_addr = (uint64_t)(unsigned long)vq->desc,
        .avail_user_addr = (uint64_t)(unsigned long)vq->avail,
        .used_user_addr = (uint64_t)(unsigned long)vq->used,
        .log_guest_addr = vq->used_phys,
        .flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0,
    };
    int r = ioctl(dev->control, VHOST_SET_VRING_ADDR, &addr);
    if (r < 0) {
        return -errno;
    }
    return 0;
}

static int vhost_dev_set_features(struct vhost_dev *dev, bool enable_log)
{
    uint64_t features = dev->acked_features;
    int r;
    if (enable_log) {
        features |= 0x1 << VHOST_F_LOG_ALL;
    }
    r = ioctl(dev->control, VHOST_SET_FEATURES, &features);
    return r < 0 ? -errno : 0;
}

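/* Toggle write logging for the whole device: set or clear
 * VHOST_F_LOG_ALL, then update each ring's address flags.  On failure,
 * roll the already-updated rings back to the current setting. */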
static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
{
    int r, t, i;
    r = vhost_dev_set_features(dev, enable_log);
    if (r < 0) {
        goto err_features;
    }
    for (i = 0; i < dev->nvqs; ++i) {
        r = vhost_virtqueue_set_addr(dev, dev->vqs + i, i,
                                     enable_log);
        if (r < 0) {
            goto err_vq;
        }
    }
    return 0;
err_vq:
    for (; i >= 0; --i) {
        t = vhost_virtqueue_set_addr(dev, dev->vqs + i, i,
                                     dev->log_enabled);
        assert(t >= 0);
    }
    t = vhost_dev_set_features(dev, dev->log_enabled);
    assert(t >= 0);
err_features:
    return r;
}

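/* MemoryListener hook for migration: dirty logging is switched on when
 * migration starts and off again when it finishes.  If the device is
 * not running yet, just remember the setting; vhost_dev_start() will
 * apply it. */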
static int vhost_migration_log(MemoryListener *listener, int enable)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int r;
    if (!!enable == dev->log_enabled) {
        return 0;
    }
    if (!dev->started) {
        dev->log_enabled = enable;
        return 0;
    }
    if (!enable) {
        r = vhost_dev_set_log(dev, false);
        if (r < 0) {
            return r;
        }
        g_free(dev->log);
        dev->log = NULL;
        dev->log_size = 0;
    } else {
        vhost_dev_log_resize(dev, vhost_get_log_size(dev));
        r = vhost_dev_set_log(dev, true);
        if (r < 0) {
            return r;
        }
    }
    dev->log_enabled = enable;
    return 0;
}

static void vhost_log_global_start(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, true);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_global_stop(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, false);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_start(MemoryListener *listener,
                            MemoryRegionSection *section)
{
    /* FIXME: implement */
}

static void vhost_log_stop(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    /* FIXME: implement */
}

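/* Wire one virtqueue into the kernel: set its size and base index, map
 * the descriptor, avail and used rings into qemu's address space, hand
 * those addresses to the backend, and point VHOST_SET_VRING_KICK at the
 * host notifier so guest kicks bypass qemu. */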
static int vhost_virtqueue_start(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
{
    hwaddr s, l, a;
    int r;
    int vhost_vq_index = idx - dev->vq_index;
    struct vhost_vring_file file = {
        .index = vhost_vq_index
    };
    struct vhost_vring_state state = {
        .index = vhost_vq_index
    };
    struct VirtQueue *vvq = virtio_get_queue(vdev, idx);

    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);

    vq->num = state.num = virtio_queue_get_num(vdev, idx);
    r = ioctl(dev->control, VHOST_SET_VRING_NUM, &state);
    if (r) {
        return -errno;
    }

    state.num = virtio_queue_get_last_avail_idx(vdev, idx);
    r = ioctl(dev->control, VHOST_SET_VRING_BASE, &state);
    if (r) {
        return -errno;
    }

    s = l = virtio_queue_get_desc_size(vdev, idx);
    a = virtio_queue_get_desc_addr(vdev, idx);
    vq->desc = cpu_physical_memory_map(a, &l, 0);
    if (!vq->desc || l != s) {
        r = -ENOMEM;
        goto fail_alloc_desc;
    }
    s = l = virtio_queue_get_avail_size(vdev, idx);
    a = virtio_queue_get_avail_addr(vdev, idx);
    vq->avail = cpu_physical_memory_map(a, &l, 0);
    if (!vq->avail || l != s) {
        r = -ENOMEM;
        goto fail_alloc_avail;
    }
    vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
    vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
    vq->used = cpu_physical_memory_map(a, &l, 1);
    if (!vq->used || l != s) {
        r = -ENOMEM;
        goto fail_alloc_used;
    }

    vq->ring_size = s = l = virtio_queue_get_ring_size(vdev, idx);
    vq->ring_phys = a = virtio_queue_get_ring_addr(vdev, idx);
    vq->ring = cpu_physical_memory_map(a, &l, 1);
    if (!vq->ring || l != s) {
        r = -ENOMEM;
        goto fail_alloc_ring;
    }

    r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
    if (r < 0) {
        r = -errno;
        goto fail_alloc;
    }

    file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
    r = ioctl(dev->control, VHOST_SET_VRING_KICK, &file);
    if (r) {
        r = -errno;
        goto fail_kick;
    }

    /* Clear and discard previous events if any. */
    event_notifier_test_and_clear(&vq->masked_notifier);

    return 0;

fail_kick:
fail_alloc:
    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, 0);
fail_alloc_ring:
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              0, 0);
fail_alloc_used:
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, 0);
fail_alloc_avail:
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, 0);
fail_alloc_desc:
    return r;
}

static void vhost_virtqueue_stop(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
{
    struct vhost_vring_state state = {
        .index = idx - dev->vq_index
    };
    int r;
    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);
    r = ioctl(dev->control, VHOST_GET_VRING_BASE, &state);
    if (r < 0) {
        fprintf(stderr, "vhost VQ %d ring restore failed: %d\n", idx, r);
        fflush(stderr);
    }
    /* Only use state.num if the ioctl actually succeeded. */
    assert(r >= 0);
    virtio_queue_set_last_avail_idx(vdev, idx, state.num);
    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, virtio_queue_get_ring_size(vdev, idx));
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              1, virtio_queue_get_used_size(vdev, idx));
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, virtio_queue_get_avail_size(vdev, idx));
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, virtio_queue_get_desc_size(vdev, idx));
}

static void vhost_eventfd_add(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static void vhost_eventfd_del(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static int vhost_virtqueue_init(struct vhost_dev *dev,
                                struct vhost_virtqueue *vq, int n)
{
    struct vhost_vring_file file = {
        .index = n,
    };
    int r = event_notifier_init(&vq->masked_notifier, 0);
    if (r < 0) {
        return r;
    }

    file.fd = event_notifier_get_fd(&vq->masked_notifier);
    r = ioctl(dev->control, VHOST_SET_VRING_CALL, &file);
    if (r) {
        r = -errno;
        goto fail_call;
    }
    return 0;
fail_call:
    event_notifier_cleanup(&vq->masked_notifier);
    return r;
}

static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
{
    event_notifier_cleanup(&vq->masked_notifier);
}

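/* Typical lifecycle, as driven by a backend such as vhost_net (a
 * sketch; error handling omitted, and the fd/path arguments are
 * examples only):
 *
 *     vhost_dev_init(&dev, -1, "/dev/vhost-net", false);
 *     vhost_dev_enable_notifiers(&dev, vdev);
 *     vhost_dev_start(&dev, vdev);
 *     ...
 *     vhost_dev_stop(&dev, vdev);
 *     vhost_dev_disable_notifiers(&dev, vdev);
 *     vhost_dev_cleanup(&dev);
 */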
int vhost_dev_init(struct vhost_dev *hdev, int devfd, const char *devpath,
                   bool force)
{
    uint64_t features;
    int i, r;
    if (devfd >= 0) {
        hdev->control = devfd;
    } else {
        hdev->control = open(devpath, O_RDWR);
        if (hdev->control < 0) {
            return -errno;
        }
    }
    r = ioctl(hdev->control, VHOST_SET_OWNER, NULL);
    if (r < 0) {
        goto fail;
    }

    r = ioctl(hdev->control, VHOST_GET_FEATURES, &features);
    if (r < 0) {
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_init(hdev, hdev->vqs + i, i);
        if (r < 0) {
            goto fail_vq;
        }
    }
    hdev->features = features;

    hdev->memory_listener = (MemoryListener) {
        .begin = vhost_begin,
        .commit = vhost_commit,
        .region_add = vhost_region_add,
        .region_del = vhost_region_del,
        .region_nop = vhost_region_nop,
        .log_start = vhost_log_start,
        .log_stop = vhost_log_stop,
        .log_sync = vhost_log_sync,
        .log_global_start = vhost_log_global_start,
        .log_global_stop = vhost_log_global_stop,
        .eventfd_add = vhost_eventfd_add,
        .eventfd_del = vhost_eventfd_del,
        .priority = 10
    };
    hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
    hdev->n_mem_sections = 0;
    hdev->mem_sections = NULL;
    hdev->log = NULL;
    hdev->log_size = 0;
    hdev->log_enabled = false;
    hdev->started = false;
    memory_listener_register(&hdev->memory_listener, &address_space_memory);
    hdev->force = force;
    return 0;
fail_vq:
    while (--i >= 0) {
        vhost_virtqueue_cleanup(hdev->vqs + i);
    }
fail:
    r = -errno;
    close(hdev->control);
    return r;
}

void vhost_dev_cleanup(struct vhost_dev *hdev)
{
    int i;
    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_cleanup(hdev->vqs + i);
    }
    memory_listener_unregister(&hdev->memory_listener);
    g_free(hdev->mem);
    g_free(hdev->mem_sections);
    close(hdev->control);
}

bool vhost_dev_query(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    return !vdev->binding->query_guest_notifiers ||
        vdev->binding->query_guest_notifiers(vdev->binding_opaque) ||
        hdev->force;
}

/* Stop processing guest IO notifications in qemu.
 * Start processing them in vhost in kernel.
 */
int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;
    if (!vdev->binding->set_host_notifier) {
        fprintf(stderr, "binding does not support host notifiers\n");
        r = -ENOSYS;
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i) {
        r = vdev->binding->set_host_notifier(vdev->binding_opaque,
                                             hdev->vq_index + i,
                                             true);
        if (r < 0) {
            fprintf(stderr, "vhost VQ %d notifier binding failed: %d\n", i, -r);
            goto fail_vq;
        }
    }

    return 0;
fail_vq:
    while (--i >= 0) {
        /* Use a separate variable so the original error in r survives. */
        int e = vdev->binding->set_host_notifier(vdev->binding_opaque,
                                                 hdev->vq_index + i,
                                                 false);
        if (e < 0) {
            fprintf(stderr, "vhost VQ %d notifier cleanup error: %d\n", i, -e);
            fflush(stderr);
        }
        assert(e >= 0);
    }
fail:
    return r;
}

/* Stop processing guest IO notifications in vhost.
 * Start processing them in qemu.
 * This might actually run the qemu handlers right away,
 * so virtio in qemu must be completely set up when this is called.
 */
void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;

    for (i = 0; i < hdev->nvqs; ++i) {
        r = vdev->binding->set_host_notifier(vdev->binding_opaque,
                                             hdev->vq_index + i,
                                             false);
        if (r < 0) {
            fprintf(stderr, "vhost VQ %d notifier cleanup failed: %d\n", i, -r);
            fflush(stderr);
        }
        assert(r >= 0);
    }
}

/* Test and clear event pending status.
 * Should be called after unmask to avoid losing events.
 */
bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
{
    struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index;
    assert(hdev->started);
    assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);
    return event_notifier_test_and_clear(&vq->masked_notifier);
}

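/* While a queue is masked, VHOST_SET_VRING_CALL points the kernel at
 * the per-queue masked_notifier instead of the guest notifier, so
 * interrupts are parked rather than delivered; vhost_virtqueue_pending()
 * above reports whether any arrived in the meantime. */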
/* Mask/unmask events from this vq. */
void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
                          bool mask)
{
    struct VirtQueue *vvq = virtio_get_queue(vdev, n);
    int r, index = n - hdev->vq_index;

    assert(hdev->started);
    assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);

    struct vhost_vring_file file = {
        .index = index
    };
    if (mask) {
        file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier);
    } else {
        file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
    }
    r = ioctl(hdev->control, VHOST_SET_VRING_CALL, &file);
    assert(r >= 0);
}

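/* Start order matters: features first, then the memory table, then the
 * individual rings, and the dirty log last, once its required size is
 * known. */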
/* Host notifiers must be enabled at this point. */
int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;

    hdev->started = true;

    r = vhost_dev_set_features(hdev, hdev->log_enabled);
    if (r < 0) {
        goto fail_features;
    }
    r = ioctl(hdev->control, VHOST_SET_MEM_TABLE, hdev->mem);
    if (r < 0) {
        r = -errno;
        goto fail_mem;
    }
    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_start(hdev,
                                  vdev,
                                  hdev->vqs + i,
                                  hdev->vq_index + i);
        if (r < 0) {
            goto fail_vq;
        }
    }

    if (hdev->log_enabled) {
        uint64_t log_base;

        hdev->log_size = vhost_get_log_size(hdev);
        hdev->log = hdev->log_size ?
            g_malloc0(hdev->log_size * sizeof *hdev->log) : NULL;
        /* Pass a pointer to the log base, as vhost_dev_log_resize()
         * does above. */
        log_base = (uint64_t)(unsigned long)hdev->log;
        r = ioctl(hdev->control, VHOST_SET_LOG_BASE, &log_base);
        if (r < 0) {
            r = -errno;
            goto fail_log;
        }
    }

    return 0;
fail_log:
    g_free(hdev->log);
    hdev->log = NULL;
    hdev->log_size = 0;
fail_vq:
    while (--i >= 0) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }
fail_mem:
fail_features:
    hdev->started = false;
    return r;
}

/* Host notifiers must be enabled at this point. */
void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i;

    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }
    for (i = 0; i < hdev->n_mem_sections; ++i) {
        vhost_sync_dirty_bitmap(hdev, &hdev->mem_sections[i],
                                0, (hwaddr)~0x0ull);
    }

    hdev->started = false;
    g_free(hdev->log);
    hdev->log = NULL;
    hdev->log_size = 0;
}