/*
 * vhost support
 *
 * Copyright Red Hat, Inc. 2010
 *
 * Authors:
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include <sys/ioctl.h>
#include "hw/virtio/vhost.h"
#include "hw/hw.h"
#include "qemu/range.h"
#include <linux/vhost.h>
#include "exec/address-spaces.h"

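/* The dirty log is an array of vhost_log_chunk_t bitmaps: each bit marks
 * one VHOST_LOG_PAGE of guest memory dirty, so a chunk covers
 * VHOST_LOG_CHUNK bytes of guest address space (see hw/virtio/vhost.h).
 * The kernel sets bits while the device writes guest memory;
 * vhost_dev_sync_region() transfers them into QEMU's dirty bitmap,
 * clearing each chunk atomically as it goes. */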
static void vhost_dev_sync_region(struct vhost_dev *dev,
                                  MemoryRegionSection *section,
                                  uint64_t mfirst, uint64_t mlast,
                                  uint64_t rfirst, uint64_t rlast)
{
    uint64_t start = MAX(mfirst, rfirst);
    uint64_t end = MIN(mlast, rlast);
    vhost_log_chunk_t *from = dev->log + start / VHOST_LOG_CHUNK;
    vhost_log_chunk_t *to = dev->log + end / VHOST_LOG_CHUNK + 1;
    uint64_t addr = (start / VHOST_LOG_CHUNK) * VHOST_LOG_CHUNK;

    if (end < start) {
        return;
    }
    assert(end / VHOST_LOG_CHUNK < dev->log_size);
    assert(start / VHOST_LOG_CHUNK < dev->log_size);

    for (; from < to; ++from) {
        vhost_log_chunk_t log;
        int bit;
        /* We first check with non-atomic: much cheaper,
         * and we expect non-dirty to be the common case. */
        if (!*from) {
            addr += VHOST_LOG_CHUNK;
            continue;
        }
        /* Data must be read atomically. We don't really
         * need the barrier semantics of __sync
         * builtins, but it's easier to use them than
         * roll our own. */
        log = __sync_fetch_and_and(from, 0);
        while ((bit = sizeof(log) > sizeof(int) ?
                ffsll(log) : ffs(log))) {
            hwaddr page_addr;
            hwaddr section_offset;
            hwaddr mr_offset;
            bit -= 1;
            page_addr = addr + bit * VHOST_LOG_PAGE;
            section_offset = page_addr - section->offset_within_address_space;
            mr_offset = section_offset + section->offset_within_region;
            memory_region_set_dirty(section->mr, mr_offset, VHOST_LOG_PAGE);
            log &= ~(0x1ull << bit);
        }
        addr += VHOST_LOG_CHUNK;
    }
}

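/* Sync the dirty log for one MemoryRegionSection, clipped to
 * [first, last]: every tracked range (the memory regions and the used
 * rings, which the kernel also writes) is intersected with the section
 * and flushed via vhost_dev_sync_region(). */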
static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
                                   MemoryRegionSection *section,
                                   hwaddr first,
                                   hwaddr last)
{
    int i;
    hwaddr start_addr;
    hwaddr end_addr;

    if (!dev->log_enabled || !dev->started) {
        return 0;
    }
    start_addr = section->offset_within_address_space;
    end_addr = range_get_last(start_addr, section->size);
    start_addr = MAX(first, start_addr);
    end_addr = MIN(last, end_addr);

    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr,
                              reg->guest_phys_addr,
                              range_get_last(reg->guest_phys_addr,
                                             reg->memory_size));
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys,
                              range_get_last(vq->used_phys, vq->used_size));
    }
    return 0;
}

static void vhost_log_sync(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    vhost_sync_dirty_bitmap(dev, section, 0x0, ~0x0ULL);
}

static void vhost_log_sync_range(struct vhost_dev *dev,
                                 hwaddr first, hwaddr last)
{
    int i;
    /* FIXME: this is N^2 in number of sections */
    for (i = 0; i < dev->n_mem_sections; ++i) {
        MemoryRegionSection *section = &dev->mem_sections[i];
        vhost_sync_dirty_bitmap(dev, section, first, last);
    }
}

/* Assign/unassign. Keep an unsorted array of non-overlapping
 * memory regions in dev->mem. */
static void vhost_dev_unassign_memory(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int from, to, n = dev->mem->nregions;
    /* Track overlapping/split regions for sanity checking. */
    int overlap_start = 0, overlap_end = 0, overlap_middle = 0, split = 0;

    for (from = 0, to = 0; from < n; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t reglast;
        uint64_t memlast;
        uint64_t change;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }

        /* No overlap is simple */
        if (!ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                            start_addr, size)) {
            continue;
        }

        /* A split only happens if the supplied region is in the
         * middle of an existing one. Thus it cannot overlap with
         * any other existing region. */
        assert(!split);

        reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        memlast = range_get_last(start_addr, size);

        /* Remove whole region */
        if (start_addr <= reg->guest_phys_addr && memlast >= reglast) {
            --dev->mem->nregions;
            --to;
            ++overlap_middle;
            continue;
        }

        /* Shrink region */
        if (memlast >= reglast) {
            reg->memory_size = start_addr - reg->guest_phys_addr;
            assert(reg->memory_size);
            assert(!overlap_end);
            ++overlap_end;
            continue;
        }

        /* Shift region */
        if (start_addr <= reg->guest_phys_addr) {
            change = memlast + 1 - reg->guest_phys_addr;
            reg->memory_size -= change;
            reg->guest_phys_addr += change;
            reg->userspace_addr += change;
            assert(reg->memory_size);
            assert(!overlap_start);
            ++overlap_start;
            continue;
        }

        /* This only happens if the supplied region is in the
         * middle of an existing one. Thus it cannot overlap with
         * any other existing region. */
        assert(!overlap_start);
        assert(!overlap_end);
        assert(!overlap_middle);
        /* Split region: shrink first part, shift second part. */
        memcpy(dev->mem->regions + n, reg, sizeof *reg);
        reg->memory_size = start_addr - reg->guest_phys_addr;
        assert(reg->memory_size);
        change = memlast + 1 - reg->guest_phys_addr;
        reg = dev->mem->regions + n;
        reg->memory_size -= change;
        assert(reg->memory_size);
        reg->guest_phys_addr += change;
        reg->userspace_addr += change;
        /* Never add more than 1 region */
        assert(dev->mem->nregions == n);
        ++dev->mem->nregions;
        ++split;
    }
}

/* Called after unassign, so no regions overlap the given range. */
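/* Copy regions down over any slots freed by merging; coalesce the new
 * range with an existing region only when the two are adjacent in both
 * guest-physical and userspace addresses, otherwise append a new entry. */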
static void vhost_dev_assign_memory(struct vhost_dev *dev,
                                    uint64_t start_addr,
                                    uint64_t size,
                                    uint64_t uaddr)
{
    int from, to;
    struct vhost_memory_region *merged = NULL;
    for (from = 0, to = 0; from < dev->mem->nregions; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t prlast, urlast;
        uint64_t pmlast, umlast;
        uint64_t s, e, u;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }
        prlast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        pmlast = range_get_last(start_addr, size);
        urlast = range_get_last(reg->userspace_addr, reg->memory_size);
        umlast = range_get_last(uaddr, size);

        /* check for overlapping regions: should never happen. */
        assert(prlast < start_addr || pmlast < reg->guest_phys_addr);
        /* Not an adjacent or overlapping region - do not merge. */
        if ((prlast + 1 != start_addr || urlast + 1 != uaddr) &&
            (pmlast + 1 != reg->guest_phys_addr ||
             umlast + 1 != reg->userspace_addr)) {
            continue;
        }

        if (merged) {
            --to;
            assert(to >= 0);
        } else {
            merged = reg;
        }
        u = MIN(uaddr, reg->userspace_addr);
        s = MIN(start_addr, reg->guest_phys_addr);
        e = MAX(pmlast, prlast);
        uaddr = merged->userspace_addr = u;
        start_addr = merged->guest_phys_addr = s;
        size = merged->memory_size = e - s + 1;
        assert(merged->memory_size);
    }

    if (!merged) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        memset(reg, 0, sizeof *reg);
        reg->memory_size = size;
        assert(reg->memory_size);
        reg->guest_phys_addr = start_addr;
        reg->userspace_addr = uaddr;
        ++to;
    }
    assert(to <= dev->mem->nregions + 1);
    dev->mem->nregions = to;
}

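/* The log must be large enough to cover the highest guest-physical
 * address the kernel can dirty: the end of every memory region and of
 * every used ring, rounded up to whole chunks. */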
static uint64_t vhost_get_log_size(struct vhost_dev *dev)
{
    uint64_t log_size = 0;
    int i;
    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        uint64_t last = range_get_last(reg->guest_phys_addr,
                                       reg->memory_size);
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        uint64_t last = vq->used_phys + vq->used_size - 1;
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    return log_size;
}

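/* Swap in a log of the requested size: allocate the new buffer, point
 * the kernel at it with VHOST_SET_LOG_BASE, and only then flush and free
 * the old one, so no dirty bits are lost during the switch. */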
static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
{
    vhost_log_chunk_t *log;
    uint64_t log_base;
    int r;

    log = g_malloc0(size * sizeof *log);
    log_base = (uint64_t)(unsigned long)log;
    r = ioctl(dev->control, VHOST_SET_LOG_BASE, &log_base);
    assert(r >= 0);
    /* Sync only the range covered by the old log */
    if (dev->log_size) {
        vhost_log_sync_range(dev, 0, dev->log_size * VHOST_LOG_CHUNK - 1);
    }
    g_free(dev->log);
    dev->log = log;
    dev->log_size = size;
}

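/* After a layout change, check that rings the kernel is using still map
 * to the same host addresses; vhost cannot follow a ring that moves
 * while the device is running. */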
static int vhost_verify_ring_mappings(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int i;
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        hwaddr l;
        void *p;

        if (!ranges_overlap(start_addr, size, vq->ring_phys, vq->ring_size)) {
            continue;
        }
        l = vq->ring_size;
        p = cpu_physical_memory_map(vq->ring_phys, &l, 1);
        if (!p || l != vq->ring_size) {
            fprintf(stderr, "Unable to map ring buffer for ring %d\n", i);
            return -ENOMEM;
        }
        if (p != vq->ring) {
            fprintf(stderr, "Ring buffer relocated for ring %d\n", i);
            cpu_physical_memory_unmap(p, l, 0, 0);
            return -EBUSY;
        }
        cpu_physical_memory_unmap(p, l, 0, 0);
    }
    return 0;
}

static struct vhost_memory_region *vhost_dev_find_reg(struct vhost_dev *dev,
                                                      uint64_t start_addr,
                                                      uint64_t size)
{
    int i, n = dev->mem->nregions;
    for (i = 0; i < n; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        if (ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                           start_addr, size)) {
            return reg;
        }
    }
    return NULL;
}

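/* Returns true if the kernel's memory table needs updating for this
 * mapping: no existing region covers it, the covering region is too
 * small, or the userspace address changed. */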
static bool vhost_dev_cmp_memory(struct vhost_dev *dev,
                                 uint64_t start_addr,
                                 uint64_t size,
                                 uint64_t uaddr)
{
    struct vhost_memory_region *reg = vhost_dev_find_reg(dev, start_addr, size);
    uint64_t reglast;
    uint64_t memlast;

    if (!reg) {
        return true;
    }

    reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
    memlast = range_get_last(start_addr, size);

    /* Need to extend region? */
    if (start_addr < reg->guest_phys_addr || memlast > reglast) {
        return true;
    }
    /* userspace_addr changed? */
    return uaddr != reg->userspace_addr + start_addr - reg->guest_phys_addr;
}

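/* Update the shadow memory table for a section that is being added or
 * removed, and widen the changed-address window that vhost_commit() will
 * push to the kernel at the end of the transaction. */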
static void vhost_set_memory(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool add)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    bool log_dirty = memory_region_is_logging(section->mr);
    int s = offsetof(struct vhost_memory, regions) +
        (dev->mem->nregions + 1) * sizeof dev->mem->regions[0];
    void *ram;

    dev->mem = g_realloc(dev->mem, s);

    if (log_dirty) {
        add = false;
    }

    assert(size);

    /* Optimize no-change case. At least cirrus_vga does this a lot at this time. */
    ram = memory_region_get_ram_ptr(section->mr) + section->offset_within_region;
    if (add) {
        if (!vhost_dev_cmp_memory(dev, start_addr, size, (uintptr_t)ram)) {
            /* Region exists with same address. Nothing to do. */
            return;
        }
    } else {
        if (!vhost_dev_find_reg(dev, start_addr, size)) {
            /* Removing region that we don't access. Nothing to do. */
            return;
        }
    }

    /* Remove any old mapping for this range, then re-add it if needed;
     * assign merges adjacent regions. */
    vhost_dev_unassign_memory(dev, start_addr, size);
    if (add) {
        vhost_dev_assign_memory(dev, start_addr, size, (uintptr_t)ram);
    }
    dev->mem_changed_start_addr = MIN(dev->mem_changed_start_addr, start_addr);
    dev->mem_changed_end_addr = MAX(dev->mem_changed_end_addr, start_addr + size - 1);
    dev->memory_changed = true;
}

static bool vhost_section(MemoryRegionSection *section)
{
    return memory_region_is_ram(section->mr);
}

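/* The memory listener batches updates into transactions: begin() resets
 * the changed-address window, the region_add/region_del callbacks widen
 * it, and commit() pushes the accumulated table to the kernel once. */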
static void vhost_begin(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    dev->mem_changed_end_addr = 0;
    dev->mem_changed_start_addr = -1;
}

static void vhost_commit(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    hwaddr start_addr = 0;
    ram_addr_t size = 0;
    uint64_t log_size;
    int r;

    if (!dev->memory_changed) {
        return;
    }
    if (!dev->started) {
        return;
    }
    if (dev->mem_changed_start_addr > dev->mem_changed_end_addr) {
        return;
    }

    start_addr = dev->mem_changed_start_addr;
    size = dev->mem_changed_end_addr - dev->mem_changed_start_addr + 1;

    r = vhost_verify_ring_mappings(dev, start_addr, size);
    assert(r >= 0);

    if (!dev->log_enabled) {
        r = ioctl(dev->control, VHOST_SET_MEM_TABLE, dev->mem);
        assert(r >= 0);
        dev->memory_changed = false;
        return;
    }
    log_size = vhost_get_log_size(dev);
    /* We allocate an extra 4K bytes to log,
     * to reduce the number of reallocations. */
#define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
    /* To log more, must increase log size before table update. */
    if (dev->log_size < log_size) {
        vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
    }
    r = ioctl(dev->control, VHOST_SET_MEM_TABLE, dev->mem);
    assert(r >= 0);
    /* To log less, can only decrease log size after table update. */
    if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
        vhost_dev_log_resize(dev, log_size);
    }
    dev->memory_changed = false;
}

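/* region_add/region_del keep dev->mem_sections (used when syncing the
 * dirty log) in step with the shadow memory table. */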
static void vhost_region_add(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);

    if (!vhost_section(section)) {
        return;
    }

    ++dev->n_mem_sections;
    dev->mem_sections = g_renew(MemoryRegionSection, dev->mem_sections,
                                dev->n_mem_sections);
    dev->mem_sections[dev->n_mem_sections - 1] = *section;
    vhost_set_memory(listener, section, true);
}

static void vhost_region_del(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int i;

    if (!vhost_section(section)) {
        return;
    }

    vhost_set_memory(listener, section, false);
    for (i = 0; i < dev->n_mem_sections; ++i) {
        if (dev->mem_sections[i].offset_within_address_space
            == section->offset_within_address_space) {
            --dev->n_mem_sections;
            memmove(&dev->mem_sections[i], &dev->mem_sections[i + 1],
                    (dev->n_mem_sections - i) * sizeof(*dev->mem_sections));
            break;
        }
    }
}

static void vhost_region_nop(MemoryListener *listener,
                             MemoryRegionSection *section)
{
}

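/* Program one vring's addresses into the kernel. The descriptor, avail
 * and used rings are passed as QEMU virtual addresses; log_guest_addr is
 * the guest-physical address of the used ring, which the kernel needs to
 * mark pages dirty when logging is enabled. */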
static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx, bool enable_log)
{
    struct vhost_vring_addr addr = {
        .index = idx,
        .desc_user_addr = (uint64_t)(unsigned long)vq->desc,
        .avail_user_addr = (uint64_t)(unsigned long)vq->avail,
        .used_user_addr = (uint64_t)(unsigned long)vq->used,
        .log_guest_addr = vq->used_phys,
        .flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0,
    };
    int r = ioctl(dev->control, VHOST_SET_VRING_ADDR, &addr);
    if (r < 0) {
        return -errno;
    }
    return 0;
}

static int vhost_dev_set_features(struct vhost_dev *dev, bool enable_log)
{
    uint64_t features = dev->acked_features;
    int r;
    if (enable_log) {
        features |= 0x1 << VHOST_F_LOG_ALL;
    }
    r = ioctl(dev->control, VHOST_SET_FEATURES, &features);
    return r < 0 ? -errno : 0;
}

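/* Toggle dirty logging on a running device: renegotiate features with
 * VHOST_F_LOG_ALL, then reprogram every vring. On failure, roll back the
 * rings already switched and restore the previous feature set. */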
static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
{
    int r, t, i;
    r = vhost_dev_set_features(dev, enable_log);
    if (r < 0) {
        goto err_features;
    }
    for (i = 0; i < dev->nvqs; ++i) {
        r = vhost_virtqueue_set_addr(dev, dev->vqs + i, i,
                                     enable_log);
        if (r < 0) {
            goto err_vq;
        }
    }
    return 0;
err_vq:
    for (; i >= 0; --i) {
        t = vhost_virtqueue_set_addr(dev, dev->vqs + i, i,
                                     dev->log_enabled);
        assert(t >= 0);
    }
    t = vhost_dev_set_features(dev, dev->log_enabled);
    assert(t >= 0);
err_features:
    return r;
}

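/* Called when the global dirty-log state flips (e.g. at the start or end
 * of migration). If the device is not running, just record the new
 * state; otherwise switch logging on or off and (re)size the log. */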
static int vhost_migration_log(MemoryListener *listener, int enable)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int r;
    if (!!enable == dev->log_enabled) {
        return 0;
    }
    if (!dev->started) {
        dev->log_enabled = enable;
        return 0;
    }
    if (!enable) {
        r = vhost_dev_set_log(dev, false);
        if (r < 0) {
            return r;
        }
        g_free(dev->log);
        dev->log = NULL;
        dev->log_size = 0;
    } else {
        vhost_dev_log_resize(dev, vhost_get_log_size(dev));
        r = vhost_dev_set_log(dev, true);
        if (r < 0) {
            return r;
        }
    }
    dev->log_enabled = enable;
    return 0;
}

static void vhost_log_global_start(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, true);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_global_stop(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, false);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_start(MemoryListener *listener,
                            MemoryRegionSection *section)
{
    /* FIXME: implement */
}

static void vhost_log_stop(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    /* FIXME: implement */
}

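/* Bring one virtqueue up in the kernel: mirror the ring geometry and the
 * last_avail index from virtio, map the descriptor, avail, used and ring
 * areas into QEMU's address space, program the addresses, and hand the
 * host notifier's eventfd to the kernel as the kick fd. */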
static int vhost_virtqueue_start(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
{
    hwaddr s, l, a;
    int r;
    int vhost_vq_index = idx - dev->vq_index;
    struct vhost_vring_file file = {
        .index = vhost_vq_index
    };
    struct vhost_vring_state state = {
        .index = vhost_vq_index
    };
    struct VirtQueue *vvq = virtio_get_queue(vdev, idx);

    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);

    vq->num = state.num = virtio_queue_get_num(vdev, idx);
    r = ioctl(dev->control, VHOST_SET_VRING_NUM, &state);
    if (r) {
        return -errno;
    }

    state.num = virtio_queue_get_last_avail_idx(vdev, idx);
    r = ioctl(dev->control, VHOST_SET_VRING_BASE, &state);
    if (r) {
        return -errno;
    }

    s = l = virtio_queue_get_desc_size(vdev, idx);
    a = virtio_queue_get_desc_addr(vdev, idx);
    vq->desc = cpu_physical_memory_map(a, &l, 0);
    if (!vq->desc || l != s) {
        r = -ENOMEM;
        goto fail_alloc_desc;
    }
    s = l = virtio_queue_get_avail_size(vdev, idx);
    a = virtio_queue_get_avail_addr(vdev, idx);
    vq->avail = cpu_physical_memory_map(a, &l, 0);
    if (!vq->avail || l != s) {
        r = -ENOMEM;
        goto fail_alloc_avail;
    }
    vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
    vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
    vq->used = cpu_physical_memory_map(a, &l, 1);
    if (!vq->used || l != s) {
        r = -ENOMEM;
        goto fail_alloc_used;
    }

    vq->ring_size = s = l = virtio_queue_get_ring_size(vdev, idx);
    vq->ring_phys = a = virtio_queue_get_ring_addr(vdev, idx);
    vq->ring = cpu_physical_memory_map(a, &l, 1);
    if (!vq->ring || l != s) {
        r = -ENOMEM;
        goto fail_alloc_ring;
    }

    r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
    if (r < 0) {
        /* vhost_virtqueue_set_addr() already returned -errno */
        goto fail_alloc;
    }

    file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
    r = ioctl(dev->control, VHOST_SET_VRING_KICK, &file);
    if (r) {
        r = -errno;
        goto fail_kick;
    }

    /* Clear and discard previous events if any. */
    event_notifier_test_and_clear(&vq->masked_notifier);

    return 0;

fail_kick:
fail_alloc:
    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, 0);
fail_alloc_ring:
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              0, 0);
fail_alloc_used:
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, 0);
fail_alloc_avail:
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, 0);
fail_alloc_desc:
    return r;
}

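/* Tear one virtqueue down: read the last_avail index back from the
 * kernel so virtio can resume where vhost stopped, then unmap the rings,
 * marking the areas the kernel may have written as dirty. */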
static void vhost_virtqueue_stop(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
{
    struct vhost_vring_state state = {
        .index = idx - dev->vq_index
    };
    int r;
    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);
    r = ioctl(dev->control, VHOST_GET_VRING_BASE, &state);
    if (r < 0) {
        fprintf(stderr, "vhost VQ %d ring restore failed: %d\n", idx, r);
        fflush(stderr);
    }
    assert(r >= 0);
    virtio_queue_set_last_avail_idx(vdev, idx, state.num);
    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, virtio_queue_get_ring_size(vdev, idx));
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              1, virtio_queue_get_used_size(vdev, idx));
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, virtio_queue_get_avail_size(vdev, idx));
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, virtio_queue_get_desc_size(vdev, idx));
}

static void vhost_eventfd_add(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static void vhost_eventfd_del(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

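/* Create the masked notifier for a virtqueue and install it as the
 * initial VHOST_SET_VRING_CALL fd; while a queue is masked, kernel
 * interrupts land here instead of on the guest notifier (see
 * vhost_virtqueue_mask() and vhost_virtqueue_pending()). */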
static int vhost_virtqueue_init(struct vhost_dev *dev,
                                struct vhost_virtqueue *vq, int n)
{
    struct vhost_vring_file file = {
        .index = n,
    };
    int r = event_notifier_init(&vq->masked_notifier, 0);
    if (r < 0) {
        return r;
    }

    file.fd = event_notifier_get_fd(&vq->masked_notifier);
    r = ioctl(dev->control, VHOST_SET_VRING_CALL, &file);
    if (r) {
        r = -errno;
        goto fail_call;
    }
    return 0;
fail_call:
    event_notifier_cleanup(&vq->masked_notifier);
    return r;
}

static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
{
    event_notifier_cleanup(&vq->masked_notifier);
}

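/* Open (or adopt) the vhost control fd, take ownership, query the
 * feature set, initialize every virtqueue and register the memory
 * listener that keeps the kernel's view of guest memory current. */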
int vhost_dev_init(struct vhost_dev *hdev, int devfd, const char *devpath,
                   bool force)
{
    uint64_t features;
    int i, r;
    if (devfd >= 0) {
        hdev->control = devfd;
    } else {
        hdev->control = open(devpath, O_RDWR);
        if (hdev->control < 0) {
            return -errno;
        }
    }
    r = ioctl(hdev->control, VHOST_SET_OWNER, NULL);
    if (r < 0) {
        r = -errno;
        goto fail;
    }

    r = ioctl(hdev->control, VHOST_GET_FEATURES, &features);
    if (r < 0) {
        r = -errno;
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_init(hdev, hdev->vqs + i, i);
        if (r < 0) {
            goto fail_vq;
        }
    }
    hdev->features = features;

    hdev->memory_listener = (MemoryListener) {
        .begin = vhost_begin,
        .commit = vhost_commit,
        .region_add = vhost_region_add,
        .region_del = vhost_region_del,
        .region_nop = vhost_region_nop,
        .log_start = vhost_log_start,
        .log_stop = vhost_log_stop,
        .log_sync = vhost_log_sync,
        .log_global_start = vhost_log_global_start,
        .log_global_stop = vhost_log_global_stop,
        .eventfd_add = vhost_eventfd_add,
        .eventfd_del = vhost_eventfd_del,
        .priority = 10
    };
    hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
    hdev->n_mem_sections = 0;
    hdev->mem_sections = NULL;
    hdev->log = NULL;
    hdev->log_size = 0;
    hdev->log_enabled = false;
    hdev->started = false;
    hdev->memory_changed = false;
    memory_listener_register(&hdev->memory_listener, &address_space_memory);
    hdev->force = force;
    return 0;
fail_vq:
    while (--i >= 0) {
        vhost_virtqueue_cleanup(hdev->vqs + i);
    }
fail:
    /* errno was captured at the failing call above; cleanup and close()
     * must not clobber the value we return. */
    close(hdev->control);
    return r;
}

void vhost_dev_cleanup(struct vhost_dev *hdev)
{
    int i;
    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_cleanup(hdev->vqs + i);
    }
    memory_listener_unregister(&hdev->memory_listener);
    g_free(hdev->mem);
    g_free(hdev->mem_sections);
    close(hdev->control);
}

bool vhost_dev_query(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    return !vdev->binding->query_guest_notifiers ||
        vdev->binding->query_guest_notifiers(vdev->binding_opaque) ||
        hdev->force;
}

/* Stop processing guest IO notifications in qemu.
 * Start processing them in vhost in kernel.
 */
int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r, e;
    if (!vdev->binding->set_host_notifier) {
        fprintf(stderr, "binding does not support host notifiers\n");
        r = -ENOSYS;
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i) {
        r = vdev->binding->set_host_notifier(vdev->binding_opaque,
                                             hdev->vq_index + i,
                                             true);
        if (r < 0) {
            fprintf(stderr, "vhost VQ %d notifier binding failed: %d\n", i, -r);
            goto fail_vq;
        }
    }

    return 0;
fail_vq:
    /* Use a separate variable for cleanup so the original error in r
     * is what we return. */
    while (--i >= 0) {
        e = vdev->binding->set_host_notifier(vdev->binding_opaque,
                                             hdev->vq_index + i,
                                             false);
        if (e < 0) {
            fprintf(stderr, "vhost VQ %d notifier cleanup error: %d\n", i, -e);
            fflush(stderr);
        }
        assert(e >= 0);
    }
fail:
    return r;
}

/* Stop processing guest IO notifications in vhost.
 * Start processing them in qemu.
 * This might actually run the qemu handlers right away,
 * so virtio in qemu must be completely set up when this is called.
 */
void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;

    for (i = 0; i < hdev->nvqs; ++i) {
        r = vdev->binding->set_host_notifier(vdev->binding_opaque,
                                             hdev->vq_index + i,
                                             false);
        if (r < 0) {
            fprintf(stderr, "vhost VQ %d notifier cleanup failed: %d\n", i, -r);
            fflush(stderr);
        }
        assert(r >= 0);
    }
}

958
959 /* Test and clear event pending status.
960 * Should be called after unmask to avoid losing events.
961 */
962 bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
963 {
964 struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index;
965 assert(hdev->started);
966 assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);
967 return event_notifier_test_and_clear(&vq->masked_notifier);
968 }
969
970 /* Mask/unmask events from this vq. */
971 void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
972 bool mask)
973 {
974 struct VirtQueue *vvq = virtio_get_queue(vdev, n);
975 int r, index = n - hdev->vq_index;
976
977 assert(hdev->started);
978 assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);
979
980 struct vhost_vring_file file = {
981 .index = index
982 };
983 if (mask) {
984 file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier);
985 } else {
986 file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
987 }
988 r = ioctl(hdev->control, VHOST_SET_VRING_CALL, &file);
989 assert(r >= 0);
990 }
991
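/* Bring-up order matters: negotiate features first, then install the
 * memory table, then start each vring; the dirty log is installed last,
 * and only if logging is already enabled (e.g. migration in progress). */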
/* Host notifiers must be enabled at this point. */
int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;

    hdev->started = true;

    r = vhost_dev_set_features(hdev, hdev->log_enabled);
    if (r < 0) {
        goto fail_features;
    }
    r = ioctl(hdev->control, VHOST_SET_MEM_TABLE, hdev->mem);
    if (r < 0) {
        r = -errno;
        goto fail_mem;
    }
    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_start(hdev,
                                  vdev,
                                  hdev->vqs + i,
                                  hdev->vq_index + i);
        if (r < 0) {
            goto fail_vq;
        }
    }

    if (hdev->log_enabled) {
        hdev->log_size = vhost_get_log_size(hdev);
        hdev->log = hdev->log_size ?
            g_malloc0(hdev->log_size * sizeof *hdev->log) : NULL;
        r = ioctl(hdev->control, VHOST_SET_LOG_BASE,
                  (uint64_t)(unsigned long)hdev->log);
        if (r < 0) {
            r = -errno;
            goto fail_log;
        }
    }

    return 0;
fail_log:
fail_vq:
    while (--i >= 0) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }
fail_mem:
fail_features:
    hdev->started = false;
    return r;
}

/* Host notifiers must be enabled at this point. */
void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i;

    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }
    vhost_log_sync_range(hdev, 0, ~0x0ull);

    hdev->started = false;
    g_free(hdev->log);
    hdev->log = NULL;
    hdev->log_size = 0;
}
1065