/*
 * vhost support
 *
 * Copyright Red Hat, Inc. 2010
 *
 * Authors:
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include <sys/ioctl.h>
#include "hw/virtio/vhost.h"
#include "qemu/range.h"
#include <linux/vhost.h>
#include "exec/address-spaces.h"

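/* Layout of the dirty log shared with the vhost kernel module, as consumed
 * below: the log is an array of vhost_log_chunk_t words, one bit per
 * VHOST_LOG_PAGE bytes of guest memory, so chunk i covers guest physical
 * addresses [i * VHOST_LOG_CHUNK, (i + 1) * VHOST_LOG_CHUNK).  For example,
 * assuming the usual definitions in hw/virtio/vhost.h (VHOST_LOG_PAGE of
 * 0x1000 and a 64-bit vhost_log_chunk_t), one chunk word covers
 * 64 * 4KiB = 256KiB of guest memory. */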
static void vhost_dev_sync_region(struct vhost_dev *dev,
                                  MemoryRegionSection *section,
                                  uint64_t mfirst, uint64_t mlast,
                                  uint64_t rfirst, uint64_t rlast)
{
    uint64_t start = MAX(mfirst, rfirst);
    uint64_t end = MIN(mlast, rlast);
    vhost_log_chunk_t *from = dev->log + start / VHOST_LOG_CHUNK;
    vhost_log_chunk_t *to = dev->log + end / VHOST_LOG_CHUNK + 1;
    uint64_t addr = (start / VHOST_LOG_CHUNK) * VHOST_LOG_CHUNK;

    if (end < start) {
        return;
    }
    assert(end / VHOST_LOG_CHUNK < dev->log_size);
    assert(start / VHOST_LOG_CHUNK < dev->log_size);

    for (;from < to; ++from) {
        vhost_log_chunk_t log;
        int bit;
        /* We first check with non-atomic: much cheaper,
         * and we expect non-dirty to be the common case. */
        if (!*from) {
            addr += VHOST_LOG_CHUNK;
            continue;
        }
        /* Data must be read atomically. We don't really
         * need the barrier semantics of __sync
         * builtins, but it's easier to use them than
         * to roll our own. */
        log = __sync_fetch_and_and(from, 0);
        while ((bit = sizeof(log) > sizeof(int) ?
                ffsll(log) : ffs(log))) {
            hwaddr page_addr;
            hwaddr section_offset;
            hwaddr mr_offset;
            /* ffs/ffsll return a 1-based bit index */
            bit -= 1;
            page_addr = addr + bit * VHOST_LOG_PAGE;
            section_offset = page_addr - section->offset_within_address_space;
            mr_offset = section_offset + section->offset_within_region;
            memory_region_set_dirty(section->mr, mr_offset, VHOST_LOG_PAGE);
            log &= ~(0x1ull << bit);
        }
        addr += VHOST_LOG_CHUNK;
    }
}

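/* Sync the dirty bits of one MemoryRegionSection, clipped to the
 * [first, last] guest physical range.  Both the regions in the device's
 * memory table and each virtqueue's used ring are logged by the kernel,
 * so both are scanned below. */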
static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
                                   MemoryRegionSection *section,
                                   hwaddr first,
                                   hwaddr last)
{
    int i;
    hwaddr start_addr;
    hwaddr end_addr;

    if (!dev->log_enabled || !dev->started) {
        return 0;
    }
    start_addr = section->offset_within_address_space;
    end_addr = range_get_last(start_addr, section->size);
    start_addr = MAX(first, start_addr);
    end_addr = MIN(last, end_addr);

    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr,
                              reg->guest_phys_addr,
                              range_get_last(reg->guest_phys_addr,
                                             reg->memory_size));
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys,
                              range_get_last(vq->used_phys, vq->used_size));
    }
    return 0;
}

static void vhost_log_sync(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    vhost_sync_dirty_bitmap(dev, section, 0x0, ~0x0ULL);
}

static void vhost_log_sync_range(struct vhost_dev *dev,
                                 hwaddr first, hwaddr last)
{
    int i;
    /* FIXME: this is N^2 in number of sections */
    for (i = 0; i < dev->n_mem_sections; ++i) {
        MemoryRegionSection *section = &dev->mem_sections[i];
        vhost_sync_dirty_bitmap(dev, section, first, last);
    }
}

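/* A removal can relate to an existing region in four ways, handled in
 * order below (R = existing region, X = range being unassigned):
 *
 *   XXXXXXXX     whole region removed
 *    RRRR
 *
 *      XXXX      tail removed: shrink in place
 *    RRRR
 *
 *   XXXX         head removed: shrink and shift start addresses up
 *    RRRR
 *
 *     X          middle removed: split into two regions
 *    RRRR
 */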
/* Assign/unassign. Keep an unsorted array of non-overlapping
 * memory regions in dev->mem. */
static void vhost_dev_unassign_memory(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int from, to, n = dev->mem->nregions;
    /* Track overlapping/split regions for sanity checking. */
    int overlap_start = 0, overlap_end = 0, overlap_middle = 0, split = 0;

    for (from = 0, to = 0; from < n; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t reglast;
        uint64_t memlast;
        uint64_t change;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }

        /* No overlap is simple */
        if (!ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                            start_addr, size)) {
            continue;
        }

        /* Split only happens if supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!split);

        reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        memlast = range_get_last(start_addr, size);

        /* Remove whole region */
        if (start_addr <= reg->guest_phys_addr && memlast >= reglast) {
            --dev->mem->nregions;
            --to;
            ++overlap_middle;
            continue;
        }

        /* Shrink region */
        if (memlast >= reglast) {
            reg->memory_size = start_addr - reg->guest_phys_addr;
            assert(reg->memory_size);
            assert(!overlap_end);
            ++overlap_end;
            continue;
        }

        /* Shift region */
        if (start_addr <= reg->guest_phys_addr) {
            change = memlast + 1 - reg->guest_phys_addr;
            reg->memory_size -= change;
            reg->guest_phys_addr += change;
            reg->userspace_addr += change;
            assert(reg->memory_size);
            assert(!overlap_start);
            ++overlap_start;
            continue;
        }

        /* This only happens if supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!overlap_start);
        assert(!overlap_end);
        assert(!overlap_middle);
        /* Split region: shrink first part, shift second part. */
        memcpy(dev->mem->regions + n, reg, sizeof *reg);
        reg->memory_size = start_addr - reg->guest_phys_addr;
        assert(reg->memory_size);
        change = memlast + 1 - reg->guest_phys_addr;
        reg = dev->mem->regions + n;
        reg->memory_size -= change;
        assert(reg->memory_size);
        reg->guest_phys_addr += change;
        reg->userspace_addr += change;
        /* Never add more than 1 region */
        assert(dev->mem->nregions == n);
        ++dev->mem->nregions;
        ++split;
    }
}

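/* Regions are merged only when they are contiguous in both the guest
 * physical and the userspace address spaces; a neighbour that is adjacent
 * in one space but not the other must stay a separate region, since each
 * vhost_memory_region describes a single linear GPA-to-HVA mapping. */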
/* Called after unassign, so no regions overlap the given range. */
static void vhost_dev_assign_memory(struct vhost_dev *dev,
                                    uint64_t start_addr,
                                    uint64_t size,
                                    uint64_t uaddr)
{
    int from, to;
    struct vhost_memory_region *merged = NULL;
    for (from = 0, to = 0; from < dev->mem->nregions; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t prlast, urlast;
        uint64_t pmlast, umlast;
        uint64_t s, e, u;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }
        prlast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        pmlast = range_get_last(start_addr, size);
        urlast = range_get_last(reg->userspace_addr, reg->memory_size);
        umlast = range_get_last(uaddr, size);

        /* check for overlapping regions: should never happen. */
        assert(prlast < start_addr || pmlast < reg->guest_phys_addr);
        /* Not an adjacent or overlapping region - do not merge. */
        if ((prlast + 1 != start_addr || urlast + 1 != uaddr) &&
            (pmlast + 1 != reg->guest_phys_addr ||
             umlast + 1 != reg->userspace_addr)) {
            continue;
        }

        if (merged) {
            --to;
            assert(to >= 0);
        } else {
            merged = reg;
        }
        u = MIN(uaddr, reg->userspace_addr);
        s = MIN(start_addr, reg->guest_phys_addr);
        e = MAX(pmlast, prlast);
        uaddr = merged->userspace_addr = u;
        start_addr = merged->guest_phys_addr = s;
        size = merged->memory_size = e - s + 1;
        assert(merged->memory_size);
    }

    if (!merged) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        memset(reg, 0, sizeof *reg);
        reg->memory_size = size;
        assert(reg->memory_size);
        reg->guest_phys_addr = start_addr;
        reg->userspace_addr = uaddr;
        ++to;
    }
    assert(to <= dev->mem->nregions + 1);
    dev->mem->nregions = to;
}

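/* The log must cover the highest guest physical address the kernel can
 * log into: every region in the memory table plus every used ring.  The
 * size in chunks is therefore max(last_addr) / VHOST_LOG_CHUNK + 1 over
 * all of them.  For example, a region ending at GPA 0xffffffff (4GiB)
 * needs 0xffffffff / VHOST_LOG_CHUNK + 1 = 16384 chunks, i.e. a 128KiB
 * log, assuming 256KiB chunks (64-bit vhost_log_chunk_t). */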
static uint64_t vhost_get_log_size(struct vhost_dev *dev)
{
    uint64_t log_size = 0;
    int i;
    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        uint64_t last = range_get_last(reg->guest_phys_addr,
                                       reg->memory_size);
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        uint64_t last = vq->used_phys + vq->used_size - 1;
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    return log_size;
}

static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
{
    vhost_log_chunk_t *log;
    uint64_t log_base;
    int r;

    log = g_malloc0(size * sizeof *log);
    log_base = (uint64_t)(unsigned long)log;
    r = ioctl(dev->control, VHOST_SET_LOG_BASE, &log_base);
    assert(r >= 0);
    /* Sync only the range covered by the old log */
    if (dev->log_size) {
        vhost_log_sync_range(dev, 0, dev->log_size * VHOST_LOG_CHUNK - 1);
    }
    if (dev->log) {
        g_free(dev->log);
    }
    dev->log = log;
    dev->log_size = size;
}

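/* The vring addresses handed to the kernel are QEMU virtual addresses
 * obtained from cpu_physical_memory_map() at start time.  If a memory
 * update remapped the guest memory backing a ring, those pointers would go
 * stale, so re-map each overlapping ring and check that it still lands at
 * the same address before accepting the update. */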
static int vhost_verify_ring_mappings(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int i;
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        hwaddr l;
        void *p;

        if (!ranges_overlap(start_addr, size, vq->ring_phys, vq->ring_size)) {
            continue;
        }
        l = vq->ring_size;
        p = cpu_physical_memory_map(vq->ring_phys, &l, 1);
        if (!p || l != vq->ring_size) {
            fprintf(stderr, "Unable to map ring buffer for ring %d\n", i);
            return -ENOMEM;
        }
        if (p != vq->ring) {
            fprintf(stderr, "Ring buffer relocated for ring %d\n", i);
            return -EBUSY;
        }
        cpu_physical_memory_unmap(p, l, 0, 0);
    }
    return 0;
}

static struct vhost_memory_region *vhost_dev_find_reg(struct vhost_dev *dev,
                                                      uint64_t start_addr,
                                                      uint64_t size)
{
    int i, n = dev->mem->nregions;
    for (i = 0; i < n; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        if (ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                           start_addr, size)) {
            return reg;
        }
    }
    return NULL;
}

static bool vhost_dev_cmp_memory(struct vhost_dev *dev,
                                 uint64_t start_addr,
                                 uint64_t size,
                                 uint64_t uaddr)
{
    struct vhost_memory_region *reg = vhost_dev_find_reg(dev, start_addr, size);
    uint64_t reglast;
    uint64_t memlast;

    if (!reg) {
        return true;
    }

    reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
    memlast = range_get_last(start_addr, size);

    /* Need to extend region? */
    if (start_addr < reg->guest_phys_addr || memlast > reglast) {
        return true;
    }
    /* userspace_addr changed? */
    return uaddr != reg->userspace_addr + start_addr - reg->guest_phys_addr;
}

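/* Keep the vhost memory table in sync with one MemoryRegionSection.
 * Sections with dirty logging enabled (such as a VGA framebuffer) are
 * treated as removals: the kernel would bypass QEMU's dirty tracking for
 * them, so such memory must not be handed to vhost. */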
static void vhost_set_memory(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool add)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    bool log_dirty = memory_region_is_logging(section->mr);
    int s = offsetof(struct vhost_memory, regions) +
        (dev->mem->nregions + 1) * sizeof dev->mem->regions[0];
    void *ram;

    dev->mem = g_realloc(dev->mem, s);

    if (log_dirty) {
        add = false;
    }

    assert(size);

    /* Optimize no-change case. At least cirrus_vga does this a lot at this time. */
    ram = memory_region_get_ram_ptr(section->mr) + section->offset_within_region;
    if (add) {
        if (!vhost_dev_cmp_memory(dev, start_addr, size, (uintptr_t)ram)) {
            /* Region exists with same address. Nothing to do. */
            return;
        }
    } else {
        if (!vhost_dev_find_reg(dev, start_addr, size)) {
            /* Removing region that we don't access. Nothing to do. */
            return;
        }
    }

    if (add) {
        /* Remove old mapping for this memory, if any. */
        vhost_dev_unassign_memory(dev, start_addr, size);

        /* Add given mapping, merging adjacent regions if any */
        vhost_dev_assign_memory(dev, start_addr, size, (uintptr_t)ram);
    } else {
        /* Remove old mapping for this memory, if any. */
        vhost_dev_unassign_memory(dev, start_addr, size);
    }
    dev->mem_changed_start_addr = MIN(dev->mem_changed_start_addr, start_addr);
    dev->mem_changed_end_addr = MAX(dev->mem_changed_end_addr,
                                    start_addr + size - 1);
    dev->memory_changed = true;
}

static bool vhost_section(MemoryRegionSection *section)
{
    return memory_region_is_ram(section->mr);
}

static void vhost_begin(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    dev->mem_changed_end_addr = 0;
    dev->mem_changed_start_addr = -1;
}

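/* Memory updates arrive as a begin/region_add/region_del/commit
 * transaction: vhost_begin() resets the accumulated change range,
 * vhost_set_memory() widens it for every added or removed section, and the
 * commit below pushes the whole batch to the kernel with a single
 * VHOST_SET_MEM_TABLE ioctl, resizing the dirty log around the update as
 * the ordering comments inside explain. */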
static void vhost_commit(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    hwaddr start_addr = 0;
    ram_addr_t size = 0;
    uint64_t log_size;
    int r;

    if (!dev->memory_changed) {
        return;
    }
    if (!dev->started) {
        return;
    }
    if (dev->mem_changed_start_addr > dev->mem_changed_end_addr) {
        return;
    }

    start_addr = dev->mem_changed_start_addr;
    size = dev->mem_changed_end_addr - dev->mem_changed_start_addr + 1;

    r = vhost_verify_ring_mappings(dev, start_addr, size);
    assert(r >= 0);

    if (!dev->log_enabled) {
        r = ioctl(dev->control, VHOST_SET_MEM_TABLE, dev->mem);
        assert(r >= 0);
        dev->memory_changed = false;
        return;
    }
    log_size = vhost_get_log_size(dev);
    /* We allocate an extra 4K bytes to log,
     * to reduce the number of reallocations. */
#define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
    /* To log more, must increase log size before table update. */
    if (dev->log_size < log_size) {
        vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
    }
    r = ioctl(dev->control, VHOST_SET_MEM_TABLE, dev->mem);
    assert(r >= 0);
    /* To log less, can only decrease log size after table update. */
    if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
        vhost_dev_log_resize(dev, log_size);
    }
    dev->memory_changed = false;
}

static void vhost_region_add(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);

    if (!vhost_section(section)) {
        return;
    }

    ++dev->n_mem_sections;
    dev->mem_sections = g_renew(MemoryRegionSection, dev->mem_sections,
                                dev->n_mem_sections);
    dev->mem_sections[dev->n_mem_sections - 1] = *section;
    vhost_set_memory(listener, section, true);
}

static void vhost_region_del(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int i;

    if (!vhost_section(section)) {
        return;
    }

    vhost_set_memory(listener, section, false);
    for (i = 0; i < dev->n_mem_sections; ++i) {
        if (dev->mem_sections[i].offset_within_address_space
            == section->offset_within_address_space) {
            --dev->n_mem_sections;
            memmove(&dev->mem_sections[i], &dev->mem_sections[i + 1],
                    (dev->n_mem_sections - i) * sizeof(*dev->mem_sections));
            break;
        }
    }
}

static void vhost_region_nop(MemoryListener *listener,
                             MemoryRegionSection *section)
{
}

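/* Tell the kernel where one virtqueue lives.  The desc/avail/used fields
 * are QEMU userspace virtual addresses (the kernel accesses the rings
 * through them), while log_guest_addr is the guest physical address of the
 * used ring, which the kernel needs to mark it dirty when logging is
 * enabled. */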
static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx, bool enable_log)
{
    struct vhost_vring_addr addr = {
        .index = idx,
        .desc_user_addr = (uint64_t)(unsigned long)vq->desc,
        .avail_user_addr = (uint64_t)(unsigned long)vq->avail,
        .used_user_addr = (uint64_t)(unsigned long)vq->used,
        .log_guest_addr = vq->used_phys,
        .flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0,
    };
    int r = ioctl(dev->control, VHOST_SET_VRING_ADDR, &addr);
    if (r < 0) {
        return -errno;
    }
    return 0;
}

static int vhost_dev_set_features(struct vhost_dev *dev, bool enable_log)
{
    uint64_t features = dev->acked_features;
    int r;
    if (enable_log) {
        features |= 0x1 << VHOST_F_LOG_ALL;
    }
    r = ioctl(dev->control, VHOST_SET_FEATURES, &features);
    return r < 0 ? -errno : 0;
}

static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
{
    int r, t, i = 0;
    r = vhost_dev_set_features(dev, enable_log);
    if (r < 0) {
        goto err_features;
    }
    for (i = 0; i < dev->nvqs; ++i) {
        r = vhost_virtqueue_set_addr(dev, dev->vqs + i, i,
                                     enable_log);
        if (r < 0) {
            goto err_vq;
        }
    }
    return 0;
err_vq:
    for (; i >= 0; --i) {
        t = vhost_virtqueue_set_addr(dev, dev->vqs + i, i,
                                     dev->log_enabled);
        assert(t >= 0);
    }
    t = vhost_dev_set_features(dev, dev->log_enabled);
    assert(t >= 0);
err_features:
    return r;
}

static int vhost_migration_log(MemoryListener *listener, int enable)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int r;
    if (!!enable == dev->log_enabled) {
        return 0;
    }
    if (!dev->started) {
        dev->log_enabled = enable;
        return 0;
    }
    if (!enable) {
        r = vhost_dev_set_log(dev, false);
        if (r < 0) {
            return r;
        }
        if (dev->log) {
            g_free(dev->log);
        }
        dev->log = NULL;
        dev->log_size = 0;
    } else {
        vhost_dev_log_resize(dev, vhost_get_log_size(dev));
        r = vhost_dev_set_log(dev, true);
        if (r < 0) {
            return r;
        }
    }
    dev->log_enabled = enable;
    return 0;
}

static void vhost_log_global_start(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, true);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_global_stop(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, false);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_start(MemoryListener *listener,
                            MemoryRegionSection *section)
{
    /* FIXME: implement */
}

static void vhost_log_stop(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    /* FIXME: implement */
}

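/* Bring one virtqueue up in the kernel: program the ring size and the
 * last-seen avail index, map the desc/avail/used rings so their userspace
 * addresses can be handed to VHOST_SET_VRING_ADDR, then wire up the kick
 * eventfd.  On failure, unmap whatever was mapped, in reverse order. */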
static int vhost_virtqueue_start(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
{
    hwaddr s, l, a;
    int r;
    int vhost_vq_index = idx - dev->vq_index;
    struct vhost_vring_file file = {
        .index = vhost_vq_index
    };
    struct vhost_vring_state state = {
        .index = vhost_vq_index
    };
    struct VirtQueue *vvq = virtio_get_queue(vdev, idx);

    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);

    vq->num = state.num = virtio_queue_get_num(vdev, idx);
    r = ioctl(dev->control, VHOST_SET_VRING_NUM, &state);
    if (r) {
        return -errno;
    }

    state.num = virtio_queue_get_last_avail_idx(vdev, idx);
    r = ioctl(dev->control, VHOST_SET_VRING_BASE, &state);
    if (r) {
        return -errno;
    }

    s = l = virtio_queue_get_desc_size(vdev, idx);
    a = virtio_queue_get_desc_addr(vdev, idx);
    vq->desc = cpu_physical_memory_map(a, &l, 0);
    if (!vq->desc || l != s) {
        r = -ENOMEM;
        goto fail_alloc_desc;
    }
    s = l = virtio_queue_get_avail_size(vdev, idx);
    a = virtio_queue_get_avail_addr(vdev, idx);
    vq->avail = cpu_physical_memory_map(a, &l, 0);
    if (!vq->avail || l != s) {
        r = -ENOMEM;
        goto fail_alloc_avail;
    }
    vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
    vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
    vq->used = cpu_physical_memory_map(a, &l, 1);
    if (!vq->used || l != s) {
        r = -ENOMEM;
        goto fail_alloc_used;
    }

    vq->ring_size = s = l = virtio_queue_get_ring_size(vdev, idx);
    vq->ring_phys = a = virtio_queue_get_ring_addr(vdev, idx);
    vq->ring = cpu_physical_memory_map(a, &l, 1);
    if (!vq->ring || l != s) {
        r = -ENOMEM;
        goto fail_alloc_ring;
    }

    r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
    if (r < 0) {
        r = -errno;
        goto fail_alloc;
    }

    file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
    r = ioctl(dev->control, VHOST_SET_VRING_KICK, &file);
    if (r) {
        r = -errno;
        goto fail_kick;
    }

    /* Clear and discard previous events if any. */
    event_notifier_test_and_clear(&vq->masked_notifier);

    return 0;

fail_kick:
fail_alloc:
    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, 0);
fail_alloc_ring:
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              0, 0);
fail_alloc_used:
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, 0);
fail_alloc_avail:
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, 0);
fail_alloc_desc:
    return r;
}

static void vhost_virtqueue_stop(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
{
    struct vhost_vring_state state = {
        .index = idx - dev->vq_index
    };
    int r;
    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);
    r = ioctl(dev->control, VHOST_GET_VRING_BASE, &state);
    if (r < 0) {
        fprintf(stderr, "vhost VQ %d ring restore failed: %d\n", idx, r);
        fflush(stderr);
    }
    virtio_queue_set_last_avail_idx(vdev, idx, state.num);
    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, virtio_queue_get_ring_size(vdev, idx));
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              1, virtio_queue_get_used_size(vdev, idx));
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, virtio_queue_get_avail_size(vdev, idx));
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, virtio_queue_get_desc_size(vdev, idx));
}

static void vhost_eventfd_add(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static void vhost_eventfd_del(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static int vhost_virtqueue_init(struct vhost_dev *dev,
                                struct vhost_virtqueue *vq, int n)
{
    struct vhost_vring_file file = {
        .index = n,
    };
    int r = event_notifier_init(&vq->masked_notifier, 0);
    if (r < 0) {
        return r;
    }

    file.fd = event_notifier_get_fd(&vq->masked_notifier);
    r = ioctl(dev->control, VHOST_SET_VRING_CALL, &file);
    if (r) {
        r = -errno;
        goto fail_call;
    }
    return 0;
fail_call:
    event_notifier_cleanup(&vq->masked_notifier);
    return r;
}

static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
{
    event_notifier_cleanup(&vq->masked_notifier);
}

int vhost_dev_init(struct vhost_dev *hdev, int devfd, const char *devpath,
                   bool force)
{
    uint64_t features;
    int i, r;
    if (devfd >= 0) {
        hdev->control = devfd;
    } else {
        hdev->control = open(devpath, O_RDWR);
        if (hdev->control < 0) {
            return -errno;
        }
    }
    r = ioctl(hdev->control, VHOST_SET_OWNER, NULL);
    if (r < 0) {
        goto fail;
    }

    r = ioctl(hdev->control, VHOST_GET_FEATURES, &features);
    if (r < 0) {
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_init(hdev, hdev->vqs + i, i);
        if (r < 0) {
            goto fail_vq;
        }
    }
    hdev->features = features;

    hdev->memory_listener = (MemoryListener) {
        .begin = vhost_begin,
        .commit = vhost_commit,
        .region_add = vhost_region_add,
        .region_del = vhost_region_del,
        .region_nop = vhost_region_nop,
        .log_start = vhost_log_start,
        .log_stop = vhost_log_stop,
        .log_sync = vhost_log_sync,
        .log_global_start = vhost_log_global_start,
        .log_global_stop = vhost_log_global_stop,
        .eventfd_add = vhost_eventfd_add,
        .eventfd_del = vhost_eventfd_del,
    };
    hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
    hdev->n_mem_sections = 0;
    hdev->mem_sections = NULL;
    hdev->log = NULL;
    hdev->log_size = 0;
    hdev->log_enabled = false;
    hdev->started = false;
    hdev->memory_changed = false;
    memory_listener_register(&hdev->memory_listener, &address_space_memory);
    hdev->force = force;
    return 0;
fail_vq:
    while (--i >= 0) {
        vhost_virtqueue_cleanup(hdev->vqs + i);
    }
fail:
    r = -errno;
    close(hdev->control);
    return r;
}

void vhost_dev_cleanup(struct vhost_dev *hdev)
{
    int i;
    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_cleanup(hdev->vqs + i);
    }
    memory_listener_unregister(&hdev->memory_listener);
    g_free(hdev->mem);
    g_free(hdev->mem_sections);
    close(hdev->control);
}

bool vhost_dev_query(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    return !vdev->binding->query_guest_notifiers ||
        vdev->binding->query_guest_notifiers(vdev->binding_opaque) ||
        hdev->force;
}

/* Stop processing guest IO notifications in qemu.
 * Start processing them in vhost in kernel.
 */
int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;
    if (!vdev->binding->set_host_notifier) {
        fprintf(stderr, "binding does not support host notifiers\n");
        r = -ENOSYS;
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i) {
        r = vdev->binding->set_host_notifier(vdev->binding_opaque,
                                             hdev->vq_index + i,
                                             true);
        if (r < 0) {
            fprintf(stderr, "vhost VQ %d notifier binding failed: %d\n", i, -r);
            goto fail_vq;
        }
    }

    return 0;
fail_vq:
    while (--i >= 0) {
        r = vdev->binding->set_host_notifier(vdev->binding_opaque,
                                             hdev->vq_index + i,
                                             false);
        if (r < 0) {
            fprintf(stderr, "vhost VQ %d notifier cleanup error: %d\n", i, -r);
            fflush(stderr);
        }
        assert(r >= 0);
    }
fail:
    return r;
}

/* Stop processing guest IO notifications in vhost.
 * Start processing them in qemu.
 * This might actually run the qemu handlers right away,
 * so virtio in qemu must be completely setup when this is called.
 */
void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;

    for (i = 0; i < hdev->nvqs; ++i) {
        r = vdev->binding->set_host_notifier(vdev->binding_opaque,
                                             hdev->vq_index + i,
                                             false);
        if (r < 0) {
            fprintf(stderr, "vhost VQ %d notifier cleanup failed: %d\n", i, -r);
            fflush(stderr);
        }
        assert(r >= 0);
    }
}

/* Test and clear event pending status.
 * Should be called after unmask to avoid losing events.
 */
bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
{
    struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index;
    assert(hdev->started);
    assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);
    return event_notifier_test_and_clear(&vq->masked_notifier);
}

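/* Masking works by swapping the file descriptor behind
 * VHOST_SET_VRING_CALL: while masked, the kernel signals the internal
 * masked_notifier, where events are parked until picked up by
 * vhost_virtqueue_pending(); unmasking points the call back at the guest
 * notifier so interrupts reach the guest directly again. */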
/* Mask/unmask events from this vq. */
void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
                          bool mask)
{
    struct VirtQueue *vvq = virtio_get_queue(vdev, n);
    int r, index = n - hdev->vq_index;

    assert(hdev->started);
    assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);

    struct vhost_vring_file file = {
        .index = index
    };
    if (mask) {
        file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier);
    } else {
        file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
    }
    r = ioctl(hdev->control, VHOST_SET_VRING_CALL, &file);
    assert(r >= 0);
}

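/* Start sequence: negotiate features first, then install the memory table,
 * then start each virtqueue; the dirty log, if needed, is installed last,
 * once its required size is known.  The failure path unwinds the
 * virtqueues that were already started. */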
/* Host notifiers must be enabled at this point. */
int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;

    hdev->started = true;

    r = vhost_dev_set_features(hdev, hdev->log_enabled);
    if (r < 0) {
        goto fail_features;
    }
    r = ioctl(hdev->control, VHOST_SET_MEM_TABLE, hdev->mem);
    if (r < 0) {
        r = -errno;
        goto fail_mem;
    }
    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_start(hdev,
                                  vdev,
                                  hdev->vqs + i,
                                  hdev->vq_index + i);
        if (r < 0) {
            goto fail_vq;
        }
    }

    if (hdev->log_enabled) {
        uint64_t log_base;

        hdev->log_size = vhost_get_log_size(hdev);
        hdev->log = hdev->log_size ?
            g_malloc0(hdev->log_size * sizeof *hdev->log) : NULL;
        /* The ioctl takes a pointer to the 64-bit base, as in
         * vhost_dev_log_resize(). */
        log_base = (uint64_t)(unsigned long)hdev->log;
        r = ioctl(hdev->control, VHOST_SET_LOG_BASE, &log_base);
        if (r < 0) {
            r = -errno;
            goto fail_log;
        }
    }

    return 0;
fail_log:
fail_vq:
    while (--i >= 0) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }
fail_mem:
fail_features:
    hdev->started = false;
    return r;
}

/* Host notifiers must be enabled at this point. */
void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i;

    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }
    vhost_log_sync_range(hdev, 0, ~0x0ull);

    hdev->started = false;
    g_free(hdev->log);
    hdev->log = NULL;
    hdev->log_size = 0;
}