/*
 * vhost support
 *
 * Copyright Red Hat, Inc. 2010
 *
 * Authors:
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/virtio/vhost.h"
#include "hw/hw.h"
#include "qemu/atomic.h"
#include "qemu/range.h"
#include "qemu/error-report.h"
#include "qemu/memfd.h"
#include <linux/vhost.h>
#include "exec/address-spaces.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
#include "migration/blocker.h"
#include "sysemu/dma.h"
#include "trace.h"

/* enabled until disconnected backend stabilizes */
#define _VHOST_DEBUG 1

#ifdef _VHOST_DEBUG
#define VHOST_OPS_DEBUG(fmt, ...) \
    do { error_report(fmt ": %s (%d)", ## __VA_ARGS__, \
                      strerror(errno), errno); } while (0)
#else
#define VHOST_OPS_DEBUG(fmt, ...) \
    do { } while (0)
#endif

static struct vhost_log *vhost_log;
static struct vhost_log *vhost_log_shm;

static unsigned int used_memslots;
static QLIST_HEAD(, vhost_dev) vhost_devices =
    QLIST_HEAD_INITIALIZER(vhost_devices);

bool vhost_has_free_slot(void)
{
    unsigned int slots_limit = ~0U;
    struct vhost_dev *hdev;

    QLIST_FOREACH(hdev, &vhost_devices, entry) {
        unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
        slots_limit = MIN(slots_limit, r);
    }
    return slots_limit > used_memslots;
}
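
/*
 * Dirty-log layout, as used by vhost_dev_sync_region() below: the log is an
 * array of vhost_log_chunk_t entries, each covering VHOST_LOG_CHUNK bytes of
 * guest physical address space with one bit per VHOST_LOG_PAGE page.  With
 * the usual definitions in hw/virtio/vhost.h (4 KiB pages, 64-bit chunks),
 * a set bit n in the chunk that starts at guest address 'addr' marks the
 * page at addr + n * VHOST_LOG_PAGE as dirty.
 */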
static void vhost_dev_sync_region(struct vhost_dev *dev,
                                  MemoryRegionSection *section,
                                  uint64_t mfirst, uint64_t mlast,
                                  uint64_t rfirst, uint64_t rlast)
{
    vhost_log_chunk_t *log = dev->log->log;
    uint64_t start = MAX(mfirst, rfirst);
    uint64_t end = MIN(mlast, rlast);
    vhost_log_chunk_t *from = log + start / VHOST_LOG_CHUNK;
    vhost_log_chunk_t *to = log + end / VHOST_LOG_CHUNK + 1;
    uint64_t addr = QEMU_ALIGN_DOWN(start, VHOST_LOG_CHUNK);

    if (end < start) {
        return;
    }
    assert(end / VHOST_LOG_CHUNK < dev->log_size);
    assert(start / VHOST_LOG_CHUNK < dev->log_size);

    for (; from < to; ++from) {
        vhost_log_chunk_t log;
        /* We first check with non-atomic: much cheaper,
         * and we expect non-dirty to be the common case. */
        if (!*from) {
            addr += VHOST_LOG_CHUNK;
            continue;
        }
        /* Data must be read atomically. We don't really need barrier semantics
         * but it's easier to use atomic_* than roll our own. */
        log = atomic_xchg(from, 0);
        while (log) {
            int bit = ctzl(log);
            hwaddr page_addr;
            hwaddr section_offset;
            hwaddr mr_offset;
            page_addr = addr + bit * VHOST_LOG_PAGE;
            section_offset = page_addr - section->offset_within_address_space;
            mr_offset = section_offset + section->offset_within_region;
            memory_region_set_dirty(section->mr, mr_offset, VHOST_LOG_PAGE);
            log &= ~(0x1ull << bit);
        }
        addr += VHOST_LOG_CHUNK;
    }
}
static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
                                   MemoryRegionSection *section,
                                   hwaddr first,
                                   hwaddr last)
{
    int i;
    hwaddr start_addr;
    hwaddr end_addr;

    if (!dev->log_enabled || !dev->started) {
        return 0;
    }
    start_addr = section->offset_within_address_space;
    end_addr = range_get_last(start_addr, int128_get64(section->size));
    start_addr = MAX(first, start_addr);
    end_addr = MIN(last, end_addr);

    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr,
                              reg->guest_phys_addr,
                              range_get_last(reg->guest_phys_addr,
                                             reg->memory_size));
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys,
                              range_get_last(vq->used_phys, vq->used_size));
    }
    return 0;
}

static void vhost_log_sync(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    vhost_sync_dirty_bitmap(dev, section, 0x0, ~0x0ULL);
}

static void vhost_log_sync_range(struct vhost_dev *dev,
                                 hwaddr first, hwaddr last)
{
    int i;

    /* FIXME: this is N^2 in number of sections */
    for (i = 0; i < dev->n_mem_sections; ++i) {
        MemoryRegionSection *section = &dev->mem_sections[i];
        vhost_sync_dirty_bitmap(dev, section, first, last);
    }
}

static uint64_t vhost_get_log_size(struct vhost_dev *dev)
{
    uint64_t log_size = 0;
    int i;

    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        uint64_t last = range_get_last(reg->guest_phys_addr,
                                       reg->memory_size);
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        uint64_t last = vq->used_phys + vq->used_size - 1;
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    return log_size;
}
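
/*
 * The log has to cover the highest guest physical address that can be
 * dirtied, whether it lies inside a memory region or inside a used ring,
 * so the size above is the maximum of both scans, measured in chunks.
 * As a rough worked example (with the usual 256 KiB-per-chunk layout from
 * hw/virtio/vhost.h): a region ending at 4 GiB gives last = 0xffffffff,
 * hence 0xffffffff / VHOST_LOG_CHUNK + 1 = 16384 chunks, i.e. a 128 KiB
 * bitmap of 64-bit entries.
 */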
static struct vhost_log *vhost_log_alloc(uint64_t size, bool share)
{
    Error *err = NULL;
    struct vhost_log *log;
    uint64_t logsize = size * sizeof(*(log->log));
    int fd = -1;

    log = g_new0(struct vhost_log, 1);
    if (share) {
        log->log = qemu_memfd_alloc("vhost-log", logsize,
                                    F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
                                    &fd, &err);
        if (err) {
            error_report_err(err);
            g_free(log);
            return NULL;
        }
        memset(log->log, 0, logsize);
    } else {
        log->log = g_malloc0(logsize);
    }

    log->size = size;
    log->refcnt = 1;
    log->fd = fd;

    return log;
}

static struct vhost_log *vhost_log_get(uint64_t size, bool share)
{
    struct vhost_log *log = share ? vhost_log_shm : vhost_log;

    if (!log || log->size != size) {
        log = vhost_log_alloc(size, share);
        if (share) {
            vhost_log_shm = log;
        } else {
            vhost_log = log;
        }
    } else {
        ++log->refcnt;
    }

    return log;
}
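
/*
 * Logs are reference-counted and shared between all vhost devices that need
 * the same size: one plain allocation (vhost_log) for backends that access
 * it through QEMU's own address space, and one memfd-backed log
 * (vhost_log_shm) for backends that require a shareable mapping, so that an
 * external backend process such as vhost-user can map the same bitmap.
 */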
static void vhost_log_put(struct vhost_dev *dev, bool sync)
{
    struct vhost_log *log = dev->log;

    if (!log) {
        return;
    }

    --log->refcnt;
    if (log->refcnt == 0) {
        /* Sync only the range covered by the old log */
        if (dev->log_size && sync) {
            vhost_log_sync_range(dev, 0, dev->log_size * VHOST_LOG_CHUNK - 1);
        }

        if (vhost_log == log) {
            g_free(log->log);
            vhost_log = NULL;
        } else if (vhost_log_shm == log) {
            qemu_memfd_free(log->log, log->size * sizeof(*(log->log)),
                            log->fd);
            vhost_log_shm = NULL;
        }

        g_free(log);
    }

    dev->log = NULL;
    dev->log_size = 0;
}

static bool vhost_dev_log_is_shared(struct vhost_dev *dev)
{
    return dev->vhost_ops->vhost_requires_shm_log &&
           dev->vhost_ops->vhost_requires_shm_log(dev);
}

static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
{
    struct vhost_log *log = vhost_log_get(size, vhost_dev_log_is_shared(dev));
    uint64_t log_base = (uintptr_t)log->log;
    int r;

    /* inform backend of log switching, this must be done before
       releasing the current log, to ensure no logging is lost */
    r = dev->vhost_ops->vhost_set_log_base(dev, log_base, log);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_log_base failed");
    }

    vhost_log_put(dev, true);
    dev->log = log;
    dev->log_size = size;
}

static int vhost_dev_has_iommu(struct vhost_dev *dev)
{
    VirtIODevice *vdev = dev->vdev;

    return virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM);
}

static void *vhost_memory_map(struct vhost_dev *dev, hwaddr addr,
                              hwaddr *plen, int is_write)
{
    if (!vhost_dev_has_iommu(dev)) {
        return cpu_physical_memory_map(addr, plen, is_write);
    } else {
        return (void *)(uintptr_t)addr;
    }
}

static void vhost_memory_unmap(struct vhost_dev *dev, void *buffer,
                               hwaddr len, int is_write,
                               hwaddr access_len)
{
    if (!vhost_dev_has_iommu(dev)) {
        cpu_physical_memory_unmap(buffer, len, is_write, access_len);
    }
}
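
/*
 * Note on the IOMMU case above: when the device sits behind a virtio IOMMU
 * (VIRTIO_F_IOMMU_PLATFORM), ring addresses are not mapped here at all; the
 * guest IOVA is handed to the backend unchanged and is translated on demand
 * through the IOTLB miss path (vhost_device_iotlb_miss() below), so there is
 * nothing to unmap either.
 */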
static int vhost_verify_ring_part_mapping(void *ring_hva,
                                          uint64_t ring_gpa,
                                          uint64_t ring_size,
                                          void *reg_hva,
                                          uint64_t reg_gpa,
                                          uint64_t reg_size)
{
    uint64_t hva_ring_offset;
    uint64_t ring_last = range_get_last(ring_gpa, ring_size);
    uint64_t reg_last = range_get_last(reg_gpa, reg_size);

    /* the ring does not overlap this region at all */
    if (ring_last < reg_gpa || ring_gpa > reg_last) {
        return 0;
    }
    /* check that the whole ring is mapped */
    if (ring_last > reg_last) {
        return -ENOMEM;
    }
    /* check that the ring's MemoryRegion wasn't replaced */
    hva_ring_offset = ring_gpa - reg_gpa;
    if (ring_hva != reg_hva + hva_ring_offset) {
        return -EBUSY;
    }

    return 0;
}
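
/*
 * Return values above: 0 means the ring either does not intersect this
 * region or is fully and correctly mapped inside it; -ENOMEM means the ring
 * starts inside the region but runs past its end; -EBUSY means the guest
 * physical range is still present but now maps to a different host address,
 * i.e. the backing MemoryRegion changed underneath us.
 */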
static int vhost_verify_ring_mappings(struct vhost_dev *dev,
                                      void *reg_hva,
                                      uint64_t reg_gpa,
                                      uint64_t reg_size)
{
    int i, j;
    int r = 0;
    const char *part_name[] = {
        "descriptor table",
        "available ring",
        "used ring"
    };

    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;

        if (vq->desc_phys == 0) {
            continue;
        }

        j = 0;
        r = vhost_verify_ring_part_mapping(
                vq->desc, vq->desc_phys, vq->desc_size,
                reg_hva, reg_gpa, reg_size);
        if (r) {
            break;
        }

        j++;
        r = vhost_verify_ring_part_mapping(
                vq->avail, vq->avail_phys, vq->avail_size,
                reg_hva, reg_gpa, reg_size);
        if (r) {
            break;
        }

        j++;
        r = vhost_verify_ring_part_mapping(
                vq->used, vq->used_phys, vq->used_size,
                reg_hva, reg_gpa, reg_size);
        if (r) {
            break;
        }
    }

    if (r == -ENOMEM) {
        error_report("Unable to map %s for ring %d", part_name[j], i);
    } else if (r == -EBUSY) {
        error_report("%s relocated for ring %d", part_name[j], i);
    }
    return r;
}

static bool vhost_section(MemoryRegionSection *section)
{
    bool result;
    bool log_dirty = memory_region_get_dirty_log_mask(section->mr) &
                     ~(1 << DIRTY_MEMORY_MIGRATION);
    result = memory_region_is_ram(section->mr) &&
        !memory_region_is_rom(section->mr);

    /* Vhost doesn't handle any block which is doing dirty-tracking other
     * than migration; this typically fires on VGA areas.
     */
    result &= !log_dirty;

    trace_vhost_section(section->mr->name, result);
    return result;
}
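
/*
 * Memory listener protocol used below: vhost_begin() resets a temporary
 * section list, the region_add/region_nop callbacks (vhost_region_addnop())
 * append or merge each RAM section into it in address order, and
 * vhost_commit() then swaps the temporary list in, detects whether anything
 * actually changed, and if so rebuilds the vhost_memory region table and
 * pushes it to the backend with vhost_set_mem_table().
 */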
static void vhost_begin(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    dev->tmp_sections = NULL;
    dev->n_tmp_sections = 0;
}

static void vhost_commit(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    MemoryRegionSection *old_sections;
    int n_old_sections;
    uint64_t log_size;
    size_t regions_size;
    int r;
    int i;
    bool changed = false;

    /* Note we can be called before the device is started, but then
     * starting the device calls set_mem_table, so we need to have
     * built the data structures.
     */
    old_sections = dev->mem_sections;
    n_old_sections = dev->n_mem_sections;
    dev->mem_sections = dev->tmp_sections;
    dev->n_mem_sections = dev->n_tmp_sections;

    if (dev->n_mem_sections != n_old_sections) {
        changed = true;
    } else {
        /* Same size, lets check the contents */
        changed = n_old_sections && memcmp(dev->mem_sections, old_sections,
                         n_old_sections * sizeof(old_sections[0])) != 0;
    }

    trace_vhost_commit(dev->started, changed);
    if (!changed) {
        goto out;
    }

    /* Rebuild the regions list from the new sections list */
    regions_size = offsetof(struct vhost_memory, regions) +
                       dev->n_mem_sections * sizeof dev->mem->regions[0];
    dev->mem = g_realloc(dev->mem, regions_size);
    dev->mem->nregions = dev->n_mem_sections;
    used_memslots = dev->mem->nregions;
    for (i = 0; i < dev->n_mem_sections; i++) {
        struct vhost_memory_region *cur_vmr = dev->mem->regions + i;
        struct MemoryRegionSection *mrs = dev->mem_sections + i;

        cur_vmr->guest_phys_addr = mrs->offset_within_address_space;
        cur_vmr->memory_size     = int128_get64(mrs->size);
        cur_vmr->userspace_addr  =
            (uintptr_t)memory_region_get_ram_ptr(mrs->mr) +
            mrs->offset_within_region;
        cur_vmr->flags_padding   = 0;
    }

    if (!dev->started) {
        goto out;
    }

    for (i = 0; i < dev->mem->nregions; i++) {
        if (vhost_verify_ring_mappings(dev,
                       (void *)(uintptr_t)dev->mem->regions[i].userspace_addr,
                       dev->mem->regions[i].guest_phys_addr,
                       dev->mem->regions[i].memory_size)) {
            error_report("Verify ring failure on region %d", i);
            abort();
        }
    }

    if (!dev->log_enabled) {
        r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
        if (r < 0) {
            VHOST_OPS_DEBUG("vhost_set_mem_table failed");
        }
        goto out;
    }
    log_size = vhost_get_log_size(dev);
    /* We allocate an extra 4K bytes to log,
     * to reduce the number of reallocations. */
#define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
    /* To log more, must increase log size before table update. */
    if (dev->log_size < log_size) {
        vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
    }
    r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_mem_table failed");
    }
    /* To log less, can only decrease log size after table update. */
    if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
        vhost_dev_log_resize(dev, log_size);
    }

out:
    /* Deref the old list of sections, this must happen _after_ the
     * vhost_set_mem_table to ensure the client isn't still using the
     * section we're about to unref.
     */
    while (n_old_sections--) {
        memory_region_unref(old_sections[n_old_sections].mr);
    }
    g_free(old_sections);
}
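
/*
 * After a successful commit, dev->mem is a flat vhost_memory table: one
 * entry per merged RAM section, each giving the guest physical start
 * address, the size, and the QEMU virtual address that backs it.  This is
 * the table the backend uses to translate guest addresses found in the
 * rings into addresses it can dereference (or, for an external backend,
 * into offsets within the regions it has mapped).
 */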
/* Adds the section data to the tmp_section structure.
 * It relies on the listener calling us in memory address order
 * and for each region (via the _add and _nop methods) to
 * join neighbours.
 */
static void vhost_region_add_section(struct vhost_dev *dev,
                                     MemoryRegionSection *section)
{
    bool need_add = true;
    uint64_t mrs_size = int128_get64(section->size);
    uint64_t mrs_gpa = section->offset_within_address_space;
    uintptr_t mrs_host = (uintptr_t)memory_region_get_ram_ptr(section->mr) +
                         section->offset_within_region;

    trace_vhost_region_add_section(section->mr->name, mrs_gpa, mrs_size,
                                   mrs_host);

    if (dev->n_tmp_sections) {
        /* Since we already have at least one section, lets see if
         * this extends it; since we're scanning in order, we only
         * have to look at the last one, and the FlatView that calls
         * us shouldn't have overlaps.
         */
        MemoryRegionSection *prev_sec = dev->tmp_sections +
                                        (dev->n_tmp_sections - 1);
        uint64_t prev_gpa_start = prev_sec->offset_within_address_space;
        uint64_t prev_size = int128_get64(prev_sec->size);
        uint64_t prev_gpa_end   = range_get_last(prev_gpa_start, prev_size);
        uint64_t prev_host_start =
            (uintptr_t)memory_region_get_ram_ptr(prev_sec->mr) +
            prev_sec->offset_within_region;
        uint64_t prev_host_end   = range_get_last(prev_host_start, prev_size);

        if (prev_gpa_end + 1 == mrs_gpa &&
            prev_host_end + 1 == mrs_host &&
            section->mr == prev_sec->mr &&
            (!dev->vhost_ops->vhost_backend_can_merge ||
             dev->vhost_ops->vhost_backend_can_merge(dev,
                mrs_host, mrs_size,
                prev_host_start, prev_size))) {
            /* The two sections abut */
            need_add = false;
            prev_sec->size = int128_add(prev_sec->size, section->size);
            trace_vhost_region_add_section_abut(section->mr->name,
                                                mrs_size + prev_size);
        }
    }

    if (need_add) {
        ++dev->n_tmp_sections;
        dev->tmp_sections = g_renew(MemoryRegionSection, dev->tmp_sections,
                                    dev->n_tmp_sections);
        dev->tmp_sections[dev->n_tmp_sections - 1] = *section;
        /* The flatview isn't stable and we don't use it, making it NULL
         * means we can memcmp the list.
         */
        dev->tmp_sections[dev->n_tmp_sections - 1].fv = NULL;
        memory_region_ref(section->mr);
    }
}

/* Used for both add and nop callbacks */
static void vhost_region_addnop(MemoryListener *listener,
                                MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);

    if (!vhost_section(section)) {
        return;
    }
    vhost_region_add_section(dev, section);
}

static void vhost_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
{
    struct vhost_iommu *iommu = container_of(n, struct vhost_iommu, n);
    struct vhost_dev *hdev = iommu->hdev;
    hwaddr iova = iotlb->iova + iommu->iommu_offset;

    if (vhost_backend_invalidate_device_iotlb(hdev, iova,
                                              iotlb->addr_mask + 1)) {
        error_report("Failed to invalidate device IOTLB");
    }
}

static void vhost_iommu_region_add(MemoryListener *listener,
                                   MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         iommu_listener);
    struct vhost_iommu *iommu;
    Int128 end;

    if (!memory_region_is_iommu(section->mr)) {
        return;
    }

    iommu = g_malloc0(sizeof(*iommu));
    end = int128_add(int128_make64(section->offset_within_region),
                     section->size);
    end = int128_sub(end, int128_one());
    iommu_notifier_init(&iommu->n, vhost_iommu_unmap_notify,
                        IOMMU_NOTIFIER_UNMAP,
                        section->offset_within_region,
                        int128_get64(end));
    iommu->mr = section->mr;
    iommu->iommu_offset = section->offset_within_address_space -
                          section->offset_within_region;
    iommu->hdev = dev;
    memory_region_register_iommu_notifier(section->mr, &iommu->n);
    QLIST_INSERT_HEAD(&dev->iommu_list, iommu, iommu_next);
    /* TODO: can replay help performance here? */
}

static void vhost_iommu_region_del(MemoryListener *listener,
                                   MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         iommu_listener);
    struct vhost_iommu *iommu;

    if (!memory_region_is_iommu(section->mr)) {
        return;
    }

    QLIST_FOREACH(iommu, &dev->iommu_list, iommu_next) {
        if (iommu->mr == section->mr &&
            iommu->n.start == section->offset_within_region) {
            memory_region_unregister_iommu_notifier(iommu->mr,
                                                    &iommu->n);
            QLIST_REMOVE(iommu, iommu_next);
            g_free(iommu);
            break;
        }
    }
}
static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx, bool enable_log)
{
    struct vhost_vring_addr addr = {
        .index = idx,
        .desc_user_addr = (uint64_t)(unsigned long)vq->desc,
        .avail_user_addr = (uint64_t)(unsigned long)vq->avail,
        .used_user_addr = (uint64_t)(unsigned long)vq->used,
        .log_guest_addr = vq->used_phys,
        .flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0,
    };
    int r = dev->vhost_ops->vhost_set_vring_addr(dev, &addr);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_vring_addr failed");
        return -errno;
    }
    return 0;
}
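
/*
 * Note that desc/avail/used are passed as QEMU virtual addresses (the
 * mappings set up in vhost_virtqueue_start()), while log_guest_addr is the
 * guest physical address of the used ring: with VHOST_VRING_F_LOG set, the
 * backend uses it to record used-ring writes in the dirty log by guest
 * address, which is what the migration code expects.
 */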
static int vhost_dev_set_features(struct vhost_dev *dev,
                                  bool enable_log)
{
    uint64_t features = dev->acked_features;
    int r;

    if (enable_log) {
        features |= 0x1ULL << VHOST_F_LOG_ALL;
    }
    r = dev->vhost_ops->vhost_set_features(dev, features);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_features failed");
    }
    return r < 0 ? -errno : 0;
}

static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
{
    int r, i, idx;

    r = vhost_dev_set_features(dev, enable_log);
    if (r < 0) {
        goto err_features;
    }
    for (i = 0; i < dev->nvqs; ++i) {
        idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
        r = vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
                                     enable_log);
        if (r < 0) {
            goto err_vq;
        }
    }
    return 0;
err_vq:
    for (; i >= 0; --i) {
        idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
        vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
                                 dev->log_enabled);
    }
    vhost_dev_set_features(dev, dev->log_enabled);
err_features:
    return r;
}

static int vhost_migration_log(MemoryListener *listener, int enable)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int r;

    if (!!enable == dev->log_enabled) {
        return 0;
    }
    if (!dev->started) {
        dev->log_enabled = enable;
        return 0;
    }
    if (!enable) {
        r = vhost_dev_set_log(dev, false);
        if (r < 0) {
            return r;
        }
        vhost_log_put(dev, false);
    } else {
        vhost_dev_log_resize(dev, vhost_get_log_size(dev));
        r = vhost_dev_set_log(dev, true);
        if (r < 0) {
            return r;
        }
    }
    dev->log_enabled = enable;
    return 0;
}
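
/*
 * Ordering matters in vhost_migration_log(): when logging is switched on,
 * the log is allocated and announced to the backend (vhost_dev_log_resize())
 * before VHOST_F_LOG_ALL is enabled, so the backend never writes into a
 * missing or undersized bitmap; when logging is switched off, the log is
 * only dropped after the backend has stopped using it.
 */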
static void vhost_log_global_start(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, true);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_global_stop(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, false);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_start(MemoryListener *listener,
                            MemoryRegionSection *section,
                            int old, int new)
{
    /* FIXME: implement */
}

static void vhost_log_stop(MemoryListener *listener,
                           MemoryRegionSection *section,
                           int old, int new)
{
    /* FIXME: implement */
}

/* The vhost driver natively knows how to handle the vrings of non
 * cross-endian legacy devices and modern devices. Only legacy devices
 * exposed to a bi-endian guest may require the vhost driver to use a
 * specific endianness.
 */
static inline bool vhost_needs_vring_endian(VirtIODevice *vdev)
{
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        return false;
    }
#ifdef HOST_WORDS_BIGENDIAN
    return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_LITTLE;
#else
    return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_BIG;
#endif
}

static int vhost_virtqueue_set_vring_endian_legacy(struct vhost_dev *dev,
                                                   bool is_big_endian,
                                                   int vhost_vq_index)
{
    struct vhost_vring_state s = {
        .index = vhost_vq_index,
        .num = is_big_endian
    };

    if (!dev->vhost_ops->vhost_set_vring_endian(dev, &s)) {
        return 0;
    }

    VHOST_OPS_DEBUG("vhost_set_vring_endian failed");
    if (errno == ENOTTY) {
        error_report("vhost does not support cross-endian");
        return -ENOSYS;
    }

    return -errno;
}

static int vhost_memory_region_lookup(struct vhost_dev *hdev,
                                      uint64_t gpa, uint64_t *uaddr,
                                      uint64_t *len)
{
    int i;

    for (i = 0; i < hdev->mem->nregions; i++) {
        struct vhost_memory_region *reg = hdev->mem->regions + i;

        if (gpa >= reg->guest_phys_addr &&
            reg->guest_phys_addr + reg->memory_size > gpa) {
            *uaddr = reg->userspace_addr + gpa - reg->guest_phys_addr;
            *len = reg->guest_phys_addr + reg->memory_size - gpa;
            return 0;
        }
    }

    return -EFAULT;
}
int vhost_device_iotlb_miss(struct vhost_dev *dev, uint64_t iova, int write)
{
    IOMMUTLBEntry iotlb;
    uint64_t uaddr, len;
    int ret = -EFAULT;

    rcu_read_lock();

    iotlb = address_space_get_iotlb_entry(dev->vdev->dma_as,
                                          iova, write);
    if (iotlb.target_as != NULL) {
        ret = vhost_memory_region_lookup(dev, iotlb.translated_addr,
                                         &uaddr, &len);
        if (ret) {
            error_report("Failed to look up the translated address "
                         "%"PRIx64, iotlb.translated_addr);
            goto out;
        }

        len = MIN(iotlb.addr_mask + 1, len);
        iova = iova & ~iotlb.addr_mask;

        ret = vhost_backend_update_device_iotlb(dev, iova, uaddr,
                                                len, iotlb.perm);
        if (ret) {
            error_report("Failed to update device IOTLB");
            goto out;
        }
    }
out:
    rcu_read_unlock();

    return ret;
}
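
/*
 * This is the slow path taken when the backend reports an IOTLB miss: the
 * IOVA is translated through the device's DMA address space, the resulting
 * guest physical address is mapped back to a QEMU virtual address via the
 * vhost memory table, and the iova -> uaddr mapping (clamped to the
 * translated entry) is pushed to the backend so it can retry the access.
 */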
static int vhost_virtqueue_start(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    hwaddr s, l, a;
    int r;
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
    struct vhost_vring_file file = {
        .index = vhost_vq_index
    };
    struct vhost_vring_state state = {
        .index = vhost_vq_index
    };
    struct VirtQueue *vvq = virtio_get_queue(vdev, idx);

    a = virtio_queue_get_desc_addr(vdev, idx);
    if (a == 0) {
        /* Queue might not be ready for start */
        return 0;
    }

    vq->num = state.num = virtio_queue_get_num(vdev, idx);
    r = dev->vhost_ops->vhost_set_vring_num(dev, &state);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_num failed");
        return -errno;
    }

    state.num = virtio_queue_get_last_avail_idx(vdev, idx);
    r = dev->vhost_ops->vhost_set_vring_base(dev, &state);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_base failed");
        return -errno;
    }

    if (vhost_needs_vring_endian(vdev)) {
        r = vhost_virtqueue_set_vring_endian_legacy(dev,
                                                    virtio_is_big_endian(vdev),
                                                    vhost_vq_index);
        if (r) {
            return -errno;
        }
    }

    vq->desc_size = s = l = virtio_queue_get_desc_size(vdev, idx);
    vq->desc_phys = a;
    vq->desc = vhost_memory_map(dev, a, &l, 0);
    if (!vq->desc || l != s) {
        r = -ENOMEM;
        goto fail_alloc_desc;
    }
    vq->avail_size = s = l = virtio_queue_get_avail_size(vdev, idx);
    vq->avail_phys = a = virtio_queue_get_avail_addr(vdev, idx);
    vq->avail = vhost_memory_map(dev, a, &l, 0);
    if (!vq->avail || l != s) {
        r = -ENOMEM;
        goto fail_alloc_avail;
    }
    vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
    vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
    vq->used = vhost_memory_map(dev, a, &l, 1);
    if (!vq->used || l != s) {
        r = -ENOMEM;
        goto fail_alloc_used;
    }

    r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
    if (r < 0) {
        r = -errno;
        goto fail_alloc;
    }

    file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
    r = dev->vhost_ops->vhost_set_vring_kick(dev, &file);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_kick failed");
        r = -errno;
        goto fail_kick;
    }

    /* Clear and discard previous events if any. */
    event_notifier_test_and_clear(&vq->masked_notifier);

    /* Init vring in unmasked state, unless guest_notifier_mask
     * will do it later.
     */
    if (!vdev->use_guest_notifier_mask) {
        /* TODO: check and handle errors. */
        vhost_virtqueue_mask(dev, vdev, idx, false);
    }

    if (k->query_guest_notifiers &&
        k->query_guest_notifiers(qbus->parent) &&
        virtio_queue_vector(vdev, idx) == VIRTIO_NO_VECTOR) {
        file.fd = -1;
        r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
        if (r) {
            goto fail_vector;
        }
    }

    return 0;

fail_vector:
fail_kick:
fail_alloc:
    vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
                       0, 0);
fail_alloc_used:
    vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
                       0, 0);
fail_alloc_avail:
    vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
                       0, 0);
fail_alloc_desc:
    return r;
}
static void vhost_virtqueue_stop(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
    struct vhost_vring_state state = {
        .index = vhost_vq_index,
    };
    int r;
    hwaddr a;

    a = virtio_queue_get_desc_addr(vdev, idx);
    if (a == 0) {
        /* Don't stop the virtqueue which might have not been started */
        return;
    }

    r = dev->vhost_ops->vhost_get_vring_base(dev, &state);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost VQ %d ring restore failed: %d", idx, r);
        /* Connection to the backend is broken, so let's sync internal
         * last avail idx to the device used idx.
         */
        virtio_queue_restore_last_avail_idx(vdev, idx);
    } else {
        virtio_queue_set_last_avail_idx(vdev, idx, state.num);
    }
    virtio_queue_invalidate_signalled_used(vdev, idx);
    virtio_queue_update_used_idx(vdev, idx);

    /* In the cross-endian case, we need to reset the vring endianness to
     * native, as legacy devices expect it by default.
     */
    if (vhost_needs_vring_endian(vdev)) {
        vhost_virtqueue_set_vring_endian_legacy(dev,
                                                !virtio_is_big_endian(vdev),
                                                vhost_vq_index);
    }

    vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
                       1, virtio_queue_get_used_size(vdev, idx));
    vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
                       0, virtio_queue_get_avail_size(vdev, idx));
    vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
                       0, virtio_queue_get_desc_size(vdev, idx));
}

static void vhost_eventfd_add(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static void vhost_eventfd_del(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static int vhost_virtqueue_set_busyloop_timeout(struct vhost_dev *dev,
                                                int n, uint32_t timeout)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
    struct vhost_vring_state state = {
        .index = vhost_vq_index,
        .num = timeout,
    };
    int r;

    if (!dev->vhost_ops->vhost_set_vring_busyloop_timeout) {
        return -EINVAL;
    }

    r = dev->vhost_ops->vhost_set_vring_busyloop_timeout(dev, &state);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_busyloop_timeout failed");
        return r;
    }

    return 0;
}

static int vhost_virtqueue_init(struct vhost_dev *dev,
                                struct vhost_virtqueue *vq, int n)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
    struct vhost_vring_file file = {
        .index = vhost_vq_index,
    };
    int r = event_notifier_init(&vq->masked_notifier, 0);
    if (r < 0) {
        return r;
    }

    file.fd = event_notifier_get_fd(&vq->masked_notifier);
    r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_call failed");
        r = -errno;
        goto fail_call;
    }

    vq->dev = dev;

    return 0;
fail_call:
    event_notifier_cleanup(&vq->masked_notifier);
    return r;
}

static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
{
    event_notifier_cleanup(&vq->masked_notifier);
}
int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
                   VhostBackendType backend_type, uint32_t busyloop_timeout)
{
    uint64_t features;
    int i, r, n_initialized_vqs = 0;
    Error *local_err = NULL;

    hdev->vdev = NULL;
    hdev->migration_blocker = NULL;

    r = vhost_set_backend_type(hdev, backend_type);
    assert(r >= 0);

    r = hdev->vhost_ops->vhost_backend_init(hdev, opaque);
    if (r < 0) {
        goto fail;
    }

    r = hdev->vhost_ops->vhost_set_owner(hdev);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_owner failed");
        goto fail;
    }

    r = hdev->vhost_ops->vhost_get_features(hdev, &features);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_get_features failed");
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i, ++n_initialized_vqs) {
        r = vhost_virtqueue_init(hdev, hdev->vqs + i, hdev->vq_index + i);
        if (r < 0) {
            goto fail;
        }
    }

    if (busyloop_timeout) {
        for (i = 0; i < hdev->nvqs; ++i) {
            r = vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i,
                                                     busyloop_timeout);
            if (r < 0) {
                goto fail_busyloop;
            }
        }
    }

    hdev->features = features;

    hdev->memory_listener = (MemoryListener) {
        .begin = vhost_begin,
        .commit = vhost_commit,
        .region_add = vhost_region_addnop,
        .region_nop = vhost_region_addnop,
        .log_start = vhost_log_start,
        .log_stop = vhost_log_stop,
        .log_sync = vhost_log_sync,
        .log_global_start = vhost_log_global_start,
        .log_global_stop = vhost_log_global_stop,
        .eventfd_add = vhost_eventfd_add,
        .eventfd_del = vhost_eventfd_del,
        .priority = 10
    };

    hdev->iommu_listener = (MemoryListener) {
        .region_add = vhost_iommu_region_add,
        .region_del = vhost_iommu_region_del,
    };

    if (hdev->migration_blocker == NULL) {
        if (!(hdev->features & (0x1ULL << VHOST_F_LOG_ALL))) {
            error_setg(&hdev->migration_blocker,
                       "Migration disabled: vhost lacks VHOST_F_LOG_ALL feature.");
        } else if (vhost_dev_log_is_shared(hdev) && !qemu_memfd_check()) {
            error_setg(&hdev->migration_blocker,
                       "Migration disabled: failed to allocate shared memory");
        }
    }

    if (hdev->migration_blocker != NULL) {
        r = migrate_add_blocker(hdev->migration_blocker, &local_err);
        if (local_err) {
            error_report_err(local_err);
            error_free(hdev->migration_blocker);
            goto fail_busyloop;
        }
    }

    hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
    hdev->n_mem_sections = 0;
    hdev->mem_sections = NULL;
    hdev->log = NULL;
    hdev->log_size = 0;
    hdev->log_enabled = false;
    hdev->started = false;
    memory_listener_register(&hdev->memory_listener, &address_space_memory);
    QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);

    if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
        error_report("vhost backend memory slots limit is less"
                     " than current number of present memory slots");
        r = -1;
        if (busyloop_timeout) {
            goto fail_busyloop;
        } else {
            goto fail;
        }
    }

    return 0;

fail_busyloop:
    while (--i >= 0) {
        vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i, 0);
    }
fail:
    hdev->nvqs = n_initialized_vqs;
    vhost_dev_cleanup(hdev);
    return r;
}
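
/*
 * Rough lifecycle, as driven by a vhost backend such as vhost_net (the exact
 * call sites live with the individual device code, so treat this as a
 * sketch rather than a prescription):
 *
 *   vhost_dev_init(hdev, opaque, backend_type, busyloop_timeout);
 *   ...
 *   vhost_dev_enable_notifiers(hdev, vdev);   // hand ioeventfds to vhost
 *   vhost_dev_start(hdev, vdev);              // mem table, rings, logging
 *   ...
 *   vhost_dev_stop(hdev, vdev);
 *   vhost_dev_disable_notifiers(hdev, vdev);
 *   vhost_dev_cleanup(hdev);
 */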
void vhost_dev_cleanup(struct vhost_dev *hdev)
{
    int i;

    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_cleanup(hdev->vqs + i);
    }
    if (hdev->mem) {
        /* those are only safe after successful init */
        memory_listener_unregister(&hdev->memory_listener);
        QLIST_REMOVE(hdev, entry);
    }
    if (hdev->migration_blocker) {
        migrate_del_blocker(hdev->migration_blocker);
        error_free(hdev->migration_blocker);
    }
    g_free(hdev->mem);
    g_free(hdev->mem_sections);
    if (hdev->vhost_ops) {
        hdev->vhost_ops->vhost_backend_cleanup(hdev);
    }
    assert(!hdev->log);

    memset(hdev, 0, sizeof(struct vhost_dev));
}

/* Stop processing guest IO notifications in qemu.
 * Start processing them in vhost in kernel.
 */
int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    int i, r, e;

    /* We will pass the notifiers to the kernel, make sure that QEMU
     * doesn't interfere.
     */
    r = virtio_device_grab_ioeventfd(vdev);
    if (r < 0) {
        error_report("binding does not support host notifiers");
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i) {
        r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
                                         true);
        if (r < 0) {
            error_report("vhost VQ %d notifier binding failed: %d", i, -r);
            goto fail_vq;
        }
    }

    return 0;
fail_vq:
    while (--i >= 0) {
        e = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
                                         false);
        if (e < 0) {
            error_report("vhost VQ %d notifier cleanup error: %d", i, -r);
        }
        assert(e >= 0);
        virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i);
    }
    virtio_device_release_ioeventfd(vdev);
fail:
    return r;
}

/* Stop processing guest IO notifications in vhost.
 * Start processing them in qemu.
 * This might actually run the qemu handlers right away,
 * so virtio in qemu must be completely setup when this is called.
 */
void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    int i, r;

    for (i = 0; i < hdev->nvqs; ++i) {
        r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
                                         false);
        if (r < 0) {
            error_report("vhost VQ %d notifier cleanup failed: %d", i, -r);
        }
        assert(r >= 0);
        virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i);
    }
    virtio_device_release_ioeventfd(vdev);
}
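
/*
 * Interrupt masking below works by re-pointing the backend's call eventfd:
 * while a queue is masked, interrupts land on the per-queue masked_notifier
 * instead of the guest notifier, and vhost_virtqueue_pending() reports (and
 * clears) whether anything arrived in the meantime so the caller can inject
 * it on unmask.
 */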
/* Test and clear event pending status.
 * Should be called after unmask to avoid losing events.
 */
bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
{
    struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index;
    assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);
    return event_notifier_test_and_clear(&vq->masked_notifier);
}

/* Mask/unmask events from this vq. */
void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
                          bool mask)
{
    struct VirtQueue *vvq = virtio_get_queue(vdev, n);
    int r, index = n - hdev->vq_index;
    struct vhost_vring_file file;

    /* should only be called after backend is connected */
    assert(hdev->vhost_ops);

    if (mask) {
        assert(vdev->use_guest_notifier_mask);
        file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier);
    } else {
        file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
    }

    file.index = hdev->vhost_ops->vhost_get_vq_index(hdev, n);
    r = hdev->vhost_ops->vhost_set_vring_call(hdev, &file);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_vring_call failed");
    }
}
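
/*
 * feature_bits below is an array of virtio feature bit numbers terminated by
 * VHOST_INVALID_FEATURE_BIT.  A caller-side list might look like this
 * (hypothetical example, not taken from any particular device):
 *
 *   static const int example_feature_bits[] = {
 *       VIRTIO_F_VERSION_1,
 *       VIRTIO_RING_F_INDIRECT_DESC,
 *       VIRTIO_RING_F_EVENT_IDX,
 *       VHOST_INVALID_FEATURE_BIT
 *   };
 *
 * vhost_get_features() clears every listed bit that the backend did not
 * report as supported; vhost_ack_features() records the listed bits that the
 * guest actually acknowledged into hdev->acked_features.
 */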
uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
                            uint64_t features)
{
    const int *bit = feature_bits;
    while (*bit != VHOST_INVALID_FEATURE_BIT) {
        uint64_t bit_mask = (1ULL << *bit);
        if (!(hdev->features & bit_mask)) {
            features &= ~bit_mask;
        }
        bit++;
    }
    return features;
}
void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
                        uint64_t features)
{
    const int *bit = feature_bits;
    while (*bit != VHOST_INVALID_FEATURE_BIT) {
        uint64_t bit_mask = (1ULL << *bit);
        if (features & bit_mask) {
            hdev->acked_features |= bit_mask;
        }
        bit++;
    }
}

int vhost_dev_get_config(struct vhost_dev *hdev, uint8_t *config,
                         uint32_t config_len)
{
    assert(hdev->vhost_ops);

    if (hdev->vhost_ops->vhost_get_config) {
        return hdev->vhost_ops->vhost_get_config(hdev, config, config_len);
    }

    return -1;
}

int vhost_dev_set_config(struct vhost_dev *hdev, const uint8_t *data,
                         uint32_t offset, uint32_t size, uint32_t flags)
{
    assert(hdev->vhost_ops);

    if (hdev->vhost_ops->vhost_set_config) {
        return hdev->vhost_ops->vhost_set_config(hdev, data, offset,
                                                 size, flags);
    }

    return -1;
}

void vhost_dev_set_config_notifier(struct vhost_dev *hdev,
                                   const VhostDevConfigOps *ops)
{
    assert(hdev->vhost_ops);
    hdev->config_ops = ops;
}

/* Host notifiers must be enabled at this point. */
int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;

    /* should only be called after backend is connected */
    assert(hdev->vhost_ops);

    hdev->started = true;
    hdev->vdev = vdev;

    r = vhost_dev_set_features(hdev, hdev->log_enabled);
    if (r < 0) {
        goto fail_features;
    }

    if (vhost_dev_has_iommu(hdev)) {
        memory_listener_register(&hdev->iommu_listener, vdev->dma_as);
    }

    r = hdev->vhost_ops->vhost_set_mem_table(hdev, hdev->mem);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_mem_table failed");
        r = -errno;
        goto fail_mem;
    }
    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_start(hdev,
                                  vdev,
                                  hdev->vqs + i,
                                  hdev->vq_index + i);
        if (r < 0) {
            goto fail_vq;
        }
    }

    if (hdev->log_enabled) {
        uint64_t log_base;

        hdev->log_size = vhost_get_log_size(hdev);
        hdev->log = vhost_log_get(hdev->log_size,
                                  vhost_dev_log_is_shared(hdev));
        log_base = (uintptr_t)hdev->log->log;
        r = hdev->vhost_ops->vhost_set_log_base(hdev,
                                                hdev->log_size ? log_base : 0,
                                                hdev->log);
        if (r < 0) {
            VHOST_OPS_DEBUG("vhost_set_log_base failed");
            r = -errno;
            goto fail_log;
        }
    }

    if (vhost_dev_has_iommu(hdev)) {
        hdev->vhost_ops->vhost_set_iotlb_callback(hdev, true);

        /* Update used ring information for IOTLB to work correctly;
         * the vhost-kernel code requires it. */
        for (i = 0; i < hdev->nvqs; ++i) {
            struct vhost_virtqueue *vq = hdev->vqs + i;
            vhost_device_iotlb_miss(hdev, vq->used_phys, true);
        }
    }
    return 0;
fail_log:
    vhost_log_put(hdev, false);
fail_vq:
    while (--i >= 0) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }
fail_mem:
fail_features:

    hdev->started = false;
    return r;
}

/* Host notifiers must be enabled at this point. */
void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i;

    /* should only be called after backend is connected */
    assert(hdev->vhost_ops);

    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }

    if (vhost_dev_has_iommu(hdev)) {
        hdev->vhost_ops->vhost_set_iotlb_callback(hdev, false);
        memory_listener_unregister(&hdev->iommu_listener);
    }
    vhost_log_put(hdev, true);
    hdev->started = false;
    hdev->vdev = NULL;
}

int vhost_net_set_backend(struct vhost_dev *hdev,
                          struct vhost_vring_file *file)
{
    if (hdev->vhost_ops->vhost_net_set_backend) {
        return hdev->vhost_ops->vhost_net_set_backend(hdev, file);
    }

    return -1;
}