/*
 * Virtio Support
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "trace.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "hw/virtio/virtio.h"
#include "migration/qemu-file-types.h"
#include "qemu/atomic.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/qdev-properties.h"
#include "hw/virtio/virtio-access.h"
#include "sysemu/dma.h"
#include "sysemu/runstate.h"
#include "standard-headers/linux/virtio_ids.h"

/*
 * The alignment to use between consumer and producer parts of vring.
 * x86 pagesize again. This is the default, used by transports like PCI
 * which don't provide a means for the guest to tell the host the alignment.
 */
#define VIRTIO_PCI_VRING_ALIGN         4096
typedef struct VRingDesc
{
    uint64_t addr;
    uint32_t len;
    uint16_t flags;
    uint16_t next;
} VRingDesc;

typedef struct VRingPackedDesc {
    uint64_t addr;
    uint32_t len;
    uint16_t id;
    uint16_t flags;
} VRingPackedDesc;

typedef struct VRingAvail
{
    uint16_t flags;
    uint16_t idx;
    uint16_t ring[];
} VRingAvail;

typedef struct VRingUsedElem
{
    uint32_t id;
    uint32_t len;
} VRingUsedElem;

typedef struct VRingUsed
{
    uint16_t flags;
    uint16_t idx;
    VRingUsedElem ring[];
} VRingUsed;

typedef struct VRingMemoryRegionCaches {
    struct rcu_head rcu;
    MemoryRegionCache desc;
    MemoryRegionCache avail;
    MemoryRegionCache used;
} VRingMemoryRegionCaches;

typedef struct VRing
{
    unsigned int num;
    unsigned int num_default;
    unsigned int align;
    hwaddr desc;
    hwaddr avail;
    hwaddr used;
    VRingMemoryRegionCaches *caches;
} VRing;

typedef struct VRingPackedDescEvent {
    uint16_t off_wrap;
    uint16_t flags;
} VRingPackedDescEvent;
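/*
 * Example: off_wrap packs a 15-bit ring offset together with the wrap
 * counter in bit 15, so offset 3 with wrap counter 1 is encoded as
 * 3 | (1 << 15) = 0x8003 (see virtio_queue_packed_set_notification()).
 */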
struct VirtQueue
{
    VRing vring;
    VirtQueueElement *used_elems;

    /* Next head to pop */
    uint16_t last_avail_idx;
    bool last_avail_wrap_counter;

    /* Last avail_idx read from VQ. */
    uint16_t shadow_avail_idx;
    bool shadow_avail_wrap_counter;

    uint16_t used_idx;
    bool used_wrap_counter;

    /* Last used index value we have signalled on */
    uint16_t signalled_used;

    /* Whether signalled_used is valid */
    bool signalled_used_valid;

    /* Notification enabled? */
    bool notification;

    uint16_t queue_index;

    unsigned int inuse;

    uint16_t vector;
    VirtIOHandleOutput handle_output;
    VirtIOHandleAIOOutput handle_aio_output;
    VirtIODevice *vdev;
    EventNotifier guest_notifier;
    EventNotifier host_notifier;
    bool host_notifier_enabled;
    QLIST_ENTRY(VirtQueue) node;
};
/* Called within call_rcu().  */
static void virtio_free_region_cache(VRingMemoryRegionCaches *caches)
{
    assert(caches != NULL);
    address_space_cache_destroy(&caches->desc);
    address_space_cache_destroy(&caches->avail);
    address_space_cache_destroy(&caches->used);
    g_free(caches);
}
static void virtio_virtqueue_reset_region_cache(struct VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches;

    caches = qatomic_read(&vq->vring.caches);
    qatomic_rcu_set(&vq->vring.caches, NULL);
    if (caches) {
        call_rcu(caches, virtio_free_region_cache, rcu);
    }
}
static void virtio_init_region_cache(VirtIODevice *vdev, int n)
{
    VirtQueue *vq = &vdev->vq[n];
    VRingMemoryRegionCaches *old = vq->vring.caches;
    VRingMemoryRegionCaches *new = NULL;
    hwaddr addr, size;
    int64_t len;
    bool packed;

    addr = vq->vring.desc;
    if (!addr) {
        goto out_no_cache;
    }
    new = g_new0(VRingMemoryRegionCaches, 1);
    size = virtio_queue_get_desc_size(vdev, n);
    packed = virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED) ?
                                   true : false;
    len = address_space_cache_init(&new->desc, vdev->dma_as,
                                   addr, size, packed);
    if (len < size) {
        virtio_error(vdev, "Cannot map desc");
        goto err_desc;
    }

    size = virtio_queue_get_used_size(vdev, n);
    len = address_space_cache_init(&new->used, vdev->dma_as,
                                   vq->vring.used, size, true);
    if (len < size) {
        virtio_error(vdev, "Cannot map used");
        goto err_used;
    }

    size = virtio_queue_get_avail_size(vdev, n);
    len = address_space_cache_init(&new->avail, vdev->dma_as,
                                   vq->vring.avail, size, false);
    if (len < size) {
        virtio_error(vdev, "Cannot map avail");
        goto err_avail;
    }

    qatomic_rcu_set(&vq->vring.caches, new);
    if (old) {
        call_rcu(old, virtio_free_region_cache, rcu);
    }
    return;

err_avail:
    address_space_cache_destroy(&new->avail);
err_used:
    address_space_cache_destroy(&new->used);
err_desc:
    address_space_cache_destroy(&new->desc);
out_no_cache:
    g_free(new);
    virtio_virtqueue_reset_region_cache(vq);
}
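/*
 * Readers access the caches installed above under RCU.  A minimal sketch of
 * the reader side, mirroring what the vring_*() helpers below do:
 *
 *     RCU_READ_LOCK_GUARD();
 *     VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
 *     if (caches) {
 *         ... use caches->desc, caches->avail, caches->used ...
 *     }
 */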
/* virt queue functions */
void virtio_queue_update_rings(VirtIODevice *vdev, int n)
{
    VRing *vring = &vdev->vq[n].vring;

    if (!vring->num || !vring->desc || !vring->align) {
        /* not yet setup -> nothing to do */
        return;
    }
    vring->avail = vring->desc + vring->num * sizeof(VRingDesc);
    vring->used = vring_align(vring->avail +
                              offsetof(VRingAvail, ring[vring->num]),
                              vring->align);
    virtio_init_region_cache(vdev, n);
}
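/*
 * Worked example: with vring.num = 256 and the default 4096-byte alignment,
 * sizeof(VRingDesc) = 16 and offsetof(VRingAvail, ring[256]) = 4 + 2 * 256,
 * so the layout computed above is:
 *
 *     avail = desc + 256 * 16                 = desc + 4096
 *     used  = vring_align(avail + 516, 4096)  = desc + 8192
 */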
/* Called within rcu_read_lock().  */
static void vring_split_desc_read(VirtIODevice *vdev, VRingDesc *desc,
                                  MemoryRegionCache *cache, int i)
{
    address_space_read_cached(cache, i * sizeof(VRingDesc),
                              desc, sizeof(VRingDesc));
    virtio_tswap64s(vdev, &desc->addr);
    virtio_tswap32s(vdev, &desc->len);
    virtio_tswap16s(vdev, &desc->flags);
    virtio_tswap16s(vdev, &desc->next);
}
static void vring_packed_event_read(VirtIODevice *vdev,
                                    MemoryRegionCache *cache,
                                    VRingPackedDescEvent *e)
{
    hwaddr off_off = offsetof(VRingPackedDescEvent, off_wrap);
    hwaddr off_flags = offsetof(VRingPackedDescEvent, flags);

    address_space_read_cached(cache, off_flags, &e->flags,
                              sizeof(e->flags));
    /* Make sure flags is seen before off_wrap */
    smp_rmb();
    address_space_read_cached(cache, off_off, &e->off_wrap,
                              sizeof(e->off_wrap));
    virtio_tswap16s(vdev, &e->off_wrap);
    virtio_tswap16s(vdev, &e->flags);
}
static void vring_packed_off_wrap_write(VirtIODevice *vdev,
                                        MemoryRegionCache *cache,
                                        uint16_t off_wrap)
{
    hwaddr off = offsetof(VRingPackedDescEvent, off_wrap);

    virtio_tswap16s(vdev, &off_wrap);
    address_space_write_cached(cache, off, &off_wrap, sizeof(off_wrap));
    address_space_cache_invalidate(cache, off, sizeof(off_wrap));
}
static void vring_packed_flags_write(VirtIODevice *vdev,
                                     MemoryRegionCache *cache, uint16_t flags)
{
    hwaddr off = offsetof(VRingPackedDescEvent, flags);

    virtio_tswap16s(vdev, &flags);
    address_space_write_cached(cache, off, &flags, sizeof(flags));
    address_space_cache_invalidate(cache, off, sizeof(flags));
}
/* Called within rcu_read_lock().  */
static VRingMemoryRegionCaches *vring_get_region_caches(struct VirtQueue *vq)
{
    return qatomic_rcu_read(&vq->vring.caches);
}
/* Called within rcu_read_lock().  */
static inline uint16_t vring_avail_flags(VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingAvail, flags);

    if (!caches) {
        return 0;
    }

    return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
}
/* Called within rcu_read_lock().  */
static inline uint16_t vring_avail_idx(VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingAvail, idx);

    if (!caches) {
        return 0;
    }

    vq->shadow_avail_idx = virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
    return vq->shadow_avail_idx;
}
/* Called within rcu_read_lock().  */
static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingAvail, ring[i]);

    if (!caches) {
        return 0;
    }

    return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
}
/* Called within rcu_read_lock().  */
static inline uint16_t vring_get_used_event(VirtQueue *vq)
{
    return vring_avail_ring(vq, vq->vring.num);
}
/* Called within rcu_read_lock().  */
static inline void vring_used_write(VirtQueue *vq, VRingUsedElem *uelem,
                                    int i)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingUsed, ring[i]);

    if (!caches) {
        return;
    }

    virtio_tswap32s(vq->vdev, &uelem->id);
    virtio_tswap32s(vq->vdev, &uelem->len);
    address_space_write_cached(&caches->used, pa, uelem, sizeof(VRingUsedElem));
    address_space_cache_invalidate(&caches->used, pa, sizeof(VRingUsedElem));
}
/* Called within rcu_read_lock().  */
static uint16_t vring_used_idx(VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingUsed, idx);

    if (!caches) {
        return 0;
    }

    return virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
}
/* Called within rcu_read_lock().  */
static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingUsed, idx);

    if (caches) {
        virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
        address_space_cache_invalidate(&caches->used, pa, sizeof(val));
    }

    vq->used_idx = val;
}
/* Called within rcu_read_lock().  */
static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    VirtIODevice *vdev = vq->vdev;
    hwaddr pa = offsetof(VRingUsed, flags);
    uint16_t flags;

    if (!caches) {
        return;
    }

    flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
    virtio_stw_phys_cached(vdev, &caches->used, pa, flags | mask);
    address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
}
/* Called within rcu_read_lock().  */
static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    VirtIODevice *vdev = vq->vdev;
    hwaddr pa = offsetof(VRingUsed, flags);
    uint16_t flags;

    if (!caches) {
        return;
    }

    flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
    virtio_stw_phys_cached(vdev, &caches->used, pa, flags & ~mask);
    address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
}
/* Called within rcu_read_lock().  */
static inline void vring_set_avail_event(VirtQueue *vq, uint16_t val)
{
    VRingMemoryRegionCaches *caches;
    hwaddr pa;

    if (!vq->notification) {
        return;
    }

    caches = vring_get_region_caches(vq);
    if (!caches) {
        return;
    }

    pa = offsetof(VRingUsed, ring[vq->vring.num]);
    virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
    address_space_cache_invalidate(&caches->used, pa, sizeof(val));
}
static void virtio_queue_split_set_notification(VirtQueue *vq, int enable)
{
    RCU_READ_LOCK_GUARD();

    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vring_avail_idx(vq));
    } else if (enable) {
        vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
    } else {
        vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
    }
    if (enable) {
        /* Expose avail event/used flags before caller checks the avail idx. */
        smp_mb();
    }
}
static void virtio_queue_packed_set_notification(VirtQueue *vq, int enable)
{
    uint16_t off_wrap;
    VRingPackedDescEvent e;
    VRingMemoryRegionCaches *caches;

    RCU_READ_LOCK_GUARD();
    caches = vring_get_region_caches(vq);
    if (!caches) {
        return;
    }

    vring_packed_event_read(vq->vdev, &caches->used, &e);

    if (!enable) {
        e.flags = VRING_PACKED_EVENT_FLAG_DISABLE;
    } else if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
        off_wrap = vq->shadow_avail_idx | vq->shadow_avail_wrap_counter << 15;
        vring_packed_off_wrap_write(vq->vdev, &caches->used, off_wrap);
        /* Make sure off_wrap is written before flags */
        smp_wmb();
        e.flags = VRING_PACKED_EVENT_FLAG_DESC;
    } else {
        e.flags = VRING_PACKED_EVENT_FLAG_ENABLE;
    }

    vring_packed_flags_write(vq->vdev, &caches->used, e.flags);
    if (enable) {
        /* Expose avail event/used flags before caller checks the avail idx. */
        smp_mb();
    }
}
bool virtio_queue_get_notification(VirtQueue *vq)
{
    return vq->notification;
}

void virtio_queue_set_notification(VirtQueue *vq, int enable)
{
    vq->notification = enable;

    if (!vq->vring.desc) {
        return;
    }

    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        virtio_queue_packed_set_notification(vq, enable);
    } else {
        virtio_queue_split_set_notification(vq, enable);
    }
}
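/*
 * Example: a handler that drains the queue can suppress further guest kicks
 * while it works, then re-check for a race (sketch; the processing loop is
 * hypothetical):
 *
 *     virtio_queue_set_notification(vq, 0);
 *     while ((elem = virtqueue_pop(vq, sizeof(VirtQueueElement)))) {
 *         ... process and push ...
 *     }
 *     virtio_queue_set_notification(vq, 1);
 *     if (!virtio_queue_empty(vq)) {
 *         ... a buffer arrived while notifications were off, go around ...
 *     }
 */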
int virtio_queue_ready(VirtQueue *vq)
{
    return vq->vring.avail != 0;
}
static void vring_packed_desc_read_flags(VirtIODevice *vdev,
                                         uint16_t *flags,
                                         MemoryRegionCache *cache,
                                         int i)
{
    address_space_read_cached(cache,
                              i * sizeof(VRingPackedDesc) +
                              offsetof(VRingPackedDesc, flags),
                              flags, sizeof(*flags));
    virtio_tswap16s(vdev, flags);
}
static void vring_packed_desc_read(VirtIODevice *vdev,
                                   VRingPackedDesc *desc,
                                   MemoryRegionCache *cache,
                                   int i, bool strict_order)
{
    hwaddr off = i * sizeof(VRingPackedDesc);

    vring_packed_desc_read_flags(vdev, &desc->flags, cache, i);

    if (strict_order) {
        /* Make sure flags is read before the rest of the fields. */
        smp_rmb();
    }

    address_space_read_cached(cache, off + offsetof(VRingPackedDesc, addr),
                              &desc->addr, sizeof(desc->addr));
    address_space_read_cached(cache, off + offsetof(VRingPackedDesc, id),
                              &desc->id, sizeof(desc->id));
    address_space_read_cached(cache, off + offsetof(VRingPackedDesc, len),
                              &desc->len, sizeof(desc->len));
    virtio_tswap64s(vdev, &desc->addr);
    virtio_tswap16s(vdev, &desc->id);
    virtio_tswap32s(vdev, &desc->len);
}
static void vring_packed_desc_write_data(VirtIODevice *vdev,
                                         VRingPackedDesc *desc,
                                         MemoryRegionCache *cache,
                                         int i)
{
    hwaddr off_id = i * sizeof(VRingPackedDesc) +
                    offsetof(VRingPackedDesc, id);
    hwaddr off_len = i * sizeof(VRingPackedDesc) +
                     offsetof(VRingPackedDesc, len);

    virtio_tswap32s(vdev, &desc->len);
    virtio_tswap16s(vdev, &desc->id);
    address_space_write_cached(cache, off_id, &desc->id, sizeof(desc->id));
    address_space_cache_invalidate(cache, off_id, sizeof(desc->id));
    address_space_write_cached(cache, off_len, &desc->len, sizeof(desc->len));
    address_space_cache_invalidate(cache, off_len, sizeof(desc->len));
}
static void vring_packed_desc_write_flags(VirtIODevice *vdev,
                                          VRingPackedDesc *desc,
                                          MemoryRegionCache *cache,
                                          int i)
{
    hwaddr off = i * sizeof(VRingPackedDesc) + offsetof(VRingPackedDesc, flags);

    virtio_tswap16s(vdev, &desc->flags);
    address_space_write_cached(cache, off, &desc->flags, sizeof(desc->flags));
    address_space_cache_invalidate(cache, off, sizeof(desc->flags));
}
static void vring_packed_desc_write(VirtIODevice *vdev,
                                    VRingPackedDesc *desc,
                                    MemoryRegionCache *cache,
                                    int i, bool strict_order)
{
    vring_packed_desc_write_data(vdev, desc, cache, i);
    if (strict_order) {
        /* Make sure data is written before flags. */
        smp_wmb();
    }
    vring_packed_desc_write_flags(vdev, desc, cache, i);
}
static inline bool is_desc_avail(uint16_t flags, bool wrap_counter)
{
    bool avail, used;

    avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
    used = !!(flags & (1 << VRING_PACKED_DESC_F_USED));
    return (avail != used) && (avail == wrap_counter);
}
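/*
 * Example: VRING_PACKED_DESC_F_AVAIL is bit 7 and VRING_PACKED_DESC_F_USED
 * is bit 15, so flags = 0x0080 gives avail = 1, used = 0.  On the first
 * pass over the ring (wrap_counter = 1) that descriptor is available; once
 * the device marks it used (flags = 0x8080, avail == used) the test fails
 * until the ring wraps and the counter flips.
 */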
/* Fetch avail_idx from VQ memory only when we really need to know if
 * guest has added some buffers.
 * Called within rcu_read_lock().  */
static int virtio_queue_empty_rcu(VirtQueue *vq)
{
    if (virtio_device_disabled(vq->vdev)) {
        return 1;
    }

    if (unlikely(!vq->vring.avail)) {
        return 1;
    }

    if (vq->shadow_avail_idx != vq->last_avail_idx) {
        return 0;
    }

    return vring_avail_idx(vq) == vq->last_avail_idx;
}
static int virtio_queue_split_empty(VirtQueue *vq)
{
    bool empty;

    if (virtio_device_disabled(vq->vdev)) {
        return 1;
    }

    if (unlikely(!vq->vring.avail)) {
        return 1;
    }

    if (vq->shadow_avail_idx != vq->last_avail_idx) {
        return 0;
    }

    RCU_READ_LOCK_GUARD();
    empty = vring_avail_idx(vq) == vq->last_avail_idx;
    return empty;
}
/* Called within rcu_read_lock().  */
static int virtio_queue_packed_empty_rcu(VirtQueue *vq)
{
    struct VRingPackedDesc desc;
    VRingMemoryRegionCaches *cache;

    if (unlikely(!vq->vring.desc)) {
        return 1;
    }

    cache = vring_get_region_caches(vq);
    if (!cache) {
        return 1;
    }

    vring_packed_desc_read_flags(vq->vdev, &desc.flags, &cache->desc,
                                 vq->last_avail_idx);

    return !is_desc_avail(desc.flags, vq->last_avail_wrap_counter);
}
static int virtio_queue_packed_empty(VirtQueue *vq)
{
    RCU_READ_LOCK_GUARD();
    return virtio_queue_packed_empty_rcu(vq);
}

int virtio_queue_empty(VirtQueue *vq)
{
    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        return virtio_queue_packed_empty(vq);
    } else {
        return virtio_queue_split_empty(vq);
    }
}
static void virtqueue_unmap_sg(VirtQueue *vq, const VirtQueueElement *elem,
                               unsigned int len)
{
    AddressSpace *dma_as = vq->vdev->dma_as;
    unsigned int offset;
    int i;

    offset = 0;
    for (i = 0; i < elem->in_num; i++) {
        size_t size = MIN(len - offset, elem->in_sg[i].iov_len);

        dma_memory_unmap(dma_as, elem->in_sg[i].iov_base,
                         elem->in_sg[i].iov_len,
                         DMA_DIRECTION_FROM_DEVICE, size);

        offset += size;
    }

    for (i = 0; i < elem->out_num; i++) {
        dma_memory_unmap(dma_as, elem->out_sg[i].iov_base,
                         elem->out_sg[i].iov_len,
                         DMA_DIRECTION_TO_DEVICE,
                         elem->out_sg[i].iov_len);
    }
}
/* virtqueue_detach_element:
 * @vq: The #VirtQueue
 * @elem: The #VirtQueueElement
 * @len: number of bytes written
 *
 * Detach the element from the virtqueue.  This function is suitable for device
 * reset or other situations where a #VirtQueueElement is simply freed and will
 * not be pushed or discarded.
 */
void virtqueue_detach_element(VirtQueue *vq, const VirtQueueElement *elem,
                              unsigned int len)
{
    vq->inuse -= elem->ndescs;
    virtqueue_unmap_sg(vq, elem, len);
}
static void virtqueue_split_rewind(VirtQueue *vq, unsigned int num)
{
    vq->last_avail_idx -= num;
}

static void virtqueue_packed_rewind(VirtQueue *vq, unsigned int num)
{
    if (vq->last_avail_idx < num) {
        vq->last_avail_idx = vq->vring.num + vq->last_avail_idx - num;
        vq->last_avail_wrap_counter ^= 1;
    } else {
        vq->last_avail_idx -= num;
    }
}
/* virtqueue_unpop:
 * @vq: The #VirtQueue
 * @elem: The #VirtQueueElement
 * @len: number of bytes written
 *
 * Pretend the most recent element wasn't popped from the virtqueue.  The next
 * call to virtqueue_pop() will refetch the element.
 */
void virtqueue_unpop(VirtQueue *vq, const VirtQueueElement *elem,
                     unsigned int len)
{
    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        virtqueue_packed_rewind(vq, 1);
    } else {
        virtqueue_split_rewind(vq, 1);
    }

    virtqueue_detach_element(vq, elem, len);
}
/* virtqueue_rewind:
 * @vq: The #VirtQueue
 * @num: Number of elements to push back
 *
 * Pretend that elements weren't popped from the virtqueue.  The next
 * virtqueue_pop() will refetch the oldest element.
 *
 * Use virtqueue_unpop() instead if you have a VirtQueueElement.
 *
 * Returns: true on success, false if @num is greater than the number of in use
 * elements.
 */
bool virtqueue_rewind(VirtQueue *vq, unsigned int num)
{
    if (num > vq->inuse) {
        return false;
    }

    vq->inuse -= num;
    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        virtqueue_packed_rewind(vq, num);
    } else {
        virtqueue_split_rewind(vq, num);
    }
    return true;
}
static void virtqueue_split_fill(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len, unsigned int idx)
{
    VRingUsedElem uelem;

    if (unlikely(!vq->vring.used)) {
        return;
    }

    idx = (idx + vq->used_idx) % vq->vring.num;

    uelem.id = elem->index;
    uelem.len = len;
    vring_used_write(vq, &uelem, idx);
}
static void virtqueue_packed_fill(VirtQueue *vq, const VirtQueueElement *elem,
                                  unsigned int len, unsigned int idx)
{
    vq->used_elems[idx].index = elem->index;
    vq->used_elems[idx].len = len;
    vq->used_elems[idx].ndescs = elem->ndescs;
}
static void virtqueue_packed_fill_desc(VirtQueue *vq,
                                       const VirtQueueElement *elem,
                                       unsigned int idx,
                                       bool strict_order)
{
    uint16_t head;
    VRingMemoryRegionCaches *caches;
    VRingPackedDesc desc = {
        .id = elem->index,
        .len = elem->len,
    };
    bool wrap_counter = vq->used_wrap_counter;

    if (unlikely(!vq->vring.desc)) {
        return;
    }

    head = vq->used_idx + idx;
    if (head >= vq->vring.num) {
        head -= vq->vring.num;
        wrap_counter ^= 1;
    }
    if (wrap_counter) {
        desc.flags |= (1 << VRING_PACKED_DESC_F_AVAIL);
        desc.flags |= (1 << VRING_PACKED_DESC_F_USED);
    } else {
        desc.flags &= ~(1 << VRING_PACKED_DESC_F_AVAIL);
        desc.flags &= ~(1 << VRING_PACKED_DESC_F_USED);
    }

    caches = vring_get_region_caches(vq);
    if (!caches) {
        return;
    }

    vring_packed_desc_write(vq->vdev, &desc, &caches->desc, head, strict_order);
}
/* Called within rcu_read_lock().  */
void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len, unsigned int idx)
{
    trace_virtqueue_fill(vq, elem, len, idx);

    virtqueue_unmap_sg(vq, elem, len);

    if (virtio_device_disabled(vq->vdev)) {
        return;
    }

    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        virtqueue_packed_fill(vq, elem, len, idx);
    } else {
        virtqueue_split_fill(vq, elem, len, idx);
    }
}
/* Called within rcu_read_lock().  */
static void virtqueue_split_flush(VirtQueue *vq, unsigned int count)
{
    uint16_t old, new;

    if (unlikely(!vq->vring.used)) {
        return;
    }

    /* Make sure buffer is written before we update index. */
    smp_wmb();
    trace_virtqueue_flush(vq, count);
    old = vq->used_idx;
    new = old + count;
    vring_used_idx_set(vq, new);
    vq->inuse -= count;
    if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old))) {
        vq->signalled_used_valid = false;
    }
}
static void virtqueue_packed_flush(VirtQueue *vq, unsigned int count)
{
    unsigned int i, ndescs = 0;

    if (unlikely(!vq->vring.desc)) {
        return;
    }

    for (i = 1; i < count; i++) {
        virtqueue_packed_fill_desc(vq, &vq->used_elems[i], i, false);
        ndescs += vq->used_elems[i].ndescs;
    }
    virtqueue_packed_fill_desc(vq, &vq->used_elems[0], 0, true);
    ndescs += vq->used_elems[0].ndescs;

    vq->inuse -= ndescs;
    vq->used_idx += ndescs;
    if (vq->used_idx >= vq->vring.num) {
        vq->used_idx -= vq->vring.num;
        vq->used_wrap_counter ^= 1;
    }
}
void virtqueue_flush(VirtQueue *vq, unsigned int count)
{
    if (virtio_device_disabled(vq->vdev)) {
        vq->inuse -= count;
        return;
    }

    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        virtqueue_packed_flush(vq, count);
    } else {
        virtqueue_split_flush(vq, count);
    }
}
void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len)
{
    RCU_READ_LOCK_GUARD();
    virtqueue_fill(vq, elem, len, 0);
    virtqueue_flush(vq, 1);
}
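/*
 * Typical device-side completion path (sketch; my_handle_output() and
 * process_request() are hypothetical, not part of this file):
 *
 *     static void my_handle_output(VirtIODevice *vdev, VirtQueue *vq)
 *     {
 *         VirtQueueElement *elem;
 *
 *         while ((elem = virtqueue_pop(vq, sizeof(VirtQueueElement)))) {
 *             size_t written = process_request(elem->out_sg, elem->out_num,
 *                                              elem->in_sg, elem->in_num);
 *             virtqueue_push(vq, elem, written);
 *             virtio_notify(vdev, vq);
 *             g_free(elem);
 *         }
 *     }
 */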
/* Called within rcu_read_lock().  */
static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
{
    uint16_t num_heads = vring_avail_idx(vq) - idx;

    /* Check it isn't doing very strange things with descriptor numbers. */
    if (num_heads > vq->vring.num) {
        virtio_error(vq->vdev, "Guest moved used index from %u to %u",
                     idx, vq->shadow_avail_idx);
        return -EINVAL;
    }
    /* On success, callers read a descriptor at vq->last_avail_idx.
     * Make sure descriptor read does not bypass avail index read. */
    if (num_heads) {
        smp_rmb();
    }

    return num_heads;
}
/* Called within rcu_read_lock().  */
static bool virtqueue_get_head(VirtQueue *vq, unsigned int idx,
                               unsigned int *head)
{
    /* Grab the next descriptor number they're advertising, and increment
     * the index we've seen. */
    *head = vring_avail_ring(vq, idx % vq->vring.num);

    /* If their number is silly, that's a fatal mistake. */
    if (*head >= vq->vring.num) {
        virtio_error(vq->vdev, "Guest says index %u is available", *head);
        return false;
    }

    return true;
}
enum {
    VIRTQUEUE_READ_DESC_ERROR = -1,
    VIRTQUEUE_READ_DESC_DONE = 0,   /* end of chain */
    VIRTQUEUE_READ_DESC_MORE = 1,   /* more buffers in chain */
};
static int virtqueue_split_read_next_desc(VirtIODevice *vdev, VRingDesc *desc,
                                          MemoryRegionCache *desc_cache,
                                          unsigned int max, unsigned int *next)
{
    /* If this descriptor says it doesn't chain, we're done. */
    if (!(desc->flags & VRING_DESC_F_NEXT)) {
        return VIRTQUEUE_READ_DESC_DONE;
    }

    /* Check they're not leading us off end of descriptors. */
    *next = desc->next;
    /* Make sure compiler knows to grab that: we don't want it changing! */
    smp_wmb();

    if (*next >= max) {
        virtio_error(vdev, "Desc next is %u", *next);
        return VIRTQUEUE_READ_DESC_ERROR;
    }

    vring_split_desc_read(vdev, desc, desc_cache, *next);
    return VIRTQUEUE_READ_DESC_MORE;
}
/* Called within rcu_read_lock().  */
static void virtqueue_split_get_avail_bytes(VirtQueue *vq,
                            unsigned int *in_bytes, unsigned int *out_bytes,
                            unsigned max_in_bytes, unsigned max_out_bytes,
                            VRingMemoryRegionCaches *caches)
{
    VirtIODevice *vdev = vq->vdev;
    unsigned int max, idx;
    unsigned int total_bufs, in_total, out_total;
    MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID;
    int64_t len = 0;
    int rc;

    idx = vq->last_avail_idx;
    total_bufs = in_total = out_total = 0;

    max = vq->vring.num;

    while ((rc = virtqueue_num_heads(vq, idx)) > 0) {
        MemoryRegionCache *desc_cache = &caches->desc;
        unsigned int num_bufs;
        VRingDesc desc;
        unsigned int i;

        num_bufs = total_bufs;

        if (!virtqueue_get_head(vq, idx++, &i)) {
            goto err;
        }

        vring_split_desc_read(vdev, &desc, desc_cache, i);

        if (desc.flags & VRING_DESC_F_INDIRECT) {
            if (!desc.len || (desc.len % sizeof(VRingDesc))) {
                virtio_error(vdev, "Invalid size for indirect buffer table");
                goto err;
            }

            /* If we've got too many, that implies a descriptor loop. */
            if (num_bufs >= max) {
                virtio_error(vdev, "Looped descriptor");
                goto err;
            }

            /* loop over the indirect descriptor table */
            len = address_space_cache_init(&indirect_desc_cache,
                                           vdev->dma_as,
                                           desc.addr, desc.len, false);
            desc_cache = &indirect_desc_cache;
            if (len < desc.len) {
                virtio_error(vdev, "Cannot map indirect buffer");
                goto err;
            }

            max = desc.len / sizeof(VRingDesc);
            num_bufs = i = 0;
            vring_split_desc_read(vdev, &desc, desc_cache, i);
        }

        do {
            /* If we've got too many, that implies a descriptor loop. */
            if (++num_bufs > max) {
                virtio_error(vdev, "Looped descriptor");
                goto err;
            }

            if (desc.flags & VRING_DESC_F_WRITE) {
                in_total += desc.len;
            } else {
                out_total += desc.len;
            }
            if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
                goto done;
            }

            rc = virtqueue_split_read_next_desc(vdev, &desc, desc_cache, max, &i);
        } while (rc == VIRTQUEUE_READ_DESC_MORE);

        if (rc == VIRTQUEUE_READ_DESC_ERROR) {
            goto err;
        }

        if (desc_cache == &indirect_desc_cache) {
            address_space_cache_destroy(&indirect_desc_cache);
            max = vq->vring.num;
        }

        total_bufs = num_bufs;
    }

    if (rc < 0) {
        goto err;
    }

done:
    address_space_cache_destroy(&indirect_desc_cache);
    if (in_bytes) {
        *in_bytes = in_total;
    }
    if (out_bytes) {
        *out_bytes = out_total;
    }
    return;

err:
    in_total = out_total = 0;
    goto done;
}
static int virtqueue_packed_read_next_desc(VirtQueue *vq,
                                           VRingPackedDesc *desc,
                                           MemoryRegionCache *desc_cache,
                                           unsigned int max,
                                           unsigned int *next,
                                           bool indirect)
{
    /* If this descriptor says it doesn't chain, we're done. */
    if (!indirect && !(desc->flags & VRING_DESC_F_NEXT)) {
        return VIRTQUEUE_READ_DESC_DONE;
    }

    ++*next;
    if (*next == max) {
        if (indirect) {
            return VIRTQUEUE_READ_DESC_DONE;
        } else {
            (*next) -= vq->vring.num;
        }
    }

    vring_packed_desc_read(vq->vdev, desc, desc_cache, *next, false);
    return VIRTQUEUE_READ_DESC_MORE;
}
/* Called within rcu_read_lock().  */
static void virtqueue_packed_get_avail_bytes(VirtQueue *vq,
                                             unsigned int *in_bytes,
                                             unsigned int *out_bytes,
                                             unsigned max_in_bytes,
                                             unsigned max_out_bytes,
                                             VRingMemoryRegionCaches *caches)
{
    VirtIODevice *vdev = vq->vdev;
    unsigned int max, idx;
    unsigned int total_bufs, in_total, out_total;
    MemoryRegionCache *desc_cache;
    MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID;
    int64_t len = 0;
    VRingPackedDesc desc;
    bool wrap_counter;

    idx = vq->last_avail_idx;
    wrap_counter = vq->last_avail_wrap_counter;
    total_bufs = in_total = out_total = 0;

    max = vq->vring.num;

    for (;;) {
        unsigned int num_bufs = total_bufs;
        unsigned int i = idx;
        int rc;

        desc_cache = &caches->desc;
        vring_packed_desc_read(vdev, &desc, desc_cache, idx, true);
        if (!is_desc_avail(desc.flags, wrap_counter)) {
            break;
        }

        if (desc.flags & VRING_DESC_F_INDIRECT) {
            if (desc.len % sizeof(VRingPackedDesc)) {
                virtio_error(vdev, "Invalid size for indirect buffer table");
                goto err;
            }

            /* If we've got too many, that implies a descriptor loop. */
            if (num_bufs >= max) {
                virtio_error(vdev, "Looped descriptor");
                goto err;
            }

            /* loop over the indirect descriptor table */
            len = address_space_cache_init(&indirect_desc_cache,
                                           vdev->dma_as,
                                           desc.addr, desc.len, false);
            desc_cache = &indirect_desc_cache;
            if (len < desc.len) {
                virtio_error(vdev, "Cannot map indirect buffer");
                goto err;
            }

            max = desc.len / sizeof(VRingPackedDesc);
            num_bufs = i = 0;
            vring_packed_desc_read(vdev, &desc, desc_cache, i, false);
        }

        do {
            /* If we've got too many, that implies a descriptor loop. */
            if (++num_bufs > max) {
                virtio_error(vdev, "Looped descriptor");
                goto err;
            }

            if (desc.flags & VRING_DESC_F_WRITE) {
                in_total += desc.len;
            } else {
                out_total += desc.len;
            }
            if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
                goto done;
            }

            rc = virtqueue_packed_read_next_desc(vq, &desc, desc_cache, max,
                                                 &i, desc_cache ==
                                                 &indirect_desc_cache);
        } while (rc == VIRTQUEUE_READ_DESC_MORE);

        if (desc_cache == &indirect_desc_cache) {
            address_space_cache_destroy(&indirect_desc_cache);
            max = vq->vring.num;
            idx++;
        } else {
            idx += num_bufs - total_bufs;
        }
        total_bufs = num_bufs;

        if (idx >= vq->vring.num) {
            idx -= vq->vring.num;
            wrap_counter ^= 1;
        }
    }

    /* Record the index and wrap counter for a kick we want */
    vq->shadow_avail_idx = idx;
    vq->shadow_avail_wrap_counter = wrap_counter;

done:
    address_space_cache_destroy(&indirect_desc_cache);
    if (in_bytes) {
        *in_bytes = in_total;
    }
    if (out_bytes) {
        *out_bytes = out_total;
    }
    return;

err:
    in_total = out_total = 0;
    goto done;
}
void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
                               unsigned int *out_bytes,
                               unsigned max_in_bytes, unsigned max_out_bytes)
{
    uint16_t desc_size;
    VRingMemoryRegionCaches *caches;

    RCU_READ_LOCK_GUARD();

    if (unlikely(!vq->vring.desc)) {
        goto err;
    }

    caches = vring_get_region_caches(vq);
    if (!caches) {
        goto err;
    }

    desc_size = virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED) ?
                                sizeof(VRingPackedDesc) : sizeof(VRingDesc);
    if (caches->desc.len < vq->vring.num * desc_size) {
        virtio_error(vq->vdev, "Cannot map descriptor ring");
        goto err;
    }

    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        virtqueue_packed_get_avail_bytes(vq, in_bytes, out_bytes,
                                         max_in_bytes, max_out_bytes,
                                         caches);
    } else {
        virtqueue_split_get_avail_bytes(vq, in_bytes, out_bytes,
                                        max_in_bytes, max_out_bytes,
                                        caches);
    }

    return;
err:
    if (in_bytes) {
        *in_bytes = 0;
    }
    if (out_bytes) {
        *out_bytes = 0;
    }
}
int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes,
                          unsigned int out_bytes)
{
    unsigned int in_total, out_total;

    virtqueue_get_avail_bytes(vq, &in_total, &out_total, in_bytes, out_bytes);
    return in_bytes <= in_total && out_bytes <= out_total;
}
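/*
 * Example: a device that needs at least an 8-byte device-writable buffer
 * and a 16-byte driver-readable buffer before popping can check cheaply
 * (sketch):
 *
 *     if (virtqueue_avail_bytes(vq, 8, 16)) {
 *         elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
 *         ...
 *     }
 */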
static bool virtqueue_map_desc(VirtIODevice *vdev, unsigned int *p_num_sg,
                               hwaddr *addr, struct iovec *iov,
                               unsigned int max_num_sg, bool is_write,
                               hwaddr pa, size_t sz)
{
    bool ok = false;
    unsigned num_sg = *p_num_sg;
    assert(num_sg <= max_num_sg);

    if (!sz) {
        virtio_error(vdev, "virtio: zero sized buffers are not allowed");
        goto out;
    }

    while (sz) {
        hwaddr len = sz;

        if (num_sg == max_num_sg) {
            virtio_error(vdev, "virtio: too many write descriptors in "
                               "indirect table");
            goto out;
        }

        iov[num_sg].iov_base = dma_memory_map(vdev->dma_as, pa, &len,
                                              is_write ?
                                              DMA_DIRECTION_FROM_DEVICE :
                                              DMA_DIRECTION_TO_DEVICE);
        if (!iov[num_sg].iov_base) {
            virtio_error(vdev, "virtio: bogus descriptor or out of resources");
            goto out;
        }

        iov[num_sg].iov_len = len;
        addr[num_sg] = pa;

        sz -= len;
        pa += len;
        num_sg++;
    }
    ok = true;

out:
    *p_num_sg = num_sg;
    return ok;
}
/* Only used by error code paths before we have a VirtQueueElement (therefore
 * virtqueue_unmap_sg() can't be used).  Assumes buffers weren't written to
 * yet.
 */
static void virtqueue_undo_map_desc(unsigned int out_num, unsigned int in_num,
                                    struct iovec *iov)
{
    unsigned int i;

    for (i = 0; i < out_num + in_num; i++) {
        int is_write = i >= out_num;

        cpu_physical_memory_unmap(iov->iov_base, iov->iov_len, is_write, 0);
        iov++;
    }
}
static void virtqueue_map_iovec(VirtIODevice *vdev, struct iovec *sg,
                                hwaddr *addr, unsigned int num_sg,
                                bool is_write)
{
    unsigned int i;
    hwaddr len;

    for (i = 0; i < num_sg; i++) {
        len = sg[i].iov_len;
        sg[i].iov_base = dma_memory_map(vdev->dma_as,
                                        addr[i], &len, is_write ?
                                        DMA_DIRECTION_FROM_DEVICE :
                                        DMA_DIRECTION_TO_DEVICE);
        if (!sg[i].iov_base) {
            error_report("virtio: error trying to map MMIO memory");
            exit(1);
        }
        if (len != sg[i].iov_len) {
            error_report("virtio: unexpected memory split");
            exit(1);
        }
    }
}
void virtqueue_map(VirtIODevice *vdev, VirtQueueElement *elem)
{
    virtqueue_map_iovec(vdev, elem->in_sg, elem->in_addr, elem->in_num, true);
    virtqueue_map_iovec(vdev, elem->out_sg, elem->out_addr, elem->out_num,
                        false);
}
static void *virtqueue_alloc_element(size_t sz, unsigned out_num, unsigned in_num)
{
    VirtQueueElement *elem;
    size_t in_addr_ofs = QEMU_ALIGN_UP(sz, __alignof__(elem->in_addr[0]));
    size_t out_addr_ofs = in_addr_ofs + in_num * sizeof(elem->in_addr[0]);
    size_t out_addr_end = out_addr_ofs + out_num * sizeof(elem->out_addr[0]);
    size_t in_sg_ofs = QEMU_ALIGN_UP(out_addr_end, __alignof__(elem->in_sg[0]));
    size_t out_sg_ofs = in_sg_ofs + in_num * sizeof(elem->in_sg[0]);
    size_t out_sg_end = out_sg_ofs + out_num * sizeof(elem->out_sg[0]);

    assert(sz >= sizeof(VirtQueueElement));
    elem = g_malloc(out_sg_end);
    trace_virtqueue_alloc_element(elem, sz, in_num, out_num);
    elem->out_num = out_num;
    elem->in_num = in_num;
    elem->in_addr = (void *)elem + in_addr_ofs;
    elem->out_addr = (void *)elem + out_addr_ofs;
    elem->in_sg = (void *)elem + in_sg_ofs;
    elem->out_sg = (void *)elem + out_sg_ofs;
    return elem;
}
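/*
 * The element is a single allocation: the caller's structure (sz bytes,
 * which must embed a VirtQueueElement at offset 0) followed by the
 * in_addr[], out_addr[], in_sg[] and out_sg[] arrays, each placed at the
 * alignment of its element type.  For example, on a 64-bit host with
 * in_num = 2 and out_num = 1, out_sg_end is roughly sz + 3 * 8 + 3 * 16
 * plus alignment padding, and that is the size passed to g_malloc() above.
 */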
static void *virtqueue_split_pop(VirtQueue *vq, size_t sz)
{
    unsigned int i, head, max;
    VRingMemoryRegionCaches *caches;
    MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID;
    MemoryRegionCache *desc_cache;
    int64_t len;
    VirtIODevice *vdev = vq->vdev;
    VirtQueueElement *elem = NULL;
    unsigned out_num, in_num, elem_entries;
    hwaddr addr[VIRTQUEUE_MAX_SIZE];
    struct iovec iov[VIRTQUEUE_MAX_SIZE];
    VRingDesc desc;
    int rc;

    RCU_READ_LOCK_GUARD();
    if (virtio_queue_empty_rcu(vq)) {
        goto done;
    }
    /* Needed after virtio_queue_empty(), see comment in
     * virtqueue_num_heads(). */
    smp_rmb();

    /* When we start there are none of either input nor output. */
    out_num = in_num = elem_entries = 0;

    max = vq->vring.num;

    if (vq->inuse >= vq->vring.num) {
        virtio_error(vdev, "Virtqueue size exceeded");
        goto done;
    }

    if (!virtqueue_get_head(vq, vq->last_avail_idx++, &head)) {
        goto done;
    }

    if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vq->last_avail_idx);
    }

    i = head;

    caches = vring_get_region_caches(vq);
    if (!caches) {
        virtio_error(vdev, "Region caches not initialized");
        goto done;
    }

    if (caches->desc.len < max * sizeof(VRingDesc)) {
        virtio_error(vdev, "Cannot map descriptor ring");
        goto done;
    }

    desc_cache = &caches->desc;
    vring_split_desc_read(vdev, &desc, desc_cache, i);
    if (desc.flags & VRING_DESC_F_INDIRECT) {
        if (!desc.len || (desc.len % sizeof(VRingDesc))) {
            virtio_error(vdev, "Invalid size for indirect buffer table");
            goto done;
        }

        /* loop over the indirect descriptor table */
        len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as,
                                       desc.addr, desc.len, false);
        desc_cache = &indirect_desc_cache;
        if (len < desc.len) {
            virtio_error(vdev, "Cannot map indirect buffer");
            goto done;
        }

        max = desc.len / sizeof(VRingDesc);
        i = 0;
        vring_split_desc_read(vdev, &desc, desc_cache, i);
    }

    /* Collect all the descriptors */
    do {
        bool map_ok;

        if (desc.flags & VRING_DESC_F_WRITE) {
            map_ok = virtqueue_map_desc(vdev, &in_num, addr + out_num,
                                        iov + out_num,
                                        VIRTQUEUE_MAX_SIZE - out_num, true,
                                        desc.addr, desc.len);
        } else {
            if (in_num) {
                virtio_error(vdev, "Incorrect order for descriptors");
                goto err_undo_map;
            }
            map_ok = virtqueue_map_desc(vdev, &out_num, addr, iov,
                                        VIRTQUEUE_MAX_SIZE, false,
                                        desc.addr, desc.len);
        }
        if (!map_ok) {
            goto err_undo_map;
        }

        /* If we've got too many, that implies a descriptor loop. */
        if (++elem_entries > max) {
            virtio_error(vdev, "Looped descriptor");
            goto err_undo_map;
        }

        rc = virtqueue_split_read_next_desc(vdev, &desc, desc_cache, max, &i);
    } while (rc == VIRTQUEUE_READ_DESC_MORE);

    if (rc == VIRTQUEUE_READ_DESC_ERROR) {
        goto err_undo_map;
    }

    /* Now copy what we have collected and mapped */
    elem = virtqueue_alloc_element(sz, out_num, in_num);
    elem->index = head;
    elem->ndescs = 1;
    for (i = 0; i < out_num; i++) {
        elem->out_addr[i] = addr[i];
        elem->out_sg[i] = iov[i];
    }
    for (i = 0; i < in_num; i++) {
        elem->in_addr[i] = addr[out_num + i];
        elem->in_sg[i] = iov[out_num + i];
    }

    vq->inuse++;

    trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
done:
    address_space_cache_destroy(&indirect_desc_cache);

    return elem;

err_undo_map:
    virtqueue_undo_map_desc(out_num, in_num, iov);
    goto done;
}
static void *virtqueue_packed_pop(VirtQueue *vq, size_t sz)
{
    unsigned int i, max;
    VRingMemoryRegionCaches *caches;
    MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID;
    MemoryRegionCache *desc_cache;
    int64_t len;
    VirtIODevice *vdev = vq->vdev;
    VirtQueueElement *elem = NULL;
    unsigned out_num, in_num, elem_entries;
    hwaddr addr[VIRTQUEUE_MAX_SIZE];
    struct iovec iov[VIRTQUEUE_MAX_SIZE];
    VRingPackedDesc desc;
    uint16_t id;
    int rc;

    RCU_READ_LOCK_GUARD();
    if (virtio_queue_packed_empty_rcu(vq)) {
        goto done;
    }

    /* When we start there are none of either input nor output. */
    out_num = in_num = elem_entries = 0;

    max = vq->vring.num;

    if (vq->inuse >= vq->vring.num) {
        virtio_error(vdev, "Virtqueue size exceeded");
        goto done;
    }

    i = vq->last_avail_idx;

    caches = vring_get_region_caches(vq);
    if (!caches) {
        virtio_error(vdev, "Region caches not initialized");
        goto done;
    }

    if (caches->desc.len < max * sizeof(VRingDesc)) {
        virtio_error(vdev, "Cannot map descriptor ring");
        goto done;
    }

    desc_cache = &caches->desc;
    vring_packed_desc_read(vdev, &desc, desc_cache, i, true);
    id = desc.id;
    if (desc.flags & VRING_DESC_F_INDIRECT) {
        if (desc.len % sizeof(VRingPackedDesc)) {
            virtio_error(vdev, "Invalid size for indirect buffer table");
            goto done;
        }

        /* loop over the indirect descriptor table */
        len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as,
                                       desc.addr, desc.len, false);
        desc_cache = &indirect_desc_cache;
        if (len < desc.len) {
            virtio_error(vdev, "Cannot map indirect buffer");
            goto done;
        }

        max = desc.len / sizeof(VRingPackedDesc);
        i = 0;
        vring_packed_desc_read(vdev, &desc, desc_cache, i, false);
    }

    /* Collect all the descriptors */
    do {
        bool map_ok;

        if (desc.flags & VRING_DESC_F_WRITE) {
            map_ok = virtqueue_map_desc(vdev, &in_num, addr + out_num,
                                        iov + out_num,
                                        VIRTQUEUE_MAX_SIZE - out_num, true,
                                        desc.addr, desc.len);
        } else {
            if (in_num) {
                virtio_error(vdev, "Incorrect order for descriptors");
                goto err_undo_map;
            }
            map_ok = virtqueue_map_desc(vdev, &out_num, addr, iov,
                                        VIRTQUEUE_MAX_SIZE, false,
                                        desc.addr, desc.len);
        }
        if (!map_ok) {
            goto err_undo_map;
        }

        /* If we've got too many, that implies a descriptor loop. */
        if (++elem_entries > max) {
            virtio_error(vdev, "Looped descriptor");
            goto err_undo_map;
        }

        rc = virtqueue_packed_read_next_desc(vq, &desc, desc_cache, max, &i,
                                             desc_cache ==
                                             &indirect_desc_cache);
    } while (rc == VIRTQUEUE_READ_DESC_MORE);

    /* Now copy what we have collected and mapped */
    elem = virtqueue_alloc_element(sz, out_num, in_num);
    for (i = 0; i < out_num; i++) {
        elem->out_addr[i] = addr[i];
        elem->out_sg[i] = iov[i];
    }
    for (i = 0; i < in_num; i++) {
        elem->in_addr[i] = addr[out_num + i];
        elem->in_sg[i] = iov[out_num + i];
    }

    elem->index = id;
    elem->ndescs = (desc_cache == &indirect_desc_cache) ? 1 : elem_entries;
    vq->last_avail_idx += elem->ndescs;
    vq->inuse += elem->ndescs;

    if (vq->last_avail_idx >= vq->vring.num) {
        vq->last_avail_idx -= vq->vring.num;
        vq->last_avail_wrap_counter ^= 1;
    }

    vq->shadow_avail_idx = vq->last_avail_idx;
    vq->shadow_avail_wrap_counter = vq->last_avail_wrap_counter;

    trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
done:
    address_space_cache_destroy(&indirect_desc_cache);

    return elem;

err_undo_map:
    virtqueue_undo_map_desc(out_num, in_num, iov);
    goto done;
}
void *virtqueue_pop(VirtQueue *vq, size_t sz)
{
    if (virtio_device_disabled(vq->vdev)) {
        return NULL;
    }

    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        return virtqueue_packed_pop(vq, sz);
    } else {
        return virtqueue_split_pop(vq, sz);
    }
}
static unsigned int virtqueue_packed_drop_all(VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches;
    MemoryRegionCache *desc_cache;
    unsigned int dropped = 0;
    VirtQueueElement elem = {};
    VirtIODevice *vdev = vq->vdev;
    VRingPackedDesc desc;

    RCU_READ_LOCK_GUARD();

    caches = vring_get_region_caches(vq);
    if (!caches) {
        return 0;
    }

    desc_cache = &caches->desc;

    virtio_queue_set_notification(vq, 0);

    while (vq->inuse < vq->vring.num) {
        unsigned int idx = vq->last_avail_idx;
        /*
         * works similar to virtqueue_pop but does not map buffers
         * and does not allocate any memory.
         */
        vring_packed_desc_read(vdev, &desc, desc_cache,
                               vq->last_avail_idx, true);
        if (!is_desc_avail(desc.flags, vq->last_avail_wrap_counter)) {
            break;
        }
        elem.index = desc.id;
        elem.ndescs = 1;
        while (virtqueue_packed_read_next_desc(vq, &desc, desc_cache,
                                               vq->vring.num, &idx, false)) {
            ++elem.ndescs;
        }
        /*
         * immediately push the element, nothing to unmap
         * as both in_num and out_num are set to 0.
         */
        virtqueue_push(vq, &elem, 0);
        dropped++;
        vq->last_avail_idx += elem.ndescs;
        if (vq->last_avail_idx >= vq->vring.num) {
            vq->last_avail_idx -= vq->vring.num;
            vq->last_avail_wrap_counter ^= 1;
        }
    }

    return dropped;
}
static unsigned int virtqueue_split_drop_all(VirtQueue *vq)
{
    unsigned int dropped = 0;
    VirtQueueElement elem = {};
    VirtIODevice *vdev = vq->vdev;
    bool fEventIdx = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

    while (!virtio_queue_empty(vq) && vq->inuse < vq->vring.num) {
        /* works similar to virtqueue_pop but does not map buffers
         * and does not allocate any memory */
        smp_rmb();
        if (!virtqueue_get_head(vq, vq->last_avail_idx, &elem.index)) {
            break;
        }
        vq->inuse++;
        vq->last_avail_idx++;
        if (fEventIdx) {
            vring_set_avail_event(vq, vq->last_avail_idx);
        }
        /* immediately push the element, nothing to unmap
         * as both in_num and out_num are set to 0 */
        virtqueue_push(vq, &elem, 0);
        dropped++;
    }

    return dropped;
}
/* virtqueue_drop_all:
 * @vq: The #VirtQueue
 * Drops all queued buffers and indicates them to the guest
 * as if they are done.  Useful when buffers can not be
 * processed but must be returned to the guest.
 */
unsigned int virtqueue_drop_all(VirtQueue *vq)
{
    struct VirtIODevice *vdev = vq->vdev;

    if (virtio_device_disabled(vq->vdev)) {
        return 0;
    }

    if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
        return virtqueue_packed_drop_all(vq);
    } else {
        return virtqueue_split_drop_all(vq);
    }
}
/* Reading and writing a structure directly to QEMUFile is *awful*, but
 * it is what QEMU has always done by mistake.  We can change it sooner
 * or later by bumping the version number of the affected vm states.
 * In the meanwhile, since the in-memory layout of VirtQueueElement
 * has changed, we need to marshal to and from the layout that was
 * used before the change.
 */
typedef struct VirtQueueElementOld {
    unsigned int index;
    unsigned int out_num;
    unsigned int in_num;
    hwaddr in_addr[VIRTQUEUE_MAX_SIZE];
    hwaddr out_addr[VIRTQUEUE_MAX_SIZE];
    struct iovec in_sg[VIRTQUEUE_MAX_SIZE];
    struct iovec out_sg[VIRTQUEUE_MAX_SIZE];
} VirtQueueElementOld;
void *qemu_get_virtqueue_element(VirtIODevice *vdev, QEMUFile *f, size_t sz)
{
    VirtQueueElement *elem;
    VirtQueueElementOld data;
    int i;

    qemu_get_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld));

    /* TODO: teach all callers that this can fail, and return failure instead
     * of asserting here.
     * This is just one thing (there are probably more) that must be
     * fixed before we can allow NDEBUG compilation.
     */
    assert(ARRAY_SIZE(data.in_addr) >= data.in_num);
    assert(ARRAY_SIZE(data.out_addr) >= data.out_num);

    elem = virtqueue_alloc_element(sz, data.out_num, data.in_num);
    elem->index = data.index;

    for (i = 0; i < elem->in_num; i++) {
        elem->in_addr[i] = data.in_addr[i];
    }

    for (i = 0; i < elem->out_num; i++) {
        elem->out_addr[i] = data.out_addr[i];
    }

    for (i = 0; i < elem->in_num; i++) {
        /* Base is overwritten by virtqueue_map.  */
        elem->in_sg[i].iov_base = 0;
        elem->in_sg[i].iov_len = data.in_sg[i].iov_len;
    }

    for (i = 0; i < elem->out_num; i++) {
        /* Base is overwritten by virtqueue_map.  */
        elem->out_sg[i].iov_base = 0;
        elem->out_sg[i].iov_len = data.out_sg[i].iov_len;
    }

    if (virtio_host_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
        qemu_get_be32s(f, &elem->ndescs);
    }

    virtqueue_map(vdev, elem);
    return elem;
}
void qemu_put_virtqueue_element(VirtIODevice *vdev, QEMUFile *f,
                                VirtQueueElement *elem)
{
    VirtQueueElementOld data;
    int i;

    memset(&data, 0, sizeof(data));
    data.index = elem->index;
    data.in_num = elem->in_num;
    data.out_num = elem->out_num;

    for (i = 0; i < elem->in_num; i++) {
        data.in_addr[i] = elem->in_addr[i];
    }

    for (i = 0; i < elem->out_num; i++) {
        data.out_addr[i] = elem->out_addr[i];
    }

    for (i = 0; i < elem->in_num; i++) {
        /* Base is overwritten by virtqueue_map when loading.  Do not
         * save it, as it would leak the QEMU address space layout. */
        data.in_sg[i].iov_len = elem->in_sg[i].iov_len;
    }

    for (i = 0; i < elem->out_num; i++) {
        /* Do not save iov_base as above. */
        data.out_sg[i].iov_len = elem->out_sg[i].iov_len;
    }

    if (virtio_host_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
        qemu_put_be32s(f, &elem->ndescs);
    }

    qemu_put_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld));
}
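/*
 * Devices embed VirtQueueElement as the first member of their request
 * struct and pass the outer size as @sz.  Sketch (MyReq is hypothetical):
 *
 *     typedef struct MyReq {
 *         VirtQueueElement elem;
 *         uint32_t state;
 *     } MyReq;
 *
 *     save:  qemu_put_virtqueue_element(vdev, f, &req->elem);
 *     load:  MyReq *req = qemu_get_virtqueue_element(vdev, f, sizeof(MyReq));
 */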
static void virtio_notify_vector(VirtIODevice *vdev, uint16_t vector)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    if (virtio_device_disabled(vdev)) {
        return;
    }

    if (k->notify) {
        k->notify(qbus->parent, vector);
    }
}

void virtio_update_irq(VirtIODevice *vdev)
{
    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
}
static int virtio_validate_features(VirtIODevice *vdev)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    if (virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM) &&
        !virtio_vdev_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM)) {
        return -EFAULT;
    }

    if (k->validate_features) {
        return k->validate_features(vdev);
    } else {
        return 0;
    }
}
int virtio_set_status(VirtIODevice *vdev, uint8_t val)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    trace_virtio_set_status(vdev, val);

    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        if (!(vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) &&
            val & VIRTIO_CONFIG_S_FEATURES_OK) {
            int ret = virtio_validate_features(vdev);

            if (ret) {
                return ret;
            }
        }
    }

    if ((vdev->status & VIRTIO_CONFIG_S_DRIVER_OK) !=
        (val & VIRTIO_CONFIG_S_DRIVER_OK)) {
        virtio_set_started(vdev, val & VIRTIO_CONFIG_S_DRIVER_OK);
    }

    if (k->set_status) {
        k->set_status(vdev, val);
    }
    vdev->status = val;

    return 0;
}
static enum virtio_device_endian virtio_default_endian(void)
{
    if (target_words_bigendian()) {
        return VIRTIO_DEVICE_ENDIAN_BIG;
    } else {
        return VIRTIO_DEVICE_ENDIAN_LITTLE;
    }
}

static enum virtio_device_endian virtio_current_cpu_endian(void)
{
    if (cpu_virtio_is_big_endian(current_cpu)) {
        return VIRTIO_DEVICE_ENDIAN_BIG;
    } else {
        return VIRTIO_DEVICE_ENDIAN_LITTLE;
    }
}
void virtio_reset(void *opaque)
{
    VirtIODevice *vdev = opaque;
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    int i;

    virtio_set_status(vdev, 0);
    if (current_cpu) {
        /* Guest initiated reset */
        vdev->device_endian = virtio_current_cpu_endian();
    } else {
        /* System reset */
        vdev->device_endian = virtio_default_endian();
    }

    if (k->reset) {
        k->reset(vdev);
    }

    vdev->start_on_kick = false;
    vdev->started = false;
    vdev->broken = false;
    vdev->guest_features = 0;
    vdev->queue_sel = 0;
    vdev->status = 0;
    vdev->disabled = false;
    qatomic_set(&vdev->isr, 0);
    vdev->config_vector = VIRTIO_NO_VECTOR;
    virtio_notify_vector(vdev, vdev->config_vector);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        vdev->vq[i].vring.desc = 0;
        vdev->vq[i].vring.avail = 0;
        vdev->vq[i].vring.used = 0;
        vdev->vq[i].last_avail_idx = 0;
        vdev->vq[i].shadow_avail_idx = 0;
        vdev->vq[i].used_idx = 0;
        vdev->vq[i].last_avail_wrap_counter = true;
        vdev->vq[i].shadow_avail_wrap_counter = true;
        vdev->vq[i].used_wrap_counter = true;
        virtio_queue_set_vector(vdev, i, VIRTIO_NO_VECTOR);
        vdev->vq[i].signalled_used = 0;
        vdev->vq[i].signalled_used_valid = false;
        vdev->vq[i].notification = true;
        vdev->vq[i].vring.num = vdev->vq[i].vring.num_default;
        vdev->vq[i].inuse = 0;
        virtio_virtqueue_reset_region_cache(&vdev->vq[i]);
    }
}
uint32_t virtio_config_readb(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldub_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_readw(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = lduw_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_readl(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldl_p(vdev->config + addr);
    return val;
}

void virtio_config_writeb(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stb_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_writew(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stw_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_writel(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stl_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}
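/*
 * The legacy accessors above use the guest's natural endianness (ldub_p,
 * lduw_p, ldl_p and the matching stores); the "modern" accessors below are
 * fixed little-endian (lduw_le_p, ldl_le_p, ...), as virtio 1.0 requires
 * for config space.  The same 16-bit field read by a big-endian guest thus
 * goes through lduw_p() here but lduw_le_p() in virtio_config_modern_readw().
 */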
uint32_t virtio_config_modern_readb(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldub_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_modern_readw(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = lduw_le_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_modern_readl(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldl_le_p(vdev->config + addr);
    return val;
}

void virtio_config_modern_writeb(VirtIODevice *vdev,
                                 uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stb_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_modern_writew(VirtIODevice *vdev,
                                 uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stw_le_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_modern_writel(VirtIODevice *vdev,
                                 uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stl_le_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}
void virtio_queue_set_addr(VirtIODevice *vdev, int n, hwaddr addr)
{
    if (!vdev->vq[n].vring.num) {
        return;
    }
    vdev->vq[n].vring.desc = addr;
    virtio_queue_update_rings(vdev, n);
}

hwaddr virtio_queue_get_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

void virtio_queue_set_rings(VirtIODevice *vdev, int n, hwaddr desc,
                            hwaddr avail, hwaddr used)
{
    if (!vdev->vq[n].vring.num) {
        return;
    }
    vdev->vq[n].vring.desc = desc;
    vdev->vq[n].vring.avail = avail;
    vdev->vq[n].vring.used = used;
    virtio_init_region_cache(vdev, n);
}
void virtio_queue_set_num(VirtIODevice *vdev, int n, int num)
{
    /* Don't allow guest to flip queue between existent and
     * nonexistent states, or to set it to an invalid size.
     */
    if (!!num != !!vdev->vq[n].vring.num ||
        num > VIRTQUEUE_MAX_SIZE ||
        num < 0) {
        return;
    }
    vdev->vq[n].vring.num = num;
}
VirtQueue *virtio_vector_first_queue(VirtIODevice *vdev, uint16_t vector)
{
    return QLIST_FIRST(&vdev->vector_queues[vector]);
}

VirtQueue *virtio_vector_next_queue(VirtQueue *vq)
{
    return QLIST_NEXT(vq, node);
}
int virtio_queue_get_num(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.num;
}

int virtio_queue_get_max_num(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.num_default;
}

int virtio_get_num_queues(VirtIODevice *vdev)
{
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (!virtio_queue_get_num(vdev, i)) {
            break;
        }
    }

    return i;
}
void virtio_queue_set_align(VirtIODevice *vdev, int n, int align)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    /* virtio-1 compliant devices cannot change the alignment */
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        error_report("tried to modify queue alignment for virtio-1 device");
        return;
    }
    /* Check that the transport told us it was going to do this
     * (so a buggy transport will immediately assert rather than
     * silently failing to migrate this state)
     */
    assert(k->has_variable_vring_alignment);

    if (align) {
        vdev->vq[n].vring.align = align;
        virtio_queue_update_rings(vdev, n);
    }
}
static bool virtio_queue_notify_aio_vq(VirtQueue *vq)
{
    bool ret = false;

    if (vq->vring.desc && vq->handle_aio_output) {
        VirtIODevice *vdev = vq->vdev;

        trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
        ret = vq->handle_aio_output(vdev, vq);

        if (unlikely(vdev->start_on_kick)) {
            virtio_set_started(vdev, true);
        }
    }

    return ret;
}
static void virtio_queue_notify_vq(VirtQueue *vq)
{
    if (vq->vring.desc && vq->handle_output) {
        VirtIODevice *vdev = vq->vdev;

        if (unlikely(vdev->broken)) {
            return;
        }

        trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
        vq->handle_output(vdev, vq);

        if (unlikely(vdev->start_on_kick)) {
            virtio_set_started(vdev, true);
        }
    }
}
void virtio_queue_notify(VirtIODevice *vdev, int n)
{
    VirtQueue *vq = &vdev->vq[n];

    if (unlikely(!vq->vring.desc || vdev->broken)) {
        return;
    }

    trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
    if (vq->host_notifier_enabled) {
        event_notifier_set(&vq->host_notifier);
    } else if (vq->handle_output) {
        vq->handle_output(vdev, vq);

        if (unlikely(vdev->start_on_kick)) {
            virtio_set_started(vdev, true);
        }
    }
}
uint16_t virtio_queue_vector(VirtIODevice *vdev, int n)
{
    return n < VIRTIO_QUEUE_MAX ? vdev->vq[n].vector :
        VIRTIO_NO_VECTOR;
}
void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector)
{
    VirtQueue *vq = &vdev->vq[n];

    if (n < VIRTIO_QUEUE_MAX) {
        if (vdev->vector_queues &&
            vdev->vq[n].vector != VIRTIO_NO_VECTOR) {
            QLIST_REMOVE(vq, node);
        }
        vdev->vq[n].vector = vector;
        if (vdev->vector_queues &&
            vector != VIRTIO_NO_VECTOR) {
            QLIST_INSERT_HEAD(&vdev->vector_queues[vector], vq, node);
        }
    }
}
VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
                            VirtIOHandleOutput handle_output)
{
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0) {
            break;
        }
    }

    if (i == VIRTIO_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE) {
        abort();
    }

    vdev->vq[i].vring.num = queue_size;
    vdev->vq[i].vring.num_default = queue_size;
    vdev->vq[i].vring.align = VIRTIO_PCI_VRING_ALIGN;
    vdev->vq[i].handle_output = handle_output;
    vdev->vq[i].handle_aio_output = NULL;
    vdev->vq[i].used_elems = g_malloc0(sizeof(VirtQueueElement) *
                                       queue_size);

    return &vdev->vq[i];
}
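/*
 * Example: a device's realize function typically creates its queues with
 * (sketch; the names are hypothetical):
 *
 *     s->req_vq = virtio_add_queue(vdev, 128, my_handle_output);
 *
 * and tears them down with virtio_delete_queue() or virtio_del_queue()
 * in unrealize.
 */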
void virtio_delete_queue(VirtQueue *vq)
{
    vq->vring.num = 0;
    vq->vring.num_default = 0;
    vq->handle_output = NULL;
    vq->handle_aio_output = NULL;
    g_free(vq->used_elems);
    vq->used_elems = NULL;
    virtio_virtqueue_reset_region_cache(vq);
}
void virtio_del_queue(VirtIODevice *vdev, int n)
{
    if (n < 0 || n >= VIRTIO_QUEUE_MAX) {
        abort();
    }

    virtio_delete_queue(&vdev->vq[n]);
}
static void virtio_set_isr(VirtIODevice *vdev, int value)
{
    uint8_t old = qatomic_read(&vdev->isr);

    /* Do not write ISR if it does not change, so that its cacheline remains
     * shared in the common case where the guest does not read it.
     */
    if ((old & value) != value) {
        qatomic_or(&vdev->isr, value);
    }
}
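
/*
 * For reference, the two ISR bits used in this file: bit 0 signals a
 * virtqueue interrupt and bit 1 a configuration change, which is why
 * virtio_notify_config() below sets 0x3 while queue notifications set
 * only 0x1.
 */
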
/* Called within rcu_read_lock(). */
static bool virtio_split_should_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    uint16_t old, new;
    bool v;
    /* We need to expose used array entries before checking used event. */
    smp_mb();
    /* Always notify when queue is empty (when feature acknowledge) */
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
        !vq->inuse && virtio_queue_empty(vq)) {
        return true;
    }

    if (!virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
        return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
    }

    v = vq->signalled_used_valid;
    vq->signalled_used_valid = true;
    old = vq->signalled_used;
    new = vq->signalled_used = vq->used_idx;
    return !v || vring_need_event(vring_get_used_event(vq), new, old);
}
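
/*
 * A worked example of the EVENT_IDX test above, assuming the
 * vring_need_event() definition from the standard virtio_ring.h
 * headers:
 *
 *     (uint16_t)(new - event_idx - 1) < (uint16_t)(new - old)
 *
 * With old = 10, used_event = 12 and new = 15 the guest asked to be
 * woken once index 12 was published: (15 - 12 - 1) = 2 < (15 - 10) = 5,
 * so we notify.  With used_event = 20 the window has not been crossed:
 * (15 - 20 - 1) wraps to 0xfffa, which is not < 5, so the interrupt is
 * skipped.  The uint16_t arithmetic keeps the comparison correct across
 * index wraparound.
 */
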
static bool vring_packed_need_event(VirtQueue *vq, bool wrap,
                                    uint16_t off_wrap, uint16_t new,
                                    uint16_t old)
{
    int off = off_wrap & ~(1 << 15);

    if (wrap != off_wrap >> 15) {
        off -= vq->vring.num;
    }

    return vring_need_event(off, new, old);
}
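
/*
 * Sketch of the off_wrap decoding above (a reading of the code, not a
 * spec quotation): bit 15 of off_wrap carries the wrap counter the
 * driver expects, bits 0-14 the event offset into the ring.  When that
 * wrap counter does not match the device's current one, the event
 * offset is treated as belonging to the previous pass over the ring,
 * so it is shifted down by vring.num before feeding it to the
 * split-style vring_need_event() comparison against the free-running
 * new/old counters.
 */
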
/* Called within rcu_read_lock(). */
static bool virtio_packed_should_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    VRingPackedDescEvent e;
    uint16_t old, new;
    bool v;
    VRingMemoryRegionCaches *caches;

    caches = vring_get_region_caches(vq);
    if (!caches) {
        return false;
    }

    vring_packed_event_read(vdev, &caches->avail, &e);

    old = vq->signalled_used;
    new = vq->signalled_used = vq->used_idx;
    v = vq->signalled_used_valid;
    vq->signalled_used_valid = true;

    if (e.flags == VRING_PACKED_EVENT_FLAG_DISABLE) {
        return false;
    } else if (e.flags == VRING_PACKED_EVENT_FLAG_ENABLE) {
        return true;
    }

    return !v || vring_packed_need_event(vq, vq->used_wrap_counter,
                                         e.off_wrap, new, old);
}

/* Called within rcu_read_lock(). */
static bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
        return virtio_packed_should_notify(vdev, vq);
    } else {
        return virtio_split_should_notify(vdev, vq);
    }
}

void virtio_notify_irqfd(VirtIODevice *vdev, VirtQueue *vq)
{
    WITH_RCU_READ_LOCK_GUARD() {
        if (!virtio_should_notify(vdev, vq)) {
            return;
        }
    }

    trace_virtio_notify_irqfd(vdev, vq);

    /*
     * virtio spec 1.0 says ISR bit 0 should be ignored with MSI, but
     * windows drivers included in virtio-win 1.8.0 (circa 2015) are
     * incorrectly polling this bit during crashdump and hibernation
     * in MSI mode, causing a hang if this bit is never updated.
     * Recent releases of Windows do not really shut down, but rather
     * log out and hibernate to make the next startup faster.  Hence,
     * this manifested as a more serious hang during shutdown with
     * recent versions of Windows.
     * Next driver release from 2016 fixed this problem, so working around it
     * is not a must, but it's easy to do so let's do it here.
     *
     * Note: it's safe to update ISR from any thread as it was switched
     * to an atomic operation.
     */
    virtio_set_isr(vq->vdev, 0x1);
    event_notifier_set(&vq->guest_notifier);
}

static void virtio_irq(VirtQueue *vq)
{
    virtio_set_isr(vq->vdev, 0x1);
    virtio_notify_vector(vq->vdev, vq->vector);
}

void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    WITH_RCU_READ_LOCK_GUARD() {
        if (!virtio_should_notify(vdev, vq)) {
            return;
        }
    }

    trace_virtio_notify(vdev, vq);
    virtio_irq(vq);
}

void virtio_notify_config(VirtIODevice *vdev)
{
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
        return;

    virtio_set_isr(vdev, 0x3);
    vdev->generation++;
    virtio_notify_vector(vdev, vdev->config_vector);
}

static bool virtio_device_endian_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    assert(vdev->device_endian != VIRTIO_DEVICE_ENDIAN_UNKNOWN);
    if (!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        return vdev->device_endian != virtio_default_endian();
    }
    /* Devices conforming to VIRTIO 1.0 or later are always LE. */
    return vdev->device_endian != VIRTIO_DEVICE_ENDIAN_LITTLE;
}

static bool virtio_64bit_features_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    return (vdev->host_features >> 32) != 0;
}

static bool virtio_virtqueue_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    return virtio_host_has_feature(vdev, VIRTIO_F_VERSION_1);
}

static bool virtio_packed_virtqueue_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    return virtio_host_has_feature(vdev, VIRTIO_F_RING_PACKED);
}

static bool virtio_ringsize_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num != vdev->vq[i].vring.num_default) {
            return true;
        }
    }
    return false;
}

static bool virtio_extra_state_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    return k->has_extra_state &&
        k->has_extra_state(qbus->parent);
}

static bool virtio_broken_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    return vdev->broken;
}

static bool virtio_started_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    return vdev->started;
}

static bool virtio_disabled_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    return vdev->disabled;
}

static const VMStateDescription vmstate_virtqueue = {
    .name = "virtqueue_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(vring.avail, struct VirtQueue),
        VMSTATE_UINT64(vring.used, struct VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_packed_virtqueue = {
    .name = "packed_virtqueue_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT16(last_avail_idx, struct VirtQueue),
        VMSTATE_BOOL(last_avail_wrap_counter, struct VirtQueue),
        VMSTATE_UINT16(used_idx, struct VirtQueue),
        VMSTATE_BOOL(used_wrap_counter, struct VirtQueue),
        VMSTATE_UINT32(inuse, struct VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_virtqueues = {
    .name = "virtio/virtqueues",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_virtqueue_needed,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
                      VIRTIO_QUEUE_MAX, 0, vmstate_virtqueue, VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_packed_virtqueues = {
    .name = "virtio/packed_virtqueues",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_packed_virtqueue_needed,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
                      VIRTIO_QUEUE_MAX, 0, vmstate_packed_virtqueue, VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_ringsize = {
    .name = "ringsize_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(vring.num_default, struct VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_ringsize = {
    .name = "virtio/ringsize",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_ringsize_needed,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
                      VIRTIO_QUEUE_MAX, 0, vmstate_ringsize, VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};

static int get_extra_state(QEMUFile *f, void *pv, size_t size,
                           const VMStateField *field)
{
    VirtIODevice *vdev = pv;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    if (!k->load_extra_state) {
        return -1;
    } else {
        return k->load_extra_state(qbus->parent, f);
    }
}

static int put_extra_state(QEMUFile *f, void *pv, size_t size,
                           const VMStateField *field, JSONWriter *vmdesc)
{
    VirtIODevice *vdev = pv;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    k->save_extra_state(qbus->parent, f);
    return 0;
}

static const VMStateInfo vmstate_info_extra_state = {
    .name = "virtqueue_extra_state",
    .get = get_extra_state,
    .put = put_extra_state,
};

static const VMStateDescription vmstate_virtio_extra_state = {
    .name = "virtio/extra_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_extra_state_needed,
    .fields = (VMStateField[]) {
        {
            .name         = "extra_state",
            .version_id   = 0,
            .field_exists = NULL,
            .size         = 0,
            .info         = &vmstate_info_extra_state,
            .flags        = VMS_SINGLE,
            .offset       = 0,
        },
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_device_endian = {
    .name = "virtio/device_endian",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_device_endian_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(device_endian, VirtIODevice),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_64bit_features = {
    .name = "virtio/64bit_features",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_64bit_features_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(guest_features, VirtIODevice),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_broken = {
    .name = "virtio/broken",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_broken_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(broken, VirtIODevice),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_started = {
    .name = "virtio/started",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_started_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(started, VirtIODevice),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_disabled = {
    .name = "virtio/disabled",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_disabled_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(disabled, VirtIODevice),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio = {
    .name = "virtio",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_virtio_device_endian,
        &vmstate_virtio_64bit_features,
        &vmstate_virtio_virtqueues,
        &vmstate_virtio_ringsize,
        &vmstate_virtio_broken,
        &vmstate_virtio_extra_state,
        &vmstate_virtio_started,
        &vmstate_virtio_packed_virtqueues,
        &vmstate_virtio_disabled,
        NULL
    }
};
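
/*
 * Each subsection above is emitted only when its .needed callback
 * returns true, so migration streams produced for older guests or
 * smaller feature sets remain loadable by QEMU versions that predate
 * the corresponding subsection.
 */
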
int virtio_save(VirtIODevice *vdev, QEMUFile *f)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t guest_features_lo = (vdev->guest_features & 0xffffffff);
    int i;

    if (k->save_config) {
        k->save_config(qbus->parent, f);
    }

    qemu_put_8s(f, &vdev->status);
    qemu_put_8s(f, &vdev->isr);
    qemu_put_be16s(f, &vdev->queue_sel);
    qemu_put_be32s(f, &guest_features_lo);
    qemu_put_be32(f, vdev->config_len);
    qemu_put_buffer(f, vdev->config, vdev->config_len);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;
    }

    qemu_put_be32(f, i);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;

        qemu_put_be32(f, vdev->vq[i].vring.num);
        if (k->has_variable_vring_alignment) {
            qemu_put_be32(f, vdev->vq[i].vring.align);
        }
        /*
         * Save desc now, the rest of the ring addresses are saved in
         * subsections for VIRTIO-1 devices.
         */
        qemu_put_be64(f, vdev->vq[i].vring.desc);
        qemu_put_be16s(f, &vdev->vq[i].last_avail_idx);
        if (k->save_queue) {
            k->save_queue(qbus->parent, i, f);
        }
    }

    if (vdc->save != NULL) {
        vdc->save(vdev, f);
    }

    if (vdc->vmsd) {
        int ret = vmstate_save_state(f, vdc->vmsd, vdev, NULL);
        if (ret) {
            return ret;
        }
    }

    /* Subsections */
    return vmstate_save_state(f, &vmstate_virtio, vdev, NULL);
}
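
/*
 * Resulting wire layout, summarising the code above (not an external
 * format specification): [transport config] status(1) isr(1)
 * queue_sel(2) guest_features_lo(4) config_len(4) config(config_len)
 * nqueues(4), then per queue num(4) [align(4) if variable] desc(8)
 * last_avail_idx(2) [transport queue state], followed by the device's
 * own state and the vmstate_virtio subsections.
 */
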
/* A wrapper for use as a VMState .put function */
static int virtio_device_put(QEMUFile *f, void *opaque, size_t size,
                             const VMStateField *field, JSONWriter *vmdesc)
{
    return virtio_save(VIRTIO_DEVICE(opaque), f);
}

/* A wrapper for use as a VMState .get function */
static int virtio_device_get(QEMUFile *f, void *opaque, size_t size,
                             const VMStateField *field)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(opaque);
    DeviceClass *dc = DEVICE_CLASS(VIRTIO_DEVICE_GET_CLASS(vdev));

    return virtio_load(vdev, f, dc->vmsd->version_id);
}

const VMStateInfo virtio_vmstate_info = {
    .name = "virtio",
    .get = virtio_device_get,
    .put = virtio_device_put,
};

static int virtio_set_features_nocheck(VirtIODevice *vdev, uint64_t val)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    bool bad = (val & ~(vdev->host_features)) != 0;

    val &= vdev->host_features;
    if (k->set_features) {
        k->set_features(vdev, val);
    }
    vdev->guest_features = val;
    return bad ? -1 : 0;
}

int virtio_set_features(VirtIODevice *vdev, uint64_t val)
{
    int ret;
    /*
     * The driver must not attempt to set features after feature negotiation
     * has finished.
     */
    if (vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) {
        return -EINVAL;
    }
    ret = virtio_set_features_nocheck(vdev, val);
    if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
        /* VIRTIO_RING_F_EVENT_IDX changes the size of the caches. */
        int i;
        for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
            if (vdev->vq[i].vring.num != 0) {
                virtio_init_region_cache(vdev, i);
            }
        }
    }
    if (!ret) {
        if (!virtio_device_started(vdev, vdev->status) &&
            !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
            vdev->start_on_kick = true;
        }
    }
    return ret;
}

size_t virtio_feature_get_config_size(const VirtIOFeature *feature_sizes,
                                      uint64_t host_features)
{
    size_t config_size = 0;
    int i;

    for (i = 0; feature_sizes[i].flags != 0; i++) {
        if (host_features & feature_sizes[i].flags) {
            config_size = MAX(feature_sizes[i].end, config_size);
        }
    }

    return config_size;
}
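
/*
 * Usage sketch with a hypothetical feature table (mirroring how callers
 * such as virtio-net build theirs): each entry pairs a feature bit with
 * the end offset of the config fields that bit unlocks, and the helper
 * returns the largest end offset among the bits present in
 * host_features:
 *
 *     static const VirtIOFeature sizes[] = {
 *         {.flags = 1ULL << FOO_F_BAR,
 *          .end = endof(struct foo_config, bar)},
 *         {}
 *     };
 *     config_size = virtio_feature_get_config_size(sizes, host_features);
 */
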
int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
{
    int i, ret;
    int32_t config_len;
    uint32_t num;
    uint32_t features;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);

    /*
     * We poison the endianness to ensure it does not get used before
     * subsections have been loaded.
     */
    vdev->device_endian = VIRTIO_DEVICE_ENDIAN_UNKNOWN;

    if (k->load_config) {
        ret = k->load_config(qbus->parent, f);
        if (ret) {
            return ret;
        }
    }

    qemu_get_8s(f, &vdev->status);
    qemu_get_8s(f, &vdev->isr);
    qemu_get_be16s(f, &vdev->queue_sel);
    if (vdev->queue_sel >= VIRTIO_QUEUE_MAX) {
        return -1;
    }
    qemu_get_be32s(f, &features);

    /*
     * Temporarily set guest_features low bits - needed by
     * virtio net load code testing for VIRTIO_NET_F_CTRL_GUEST_OFFLOADS
     * VIRTIO_NET_F_GUEST_ANNOUNCE and VIRTIO_NET_F_CTRL_VQ.
     *
     * Note: devices should always test host features in future - don't create
     * new dependencies like this.
     */
    vdev->guest_features = features;

    config_len = qemu_get_be32(f);

    /*
     * There are cases where the incoming config can be bigger or smaller
     * than what we have; so load what we have space for, and skip
     * any excess that's in the stream.
     */
    qemu_get_buffer(f, vdev->config, MIN(config_len, vdev->config_len));

    while (config_len > vdev->config_len) {
        qemu_get_byte(f);
        config_len--;
    }

    num = qemu_get_be32(f);

    if (num > VIRTIO_QUEUE_MAX) {
        error_report("Invalid number of virtqueues: 0x%x", num);
        return -1;
    }

    for (i = 0; i < num; i++) {
        vdev->vq[i].vring.num = qemu_get_be32(f);
        if (k->has_variable_vring_alignment) {
            vdev->vq[i].vring.align = qemu_get_be32(f);
        }
        vdev->vq[i].vring.desc = qemu_get_be64(f);
        qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);
        vdev->vq[i].signalled_used_valid = false;
        vdev->vq[i].notification = true;

        if (!vdev->vq[i].vring.desc && vdev->vq[i].last_avail_idx) {
            error_report("VQ %d address 0x0 "
                         "inconsistent with Host index 0x%x",
                         i, vdev->vq[i].last_avail_idx);
            return -1;
        }
        if (k->load_queue) {
            ret = k->load_queue(qbus->parent, i, f);
            if (ret) {
                return ret;
            }
        }
    }

    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);

    if (vdc->load != NULL) {
        ret = vdc->load(vdev, f, version_id);
        if (ret) {
            return ret;
        }
    }

    if (vdc->vmsd) {
        ret = vmstate_load_state(f, vdc->vmsd, vdev, version_id);
        if (ret) {
            return ret;
        }
    }

    /* Subsections */
    ret = vmstate_load_state(f, &vmstate_virtio, vdev, 1);
    if (ret) {
        return ret;
    }

    if (vdev->device_endian == VIRTIO_DEVICE_ENDIAN_UNKNOWN) {
        vdev->device_endian = virtio_default_endian();
    }

    if (virtio_64bit_features_needed(vdev)) {
        /*
         * Subsection load filled vdev->guest_features.  Run them
         * through virtio_set_features to sanity-check them against
         * host_features.
         */
        uint64_t features64 = vdev->guest_features;
        if (virtio_set_features_nocheck(vdev, features64) < 0) {
            error_report("Features 0x%" PRIx64 " unsupported. "
                         "Allowed features: 0x%" PRIx64,
                         features64, vdev->host_features);
            return -1;
        }
    } else {
        if (virtio_set_features_nocheck(vdev, features) < 0) {
            error_report("Features 0x%x unsupported. "
                         "Allowed features: 0x%" PRIx64,
                         features, vdev->host_features);
            return -1;
        }
    }

    if (!virtio_device_started(vdev, vdev->status) &&
        !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        vdev->start_on_kick = true;
    }

    RCU_READ_LOCK_GUARD();
    for (i = 0; i < num; i++) {
        if (vdev->vq[i].vring.desc) {
            uint16_t nheads;

            /*
             * VIRTIO-1 devices migrate desc, used, and avail ring addresses so
             * only the region cache needs to be set up.  Legacy devices need
             * to calculate used and avail ring addresses based on the desc
             * address.
             */
            if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
                virtio_init_region_cache(vdev, i);
            } else {
                virtio_queue_update_rings(vdev, i);
            }

            if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
                vdev->vq[i].shadow_avail_idx = vdev->vq[i].last_avail_idx;
                vdev->vq[i].shadow_avail_wrap_counter =
                                        vdev->vq[i].last_avail_wrap_counter;
                continue;
            }

            nheads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx;
            /* Check it isn't doing strange things with descriptor numbers. */
            if (nheads > vdev->vq[i].vring.num) {
                virtio_error(vdev, "VQ %d size 0x%x Guest index 0x%x "
                             "inconsistent with Host index 0x%x: delta 0x%x",
                             i, vdev->vq[i].vring.num,
                             vring_avail_idx(&vdev->vq[i]),
                             vdev->vq[i].last_avail_idx, nheads);
                vdev->vq[i].used_idx = 0;
                vdev->vq[i].shadow_avail_idx = 0;
                vdev->vq[i].inuse = 0;
                continue;
            }
            vdev->vq[i].used_idx = vring_used_idx(&vdev->vq[i]);
            vdev->vq[i].shadow_avail_idx = vring_avail_idx(&vdev->vq[i]);

            /*
             * Some devices migrate VirtQueueElements that have been popped
             * from the avail ring but not yet returned to the used ring.
             * Since max ring size < UINT16_MAX it's safe to use modulo
             * UINT16_MAX + 1 subtraction.
             */
            vdev->vq[i].inuse = (uint16_t)(vdev->vq[i].last_avail_idx -
                                vdev->vq[i].used_idx);
            if (vdev->vq[i].inuse > vdev->vq[i].vring.num) {
                error_report("VQ %d size 0x%x < last_avail_idx 0x%x - "
                             "used_idx 0x%x",
                             i, vdev->vq[i].vring.num,
                             vdev->vq[i].last_avail_idx,
                             vdev->vq[i].used_idx);
                return -1;
            }
        }
    }

    if (vdc->post_load) {
        ret = vdc->post_load(vdev);
        if (ret) {
            return ret;
        }
    }

    return 0;
}
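
/*
 * Example for the inuse calculation in virtio_load() above: with
 * last_avail_idx = 3 and used_idx = 0xfffe (just about to wrap), the
 * uint16_t subtraction (3 - 0xfffe) yields 5 elements still in flight.
 * Ring sizes never exceed VIRTQUEUE_MAX_SIZE, so a result larger than
 * vring.num can only come from a corrupted or hostile migration stream
 * and is rejected.
 */
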
void virtio_cleanup(VirtIODevice *vdev)
{
    qemu_del_vm_change_state_handler(vdev->vmstate);
}

static void virtio_vmstate_change(void *opaque, bool running, RunState state)
{
    VirtIODevice *vdev = opaque;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    bool backend_run = running && virtio_device_started(vdev, vdev->status);
    vdev->vm_running = running;

    if (backend_run) {
        virtio_set_status(vdev, vdev->status);
    }

    if (k->vmstate_change) {
        k->vmstate_change(qbus->parent, backend_run);
    }

    if (!backend_run) {
        virtio_set_status(vdev, vdev->status);
    }
}

void virtio_instance_init_common(Object *proxy_obj, void *data,
                                 size_t vdev_size, const char *vdev_name)
{
    DeviceState *vdev = data;

    object_initialize_child_with_props(proxy_obj, "virtio-backend", vdev,
                                       vdev_size, vdev_name, &error_abort,
                                       NULL);
    qdev_alias_all_properties(vdev, proxy_obj);
}

void virtio_init(VirtIODevice *vdev, const char *name,
                 uint16_t device_id, size_t config_size)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    int i;
    int nvectors = k->query_nvectors ? k->query_nvectors(qbus->parent) : 0;

    if (nvectors) {
        vdev->vector_queues =
            g_malloc0(sizeof(*vdev->vector_queues) * nvectors);
    }

    vdev->start_on_kick = false;
    vdev->started = false;
    vdev->device_id = device_id;
    vdev->status = 0;
    qatomic_set(&vdev->isr, 0);
    vdev->queue_sel = 0;
    vdev->config_vector = VIRTIO_NO_VECTOR;
    vdev->vq = g_malloc0(sizeof(VirtQueue) * VIRTIO_QUEUE_MAX);
    vdev->vm_running = runstate_is_running();
    vdev->broken = false;
    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        vdev->vq[i].vector = VIRTIO_NO_VECTOR;
        vdev->vq[i].vdev = vdev;
        vdev->vq[i].queue_index = i;
        vdev->vq[i].host_notifier_enabled = false;
    }

    vdev->name = name;
    vdev->config_len = config_size;
    if (vdev->config_len) {
        vdev->config = g_malloc0(config_size);
    } else {
        vdev->config = NULL;
    }
    vdev->vmstate = qdev_add_vm_change_state_handler(DEVICE(vdev),
            virtio_vmstate_change, vdev);
    vdev->device_endian = virtio_default_endian();
    vdev->use_guest_notifier_mask = true;
}

/*
 * Only devices that have already been around prior to defining the virtio
 * standard support legacy mode; this includes devices not specified in the
 * standard. All newer devices conform to the virtio standard only.
 */
bool virtio_legacy_allowed(VirtIODevice *vdev)
{
    switch (vdev->device_id) {
    case VIRTIO_ID_NET:
    case VIRTIO_ID_BLOCK:
    case VIRTIO_ID_CONSOLE:
    case VIRTIO_ID_RNG:
    case VIRTIO_ID_BALLOON:
    case VIRTIO_ID_RPMSG:
    case VIRTIO_ID_SCSI:
    case VIRTIO_ID_9P:
    case VIRTIO_ID_RPROC_SERIAL:
    case VIRTIO_ID_CAIF:
        return true;
    default:
        return false;
    }
}

bool virtio_legacy_check_disabled(VirtIODevice *vdev)
{
    return vdev->disable_legacy_check;
}

hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

bool virtio_queue_enabled_legacy(VirtIODevice *vdev, int n)
{
    return virtio_queue_get_desc_addr(vdev, n) != 0;
}

bool virtio_queue_enabled(VirtIODevice *vdev, int n)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    if (k->queue_enabled) {
        return k->queue_enabled(qbus->parent, n);
    }
    return virtio_queue_enabled_legacy(vdev, n);
}

hwaddr virtio_queue_get_avail_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.avail;
}

hwaddr virtio_queue_get_used_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.used;
}

hwaddr virtio_queue_get_desc_size(VirtIODevice *vdev, int n)
{
    return sizeof(VRingDesc) * vdev->vq[n].vring.num;
}

hwaddr virtio_queue_get_avail_size(VirtIODevice *vdev, int n)
{
    int s;

    if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
        return sizeof(struct VRingPackedDescEvent);
    }

    s = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
    return offsetof(VRingAvail, ring) +
        sizeof(uint16_t) * vdev->vq[n].vring.num + s;
}

hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n)
{
    int s;

    if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
        return sizeof(struct VRingPackedDescEvent);
    }

    s = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
    return offsetof(VRingUsed, ring) +
        sizeof(VRingUsedElem) * vdev->vq[n].vring.num + s;
}
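
/*
 * A worked example for the split-ring sizes above, assuming num = 256
 * and VIRTIO_RING_F_EVENT_IDX negotiated: the avail ring takes
 * 4 (flags + idx) + 256 * 2 + 2 (used_event) = 518 bytes, the used
 * ring 4 + 256 * sizeof(VRingUsedElem) + 2 = 2054 bytes, and the
 * descriptor table 256 * sizeof(VRingDesc) = 4096 bytes.
 */
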
static unsigned int virtio_queue_packed_get_last_avail_idx(VirtIODevice *vdev,
                                                           int n)
{
    unsigned int avail, used;

    avail = vdev->vq[n].last_avail_idx;
    avail |= ((uint16_t)vdev->vq[n].last_avail_wrap_counter) << 15;

    used = vdev->vq[n].used_idx;
    used |= ((uint16_t)vdev->vq[n].used_wrap_counter) << 15;

    return avail | used << 16;
}
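
/*
 * Layout of the value returned above: bits 0-14 hold last_avail_idx,
 * bit 15 its wrap counter, bits 16-30 used_idx and bit 31 the used
 * wrap counter.  For example, last_avail_idx = 5 with the wrap counter
 * set and used_idx = 3 with its counter clear encodes as
 * 0x8005 | (0x0003 << 16) == 0x00038005; the setter below reverses
 * exactly this packing.
 */
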
static uint16_t virtio_queue_split_get_last_avail_idx(VirtIODevice *vdev,
                                                      int n)
{
    return vdev->vq[n].last_avail_idx;
}

unsigned int virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
{
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
        return virtio_queue_packed_get_last_avail_idx(vdev, n);
    } else {
        return virtio_queue_split_get_last_avail_idx(vdev, n);
    }
}

static void virtio_queue_packed_set_last_avail_idx(VirtIODevice *vdev,
                                                   int n, unsigned int idx)
{
    struct VirtQueue *vq = &vdev->vq[n];

    vq->last_avail_idx = vq->shadow_avail_idx = idx & 0x7fff;
    vq->last_avail_wrap_counter =
        vq->shadow_avail_wrap_counter = !!(idx & 0x8000);
    idx >>= 16;
    vq->used_idx = idx & 0x7fff;
    vq->used_wrap_counter = !!(idx & 0x8000);
}

static void virtio_queue_split_set_last_avail_idx(VirtIODevice *vdev,
                                                  int n, unsigned int idx)
{
    vdev->vq[n].last_avail_idx = idx;
    vdev->vq[n].shadow_avail_idx = idx;
}

void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n,
                                     unsigned int idx)
{
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
        virtio_queue_packed_set_last_avail_idx(vdev, n, idx);
    } else {
        virtio_queue_split_set_last_avail_idx(vdev, n, idx);
    }
}

static void virtio_queue_packed_restore_last_avail_idx(VirtIODevice *vdev,
                                                       int n)
{
    /* We don't have a reference like avail idx in shared memory */
    return;
}

static void virtio_queue_split_restore_last_avail_idx(VirtIODevice *vdev,
                                                      int n)
{
    RCU_READ_LOCK_GUARD();
    if (vdev->vq[n].vring.desc) {
        vdev->vq[n].last_avail_idx = vring_used_idx(&vdev->vq[n]);
        vdev->vq[n].shadow_avail_idx = vdev->vq[n].last_avail_idx;
    }
}

void virtio_queue_restore_last_avail_idx(VirtIODevice *vdev, int n)
{
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
        virtio_queue_packed_restore_last_avail_idx(vdev, n);
    } else {
        virtio_queue_split_restore_last_avail_idx(vdev, n);
    }
}

static void virtio_queue_packed_update_used_idx(VirtIODevice *vdev, int n)
{
    /* used idx was updated through set_last_avail_idx() */
    return;
}

static void virtio_split_packed_update_used_idx(VirtIODevice *vdev, int n)
{
    RCU_READ_LOCK_GUARD();
    if (vdev->vq[n].vring.desc) {
        vdev->vq[n].used_idx = vring_used_idx(&vdev->vq[n]);
    }
}

void virtio_queue_update_used_idx(VirtIODevice *vdev, int n)
{
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
        return virtio_queue_packed_update_used_idx(vdev, n);
    }

    return virtio_split_packed_update_used_idx(vdev, n);
}

void virtio_queue_invalidate_signalled_used(VirtIODevice *vdev, int n)
{
    vdev->vq[n].signalled_used_valid = false;
}

VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n)
{
    return vdev->vq + n;
}

uint16_t virtio_get_queue_index(VirtQueue *vq)
{
    return vq->queue_index;
}

static void virtio_queue_guest_notifier_read(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, guest_notifier);
    if (event_notifier_test_and_clear(n)) {
        virtio_irq(vq);
    }
}

void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
                                                bool with_irqfd)
{
    if (assign && !with_irqfd) {
        event_notifier_set_handler(&vq->guest_notifier,
                                   virtio_queue_guest_notifier_read);
    } else {
        event_notifier_set_handler(&vq->guest_notifier, NULL);
    }
    if (!assign) {
        /* Test and clear notifier before closing it,
         * in case poll callback didn't have time to run. */
        virtio_queue_guest_notifier_read(&vq->guest_notifier);
    }
}

EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq)
{
    return &vq->guest_notifier;
}

static void virtio_queue_host_notifier_aio_read(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
    if (event_notifier_test_and_clear(n)) {
        virtio_queue_notify_aio_vq(vq);
    }
}

static void virtio_queue_host_notifier_aio_poll_begin(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);

    virtio_queue_set_notification(vq, 0);
}

static bool virtio_queue_host_notifier_aio_poll(void *opaque)
{
    EventNotifier *n = opaque;
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);

    if (!vq->vring.desc || virtio_queue_empty(vq)) {
        return false;
    }

    return virtio_queue_notify_aio_vq(vq);
}

static void virtio_queue_host_notifier_aio_poll_end(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);

    /* Caller polls once more after this to catch requests that race with us */
    virtio_queue_set_notification(vq, 1);
}

void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx,
                                                VirtIOHandleAIOOutput handle_output)
{
    if (handle_output) {
        vq->handle_aio_output = handle_output;
        aio_set_event_notifier(ctx, &vq->host_notifier, true,
                               virtio_queue_host_notifier_aio_read,
                               virtio_queue_host_notifier_aio_poll);
        aio_set_event_notifier_poll(ctx, &vq->host_notifier,
                                    virtio_queue_host_notifier_aio_poll_begin,
                                    virtio_queue_host_notifier_aio_poll_end);
    } else {
        aio_set_event_notifier(ctx, &vq->host_notifier, true, NULL, NULL);
        /* Test and clear notifier after disabling event,
         * in case poll callback didn't have time to run. */
        virtio_queue_host_notifier_aio_read(&vq->host_notifier);
        vq->handle_aio_output = NULL;
    }
}
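
/*
 * The poll_begin/poll_end hooks installed above tell the driver not to
 * kick us (virtio_queue_set_notification(vq, 0)) while the AioContext
 * is busy-polling the ring, and re-enable kicks when polling stops, so
 * requests in the polled path are picked up without a notification
 * round trip.
 */
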
void virtio_queue_host_notifier_read(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
    if (event_notifier_test_and_clear(n)) {
        virtio_queue_notify_vq(vq);
    }
}

EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq)
{
    return &vq->host_notifier;
}

void virtio_queue_set_host_notifier_enabled(VirtQueue *vq, bool enabled)
{
    vq->host_notifier_enabled = enabled;
}

int virtio_queue_set_host_notifier_mr(VirtIODevice *vdev, int n,
                                      MemoryRegion *mr, bool assign)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    if (k->set_host_notifier_mr) {
        return k->set_host_notifier_mr(qbus->parent, n, mr, assign);
    }

    return -1;
}

void virtio_device_set_child_bus_name(VirtIODevice *vdev, char *bus_name)
{
    g_free(vdev->bus_name);
    vdev->bus_name = g_strdup(bus_name);
}

void GCC_FMT_ATTR(2, 3) virtio_error(VirtIODevice *vdev, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    error_vreport(fmt, ap);
    va_end(ap);

    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        vdev->status = vdev->status | VIRTIO_CONFIG_S_NEEDS_RESET;
        virtio_notify_config(vdev);
    }

    vdev->broken = true;
}

static void virtio_memory_listener_commit(MemoryListener *listener)
{
    VirtIODevice *vdev = container_of(listener, VirtIODevice, listener);
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0) {
            break;
        }
        virtio_init_region_cache(vdev, i);
    }
}

static void virtio_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
    Error *err = NULL;

    /* Devices should either use vmsd or the load/save methods */
    assert(!vdc->vmsd || !vdc->load);

    if (vdc->realize != NULL) {
        vdc->realize(dev, &err);
        if (err != NULL) {
            error_propagate(errp, err);
            return;
        }
    }

    virtio_bus_device_plugged(vdev, &err);
    if (err != NULL) {
        error_propagate(errp, err);
        vdc->unrealize(dev);
        return;
    }

    vdev->listener.commit = virtio_memory_listener_commit;
    vdev->listener.name = "virtio";
    memory_listener_register(&vdev->listener, vdev->dma_as);
}

static void virtio_device_unrealize(DeviceState *dev)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);

    memory_listener_unregister(&vdev->listener);
    virtio_bus_device_unplugged(vdev);

    if (vdc->unrealize != NULL) {
        vdc->unrealize(dev);
    }

    g_free(vdev->bus_name);
    vdev->bus_name = NULL;
}

static void virtio_device_free_virtqueues(VirtIODevice *vdev)
{
    int i;

    if (!vdev->vq) {
        return;
    }

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0) {
            break;
        }
        virtio_virtqueue_reset_region_cache(&vdev->vq[i]);
    }
    g_free(vdev->vq);
}

static void virtio_device_instance_finalize(Object *obj)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(obj);

    virtio_device_free_virtqueues(vdev);

    g_free(vdev->config);
    g_free(vdev->vector_queues);
}

static Property virtio_properties[] = {
    DEFINE_VIRTIO_COMMON_FEATURES(VirtIODevice, host_features),
    DEFINE_PROP_BOOL("use-started", VirtIODevice, use_started, true),
    DEFINE_PROP_BOOL("use-disabled-flag", VirtIODevice, use_disabled_flag, true),
    DEFINE_PROP_BOOL("x-disable-legacy-check", VirtIODevice,
                     disable_legacy_check, false),
    DEFINE_PROP_END_OF_LIST(),
};

static int virtio_device_start_ioeventfd_impl(VirtIODevice *vdev)
{
    VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev)));
    int i, n, r, err;

    /*
     * Batch all the host notifiers in a single transaction to avoid
     * quadratic time complexity in address_space_update_ioeventfds().
     */
    memory_region_transaction_begin();
    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
        VirtQueue *vq = &vdev->vq[n];
        if (!virtio_queue_get_num(vdev, n)) {
            continue;
        }
        r = virtio_bus_set_host_notifier(qbus, n, true);
        if (r < 0) {
            err = r;
            goto assign_error;
        }
        event_notifier_set_handler(&vq->host_notifier,
                                   virtio_queue_host_notifier_read);
    }

    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
        /* Kick right away to begin processing requests already in vring */
        VirtQueue *vq = &vdev->vq[n];
        if (!vq->vring.num) {
            continue;
        }
        event_notifier_set(&vq->host_notifier);
    }
    memory_region_transaction_commit();
    return 0;

assign_error:
    i = n; /* save n for a second iteration after transaction is committed. */
    while (--n >= 0) {
        VirtQueue *vq = &vdev->vq[n];
        if (!virtio_queue_get_num(vdev, n)) {
            continue;
        }

        event_notifier_set_handler(&vq->host_notifier, NULL);
        r = virtio_bus_set_host_notifier(qbus, n, false);
        assert(r >= 0);
    }
    /*
     * The transaction expects the ioeventfds to be open when it
     * commits. Do it now, before the cleanup loop.
     */
    memory_region_transaction_commit();

    while (--i >= 0) {
        if (!virtio_queue_get_num(vdev, i)) {
            continue;
        }
        virtio_bus_cleanup_host_notifier(qbus, i);
    }
    return err;
}

int virtio_device_start_ioeventfd(VirtIODevice *vdev)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);

    return virtio_bus_start_ioeventfd(vbus);
}

static void virtio_device_stop_ioeventfd_impl(VirtIODevice *vdev)
{
    VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev)));
    int n, r;

    /*
     * Batch all the host notifiers in a single transaction to avoid
     * quadratic time complexity in address_space_update_ioeventfds().
     */
    memory_region_transaction_begin();
    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
        VirtQueue *vq = &vdev->vq[n];

        if (!virtio_queue_get_num(vdev, n)) {
            continue;
        }
        event_notifier_set_handler(&vq->host_notifier, NULL);
        r = virtio_bus_set_host_notifier(qbus, n, false);
        assert(r >= 0);
    }
    /*
     * The transaction expects the ioeventfds to be open when it
     * commits. Do it now, before the cleanup loop.
     */
    memory_region_transaction_commit();

    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            continue;
        }
        virtio_bus_cleanup_host_notifier(qbus, n);
    }
}

int virtio_device_grab_ioeventfd(VirtIODevice *vdev)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);

    return virtio_bus_grab_ioeventfd(vbus);
}

void virtio_device_release_ioeventfd(VirtIODevice *vdev)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);

    virtio_bus_release_ioeventfd(vbus);
}

static void virtio_device_class_init(ObjectClass *klass, void *data)
{
    /* Set the default value here. */
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = virtio_device_realize;
    dc->unrealize = virtio_device_unrealize;
    dc->bus_type = TYPE_VIRTIO_BUS;
    device_class_set_props(dc, virtio_properties);
    vdc->start_ioeventfd = virtio_device_start_ioeventfd_impl;
    vdc->stop_ioeventfd = virtio_device_stop_ioeventfd_impl;

    vdc->legacy_features |= VIRTIO_LEGACY_FEATURES;
}

bool virtio_device_ioeventfd_enabled(VirtIODevice *vdev)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);

    return virtio_bus_ioeventfd_enabled(vbus);
}

static const TypeInfo virtio_device_info = {
    .name = TYPE_VIRTIO_DEVICE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(VirtIODevice),
    .class_init = virtio_device_class_init,
    .instance_finalize = virtio_device_instance_finalize,
    .abstract = true,
    .class_size = sizeof(VirtioDeviceClass),
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_device_info);
}

type_init(virtio_register_types)