/*
 * Virtio Support
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-virtio.h"
#include "trace.h"
#include "qemu/defer-call.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qom/object_interfaces.h"
#include "hw/core/cpu.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/vhost.h"
#include "migration/qemu-file-types.h"
#include "qemu/atomic.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/qdev-properties.h"
#include "hw/virtio/virtio-access.h"
#include "sysemu/dma.h"
#include "sysemu/runstate.h"
#include "virtio-qmp.h"

#include "standard-headers/linux/virtio_ids.h"
#include "standard-headers/linux/vhost_types.h"
#include "standard-headers/linux/virtio_blk.h"
#include "standard-headers/linux/virtio_console.h"
#include "standard-headers/linux/virtio_gpu.h"
#include "standard-headers/linux/virtio_net.h"
#include "standard-headers/linux/virtio_scsi.h"
#include "standard-headers/linux/virtio_i2c.h"
#include "standard-headers/linux/virtio_balloon.h"
#include "standard-headers/linux/virtio_iommu.h"
#include "standard-headers/linux/virtio_mem.h"
#include "standard-headers/linux/virtio_vsock.h"

/*
 * Maximum size of virtio device config space
 */
#define VHOST_USER_MAX_CONFIG_SIZE 256

/*
 * The alignment to use between consumer and producer parts of vring.
 * x86 pagesize again. This is the default, used by transports like PCI
 * which don't provide a means for the guest to tell the host the alignment.
 */
#define VIRTIO_PCI_VRING_ALIGN         4096

typedef struct VRingDesc
{
    uint64_t addr;
    uint32_t len;
    uint16_t flags;
    uint16_t next;
} VRingDesc;

typedef struct VRingPackedDesc {
    uint64_t addr;
    uint32_t len;
    uint16_t id;
    uint16_t flags;
} VRingPackedDesc;

typedef struct VRingAvail
{
    uint16_t flags;
    uint16_t idx;
    uint16_t ring[];
} VRingAvail;

typedef struct VRingUsedElem
{
    uint32_t id;
    uint32_t len;
} VRingUsedElem;

typedef struct VRingUsed
{
    uint16_t flags;
    uint16_t idx;
    VRingUsedElem ring[];
} VRingUsed;

typedef struct VRingMemoryRegionCaches {
    struct rcu_head rcu;
    MemoryRegionCache desc;
    MemoryRegionCache avail;
    MemoryRegionCache used;
} VRingMemoryRegionCaches;

typedef struct VRing
{
    unsigned int num;
    unsigned int num_default;
    unsigned int align;
    hwaddr desc;
    hwaddr avail;
    hwaddr used;
    VRingMemoryRegionCaches *caches;
} VRing;

typedef struct VRingPackedDescEvent {
    uint16_t off_wrap;
    uint16_t flags;
} VRingPackedDescEvent;

struct VirtQueue
{
    VRing vring;
    VirtQueueElement *used_elems;

    /* Next head to pop */
    uint16_t last_avail_idx;
    bool last_avail_wrap_counter;

    /* Last avail_idx read from VQ. */
    uint16_t shadow_avail_idx;
    bool shadow_avail_wrap_counter;

    uint16_t used_idx;
    bool used_wrap_counter;

    /* Last used index value we have signalled on */
    uint16_t signalled_used;

    /* Whether signalled_used is valid */
    bool signalled_used_valid;

    /* Notification enabled? */
    bool notification;

    uint16_t queue_index;

    unsigned int inuse;

    uint16_t vector;
    VirtIOHandleOutput handle_output;
    VirtIODevice *vdev;
    EventNotifier guest_notifier;
    EventNotifier host_notifier;
    bool host_notifier_enabled;
    QLIST_ENTRY(VirtQueue) node;
};

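/*
 * How the index fields above relate on the split ring: last_avail_idx
 * is the next head this device will pop, shadow_avail_idx caches the
 * guest-written avail->idx so the hot path can often skip a guest
 * memory read, and used_idx mirrors the used->idx the device last
 * published.  In free-running 16-bit arithmetic,
 * used_idx <= last_avail_idx <= shadow_avail_idx.
 */
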
const char *virtio_device_names[] = {
    [VIRTIO_ID_NET] = "virtio-net",
    [VIRTIO_ID_BLOCK] = "virtio-blk",
    [VIRTIO_ID_CONSOLE] = "virtio-serial",
    [VIRTIO_ID_RNG] = "virtio-rng",
    [VIRTIO_ID_BALLOON] = "virtio-balloon",
    [VIRTIO_ID_IOMEM] = "virtio-iomem",
    [VIRTIO_ID_RPMSG] = "virtio-rpmsg",
    [VIRTIO_ID_SCSI] = "virtio-scsi",
    [VIRTIO_ID_9P] = "virtio-9p",
    [VIRTIO_ID_MAC80211_WLAN] = "virtio-mac-wlan",
    [VIRTIO_ID_RPROC_SERIAL] = "virtio-rproc-serial",
    [VIRTIO_ID_CAIF] = "virtio-caif",
    [VIRTIO_ID_MEMORY_BALLOON] = "virtio-mem-balloon",
    [VIRTIO_ID_GPU] = "virtio-gpu",
    [VIRTIO_ID_CLOCK] = "virtio-clk",
    [VIRTIO_ID_INPUT] = "virtio-input",
    [VIRTIO_ID_VSOCK] = "vhost-vsock",
    [VIRTIO_ID_CRYPTO] = "virtio-crypto",
    [VIRTIO_ID_SIGNAL_DIST] = "virtio-signal",
    [VIRTIO_ID_PSTORE] = "virtio-pstore",
    [VIRTIO_ID_IOMMU] = "virtio-iommu",
    [VIRTIO_ID_MEM] = "virtio-mem",
    [VIRTIO_ID_SOUND] = "virtio-sound",
    [VIRTIO_ID_FS] = "virtio-user-fs",
    [VIRTIO_ID_PMEM] = "virtio-pmem",
    [VIRTIO_ID_RPMB] = "virtio-rpmb",
    [VIRTIO_ID_MAC80211_HWSIM] = "virtio-mac-hwsim",
    [VIRTIO_ID_VIDEO_ENCODER] = "virtio-vid-encoder",
    [VIRTIO_ID_VIDEO_DECODER] = "virtio-vid-decoder",
    [VIRTIO_ID_SCMI] = "virtio-scmi",
    [VIRTIO_ID_NITRO_SEC_MOD] = "virtio-nitro-sec-mod",
    [VIRTIO_ID_I2C_ADAPTER] = "vhost-user-i2c",
    [VIRTIO_ID_WATCHDOG] = "virtio-watchdog",
    [VIRTIO_ID_CAN] = "virtio-can",
    [VIRTIO_ID_DMABUF] = "virtio-dmabuf",
    [VIRTIO_ID_PARAM_SERV] = "virtio-param-serv",
    [VIRTIO_ID_AUDIO_POLICY] = "virtio-audio-pol",
    [VIRTIO_ID_BT] = "virtio-bluetooth",
    [VIRTIO_ID_GPIO] = "virtio-gpio"
};

static const char *virtio_id_to_name(uint16_t device_id)
{
    assert(device_id < G_N_ELEMENTS(virtio_device_names));
    const char *name = virtio_device_names[device_id];
    assert(name != NULL);
    return name;
}

/* Called within call_rcu().  */
static void virtio_free_region_cache(VRingMemoryRegionCaches *caches)
{
    assert(caches != NULL);
    address_space_cache_destroy(&caches->desc);
    address_space_cache_destroy(&caches->avail);
    address_space_cache_destroy(&caches->used);
    g_free(caches);
}

static void virtio_virtqueue_reset_region_cache(struct VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches;

    caches = qatomic_read(&vq->vring.caches);
    qatomic_rcu_set(&vq->vring.caches, NULL);
    if (caches) {
        call_rcu(caches, virtio_free_region_cache, rcu);
    }
}

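/*
 * A sketch of the reader side that pairs with the call_rcu() above:
 * readers only dereference vq->vring.caches inside an RCU critical
 * section, so a concurrent reset cannot free the caches under them:
 *
 *     RCU_READ_LOCK_GUARD();
 *     VRingMemoryRegionCaches *caches = qatomic_rcu_read(&vq->vring.caches);
 *     if (caches) {
 *         // ... use caches->desc / caches->avail / caches->used ...
 *     }
 */
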
void virtio_init_region_cache(VirtIODevice *vdev, int n)
{
    VirtQueue *vq = &vdev->vq[n];
    VRingMemoryRegionCaches *old = vq->vring.caches;
    VRingMemoryRegionCaches *new = NULL;
    hwaddr addr, size;
    int64_t len;
    bool packed;

    addr = vq->vring.desc;
    if (!addr) {
        goto out_no_cache;
    }
    new = g_new0(VRingMemoryRegionCaches, 1);
    size = virtio_queue_get_desc_size(vdev, n);
    packed = virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED) ?
                                   true : false;
    len = address_space_cache_init(&new->desc, vdev->dma_as,
                                   addr, size, packed);
    if (len < size) {
        virtio_error(vdev, "Cannot map desc");
        goto err_desc;
    }

    size = virtio_queue_get_used_size(vdev, n);
    len = address_space_cache_init(&new->used, vdev->dma_as,
                                   vq->vring.used, size, true);
    if (len < size) {
        virtio_error(vdev, "Cannot map used");
        goto err_used;
    }

    size = virtio_queue_get_avail_size(vdev, n);
    len = address_space_cache_init(&new->avail, vdev->dma_as,
                                   vq->vring.avail, size, false);
    if (len < size) {
        virtio_error(vdev, "Cannot map avail");
        goto err_avail;
    }

    qatomic_rcu_set(&vq->vring.caches, new);
    if (old) {
        call_rcu(old, virtio_free_region_cache, rcu);
    }
    return;

err_avail:
    address_space_cache_destroy(&new->avail);
err_used:
    address_space_cache_destroy(&new->used);
err_desc:
    address_space_cache_destroy(&new->desc);
out_no_cache:
    g_free(new);
    virtio_virtqueue_reset_region_cache(vq);
}

/* virt queue functions */
void virtio_queue_update_rings(VirtIODevice *vdev, int n)
{
    VRing *vring = &vdev->vq[n].vring;

    if (!vring->num || !vring->desc || !vring->align) {
        /* not yet setup -> nothing to do */
        return;
    }
    vring->avail = vring->desc + vring->num * sizeof(VRingDesc);
    vring->used = vring_align(vring->avail +
                              offsetof(VRingAvail, ring[vring->num]),
                              vring->align);
    virtio_init_region_cache(vdev, n);
}

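/*
 * A worked example of the layout computed above, assuming num = 256
 * descriptors and the default 4096-byte alignment:
 *
 *     desc  at offset 0x0000: 256 * sizeof(VRingDesc) = 4096 bytes
 *     avail at offset 0x1000: 4 + 256 * 2             =  516 bytes
 *     used  at offset 0x2000: 0x1204 rounded up to the next 4096-byte
 *                             boundary by vring_align()
 */
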
/* Called within rcu_read_lock().  */
static void vring_split_desc_read(VirtIODevice *vdev, VRingDesc *desc,
                                  MemoryRegionCache *cache, int i)
{
    address_space_read_cached(cache, i * sizeof(VRingDesc),
                              desc, sizeof(VRingDesc));
    virtio_tswap64s(vdev, &desc->addr);
    virtio_tswap32s(vdev, &desc->len);
    virtio_tswap16s(vdev, &desc->flags);
    virtio_tswap16s(vdev, &desc->next);
}

static void vring_packed_event_read(VirtIODevice *vdev,
                                    MemoryRegionCache *cache,
                                    VRingPackedDescEvent *e)
{
    hwaddr off_off = offsetof(VRingPackedDescEvent, off_wrap);
    hwaddr off_flags = offsetof(VRingPackedDescEvent, flags);

    e->flags = virtio_lduw_phys_cached(vdev, cache, off_flags);
    /* Make sure flags is seen before off_wrap */
    smp_rmb();
    e->off_wrap = virtio_lduw_phys_cached(vdev, cache, off_off);
}

static void vring_packed_off_wrap_write(VirtIODevice *vdev,
                                        MemoryRegionCache *cache,
                                        uint16_t off_wrap)
{
    hwaddr off = offsetof(VRingPackedDescEvent, off_wrap);

    virtio_stw_phys_cached(vdev, cache, off, off_wrap);
    address_space_cache_invalidate(cache, off, sizeof(off_wrap));
}

static void vring_packed_flags_write(VirtIODevice *vdev,
                                     MemoryRegionCache *cache, uint16_t flags)
{
    hwaddr off = offsetof(VRingPackedDescEvent, flags);

    virtio_stw_phys_cached(vdev, cache, off, flags);
    address_space_cache_invalidate(cache, off, sizeof(flags));
}

/* Called within rcu_read_lock().  */
static VRingMemoryRegionCaches *vring_get_region_caches(struct VirtQueue *vq)
{
    return qatomic_rcu_read(&vq->vring.caches);
}

/* Called within rcu_read_lock().  */
static inline uint16_t vring_avail_flags(VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingAvail, flags);

    if (!caches) {
        return 0;
    }

    return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
}

/* Called within rcu_read_lock().  */
static inline uint16_t vring_avail_idx(VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingAvail, idx);

    if (!caches) {
        return 0;
    }

    vq->shadow_avail_idx = virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
    return vq->shadow_avail_idx;
}

/* Called within rcu_read_lock().  */
static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingAvail, ring[i]);

    if (!caches) {
        return 0;
    }

    return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
}

/* Called within rcu_read_lock().  */
static inline uint16_t vring_get_used_event(VirtQueue *vq)
{
    return vring_avail_ring(vq, vq->vring.num);
}

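/*
 * With VIRTIO_RING_F_EVENT_IDX the guest's used_event field is not a
 * separate structure: it lives in avail->ring[num], the slot just past
 * the last avail ring entry, which is why the helper above reads avail
 * ring index vq->vring.num.  Symmetrically, the device's avail_event
 * written by vring_set_avail_event() below lives at used->ring[num].
 */
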
/* Called within rcu_read_lock().  */
static inline void vring_used_write(VirtQueue *vq, VRingUsedElem *uelem,
                                    int i)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingUsed, ring[i]);

    if (!caches) {
        return;
    }

    virtio_tswap32s(vq->vdev, &uelem->id);
    virtio_tswap32s(vq->vdev, &uelem->len);
    address_space_write_cached(&caches->used, pa, uelem, sizeof(VRingUsedElem));
    address_space_cache_invalidate(&caches->used, pa, sizeof(VRingUsedElem));
}

/* Called within rcu_read_lock().  */
static inline uint16_t vring_used_flags(VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingUsed, flags);

    if (!caches) {
        return 0;
    }

    return virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
}

/* Called within rcu_read_lock().  */
static uint16_t vring_used_idx(VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingUsed, idx);

    if (!caches) {
        return 0;
    }

    return virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
}

/* Called within rcu_read_lock().  */
static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingUsed, idx);

    if (caches) {
        virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
        address_space_cache_invalidate(&caches->used, pa, sizeof(val));
    }

    vq->used_idx = val;
}

/* Called within rcu_read_lock().  */
static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    VirtIODevice *vdev = vq->vdev;
    hwaddr pa = offsetof(VRingUsed, flags);
    uint16_t flags;

    if (!caches) {
        return;
    }

    flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
    virtio_stw_phys_cached(vdev, &caches->used, pa, flags | mask);
    address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
}

/* Called within rcu_read_lock().  */
static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    VirtIODevice *vdev = vq->vdev;
    hwaddr pa = offsetof(VRingUsed, flags);
    uint16_t flags;

    if (!caches) {
        return;
    }

    flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
    virtio_stw_phys_cached(vdev, &caches->used, pa, flags & ~mask);
    address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
}

/* Called within rcu_read_lock().  */
static inline void vring_set_avail_event(VirtQueue *vq, uint16_t val)
{
    VRingMemoryRegionCaches *caches;
    hwaddr pa;

    if (!vq->notification) {
        return;
    }

    caches = vring_get_region_caches(vq);
    if (!caches) {
        return;
    }

    pa = offsetof(VRingUsed, ring[vq->vring.num]);
    virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
    address_space_cache_invalidate(&caches->used, pa, sizeof(val));
}

static void virtio_queue_split_set_notification(VirtQueue *vq, int enable)
{
    RCU_READ_LOCK_GUARD();

    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vring_avail_idx(vq));
    } else if (enable) {
        vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
    } else {
        vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
    }
    if (enable) {
        /* Expose avail event/used flags before caller checks the avail idx. */
        smp_mb();
    }
}

static void virtio_queue_packed_set_notification(VirtQueue *vq, int enable)
{
    uint16_t off_wrap;
    VRingPackedDescEvent e;
    VRingMemoryRegionCaches *caches;

    RCU_READ_LOCK_GUARD();
    caches = vring_get_region_caches(vq);
    if (!caches) {
        return;
    }

    vring_packed_event_read(vq->vdev, &caches->used, &e);

    if (!enable) {
        e.flags = VRING_PACKED_EVENT_FLAG_DISABLE;
    } else if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
        off_wrap = vq->shadow_avail_idx | vq->shadow_avail_wrap_counter << 15;
        vring_packed_off_wrap_write(vq->vdev, &caches->used, off_wrap);
        /* Make sure off_wrap is written before flags */
        smp_wmb();
        e.flags = VRING_PACKED_EVENT_FLAG_DESC;
    } else {
        e.flags = VRING_PACKED_EVENT_FLAG_ENABLE;
    }

    vring_packed_flags_write(vq->vdev, &caches->used, e.flags);
    if (enable) {
        /* Expose avail event/used flags before caller checks the avail idx. */
        smp_mb();
    }
}

bool virtio_queue_get_notification(VirtQueue *vq)
{
    return vq->notification;
}

void virtio_queue_set_notification(VirtQueue *vq, int enable)
{
    vq->notification = enable;

    if (!vq->vring.desc) {
        return;
    }

    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        virtio_queue_packed_set_notification(vq, enable);
    } else {
        virtio_queue_split_set_notification(vq, enable);
    }
}

int virtio_queue_ready(VirtQueue *vq)
{
    return vq->vring.avail != 0;
}

static void vring_packed_desc_read_flags(VirtIODevice *vdev,
                                         uint16_t *flags,
                                         MemoryRegionCache *cache,
                                         int i)
{
    hwaddr off = i * sizeof(VRingPackedDesc) + offsetof(VRingPackedDesc, flags);

    *flags = virtio_lduw_phys_cached(vdev, cache, off);
}

static void vring_packed_desc_read(VirtIODevice *vdev,
                                   VRingPackedDesc *desc,
                                   MemoryRegionCache *cache,
                                   int i, bool strict_order)
{
    hwaddr off = i * sizeof(VRingPackedDesc);

    vring_packed_desc_read_flags(vdev, &desc->flags, cache, i);

    if (strict_order) {
        /* Make sure flags is read before the rest of the fields. */
        smp_rmb();
    }

    address_space_read_cached(cache, off + offsetof(VRingPackedDesc, addr),
                              &desc->addr, sizeof(desc->addr));
    address_space_read_cached(cache, off + offsetof(VRingPackedDesc, id),
                              &desc->id, sizeof(desc->id));
    address_space_read_cached(cache, off + offsetof(VRingPackedDesc, len),
                              &desc->len, sizeof(desc->len));
    virtio_tswap64s(vdev, &desc->addr);
    virtio_tswap16s(vdev, &desc->id);
    virtio_tswap32s(vdev, &desc->len);
}

static void vring_packed_desc_write_data(VirtIODevice *vdev,
                                         VRingPackedDesc *desc,
                                         MemoryRegionCache *cache,
                                         int i)
{
    hwaddr off_id = i * sizeof(VRingPackedDesc) +
                    offsetof(VRingPackedDesc, id);
    hwaddr off_len = i * sizeof(VRingPackedDesc) +
                     offsetof(VRingPackedDesc, len);

    virtio_tswap32s(vdev, &desc->len);
    virtio_tswap16s(vdev, &desc->id);
    address_space_write_cached(cache, off_id, &desc->id, sizeof(desc->id));
    address_space_cache_invalidate(cache, off_id, sizeof(desc->id));
    address_space_write_cached(cache, off_len, &desc->len, sizeof(desc->len));
    address_space_cache_invalidate(cache, off_len, sizeof(desc->len));
}

static void vring_packed_desc_write_flags(VirtIODevice *vdev,
                                          VRingPackedDesc *desc,
                                          MemoryRegionCache *cache,
                                          int i)
{
    hwaddr off = i * sizeof(VRingPackedDesc) + offsetof(VRingPackedDesc, flags);

    virtio_stw_phys_cached(vdev, cache, off, desc->flags);
    address_space_cache_invalidate(cache, off, sizeof(desc->flags));
}

static void vring_packed_desc_write(VirtIODevice *vdev,
                                    VRingPackedDesc *desc,
                                    MemoryRegionCache *cache,
                                    int i, bool strict_order)
{
    vring_packed_desc_write_data(vdev, desc, cache, i);
    if (strict_order) {
        /* Make sure data is written before flags. */
        smp_wmb();
    }
    vring_packed_desc_write_flags(vdev, desc, cache, i);
}

static inline bool is_desc_avail(uint16_t flags, bool wrap_counter)
{
    bool avail, used;

    avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
    used = !!(flags & (1 << VRING_PACKED_DESC_F_USED));
    return (avail != used) && (avail == wrap_counter);
}

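/*
 * How the check above behaves across one wrap of the packed ring,
 * assuming the driver starts with wrap_counter = 1:
 *
 *     AVAIL  USED  wrap_counter  is_desc_avail()
 *       1     0         1        true   (freshly made available)
 *       1     1         1        false  (device marked it used)
 *       0     1         0        true   (available again after wrap)
 *       0     0         0        false  (used again after wrap)
 */
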
/* Fetch avail_idx from VQ memory only when we really need to know if
 * guest has added some buffers.
 * Called within rcu_read_lock().  */
static int virtio_queue_empty_rcu(VirtQueue *vq)
{
    if (virtio_device_disabled(vq->vdev)) {
        return 1;
    }

    if (unlikely(!vq->vring.avail)) {
        return 1;
    }

    if (vq->shadow_avail_idx != vq->last_avail_idx) {
        return 0;
    }

    return vring_avail_idx(vq) == vq->last_avail_idx;
}

static int virtio_queue_split_empty(VirtQueue *vq)
{
    bool empty;

    if (virtio_device_disabled(vq->vdev)) {
        return 1;
    }

    if (unlikely(!vq->vring.avail)) {
        return 1;
    }

    if (vq->shadow_avail_idx != vq->last_avail_idx) {
        return 0;
    }

    RCU_READ_LOCK_GUARD();
    empty = vring_avail_idx(vq) == vq->last_avail_idx;
    return empty;
}

/* Called within rcu_read_lock().  */
static int virtio_queue_packed_empty_rcu(VirtQueue *vq)
{
    struct VRingPackedDesc desc;
    VRingMemoryRegionCaches *cache;

    if (unlikely(!vq->vring.desc)) {
        return 1;
    }

    cache = vring_get_region_caches(vq);
    if (!cache) {
        return 1;
    }

    vring_packed_desc_read_flags(vq->vdev, &desc.flags, &cache->desc,
                                 vq->last_avail_idx);

    return !is_desc_avail(desc.flags, vq->last_avail_wrap_counter);
}

static int virtio_queue_packed_empty(VirtQueue *vq)
{
    RCU_READ_LOCK_GUARD();
    return virtio_queue_packed_empty_rcu(vq);
}

int virtio_queue_empty(VirtQueue *vq)
{
    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        return virtio_queue_packed_empty(vq);
    } else {
        return virtio_queue_split_empty(vq);
    }
}

static void virtqueue_unmap_sg(VirtQueue *vq, const VirtQueueElement *elem,
                               unsigned int len)
{
    AddressSpace *dma_as = vq->vdev->dma_as;
    unsigned int offset;
    int i;

    offset = 0;
    for (i = 0; i < elem->in_num; i++) {
        size_t size = MIN(len - offset, elem->in_sg[i].iov_len);

        dma_memory_unmap(dma_as, elem->in_sg[i].iov_base,
                         elem->in_sg[i].iov_len,
                         DMA_DIRECTION_FROM_DEVICE, size);

        offset += size;
    }

    for (i = 0; i < elem->out_num; i++) {
        dma_memory_unmap(dma_as, elem->out_sg[i].iov_base,
                         elem->out_sg[i].iov_len,
                         DMA_DIRECTION_TO_DEVICE,
                         elem->out_sg[i].iov_len);
    }
}

/* virtqueue_detach_element:
 * @vq: The #VirtQueue
 * @elem: The #VirtQueueElement
 * @len: number of bytes written
 *
 * Detach the element from the virtqueue.  This function is suitable for device
 * reset or other situations where a #VirtQueueElement is simply freed and will
 * not be pushed or discarded.
 */
void virtqueue_detach_element(VirtQueue *vq, const VirtQueueElement *elem,
                              unsigned int len)
{
    vq->inuse -= elem->ndescs;
    virtqueue_unmap_sg(vq, elem, len);
}

static void virtqueue_split_rewind(VirtQueue *vq, unsigned int num)
{
    vq->last_avail_idx -= num;
}

static void virtqueue_packed_rewind(VirtQueue *vq, unsigned int num)
{
    if (vq->last_avail_idx < num) {
        vq->last_avail_idx = vq->vring.num + vq->last_avail_idx - num;
        vq->last_avail_wrap_counter ^= 1;
    } else {
        vq->last_avail_idx -= num;
    }
}

/* virtqueue_unpop:
 * @vq: The #VirtQueue
 * @elem: The #VirtQueueElement
 * @len: number of bytes written
 *
 * Pretend the most recent element wasn't popped from the virtqueue.  The next
 * call to virtqueue_pop() will refetch the element.
 */
void virtqueue_unpop(VirtQueue *vq, const VirtQueueElement *elem,
                     unsigned int len)
{
    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        virtqueue_packed_rewind(vq, 1);
    } else {
        virtqueue_split_rewind(vq, 1);
    }

    virtqueue_detach_element(vq, elem, len);
}

/* virtqueue_rewind:
 * @vq: The #VirtQueue
 * @num: Number of elements to push back
 *
 * Pretend that elements weren't popped from the virtqueue.  The next
 * virtqueue_pop() will refetch the oldest element.
 *
 * Use virtqueue_unpop() instead if you have a VirtQueueElement.
 *
 * Returns: true on success, false if @num is greater than the number of in use
 * elements.
 */
bool virtqueue_rewind(VirtQueue *vq, unsigned int num)
{
    if (num > vq->inuse) {
        return false;
    }

    vq->inuse -= num;
    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        virtqueue_packed_rewind(vq, num);
    } else {
        virtqueue_split_rewind(vq, num);
    }
    return true;
}

static void virtqueue_split_fill(VirtQueue *vq, const VirtQueueElement *elem,
                                 unsigned int len, unsigned int idx)
{
    VRingUsedElem uelem;

    if (unlikely(!vq->vring.used)) {
        return;
    }

    idx = (idx + vq->used_idx) % vq->vring.num;

    uelem.id = elem->index;
    uelem.len = len;
    vring_used_write(vq, &uelem, idx);
}

static void virtqueue_packed_fill(VirtQueue *vq, const VirtQueueElement *elem,
                                  unsigned int len, unsigned int idx)
{
    vq->used_elems[idx].index = elem->index;
    vq->used_elems[idx].len = len;
    vq->used_elems[idx].ndescs = elem->ndescs;
}

static void virtqueue_packed_fill_desc(VirtQueue *vq,
                                       const VirtQueueElement *elem,
                                       unsigned int idx,
                                       bool strict_order)
{
    uint16_t head;
    VRingMemoryRegionCaches *caches;
    VRingPackedDesc desc = {
        .id = elem->index,
        .len = elem->len,
    };
    bool wrap_counter = vq->used_wrap_counter;

    if (unlikely(!vq->vring.desc)) {
        return;
    }

    head = vq->used_idx + idx;
    if (head >= vq->vring.num) {
        head -= vq->vring.num;
        wrap_counter ^= 1;
    }
    if (wrap_counter) {
        desc.flags |= (1 << VRING_PACKED_DESC_F_AVAIL);
        desc.flags |= (1 << VRING_PACKED_DESC_F_USED);
    } else {
        desc.flags &= ~(1 << VRING_PACKED_DESC_F_AVAIL);
        desc.flags &= ~(1 << VRING_PACKED_DESC_F_USED);
    }

    caches = vring_get_region_caches(vq);
    if (!caches) {
        return;
    }

    vring_packed_desc_write(vq->vdev, &desc, &caches->desc, head, strict_order);
}

/* Called within rcu_read_lock().  */
void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len, unsigned int idx)
{
    trace_virtqueue_fill(vq, elem, len, idx);

    virtqueue_unmap_sg(vq, elem, len);

    if (virtio_device_disabled(vq->vdev)) {
        return;
    }

    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        virtqueue_packed_fill(vq, elem, len, idx);
    } else {
        virtqueue_split_fill(vq, elem, len, idx);
    }
}

/* Called within rcu_read_lock().  */
static void virtqueue_split_flush(VirtQueue *vq, unsigned int count)
{
    uint16_t old, new;

    if (unlikely(!vq->vring.used)) {
        return;
    }

    /* Make sure buffer is written before we update index. */
    smp_wmb();
    trace_virtqueue_flush(vq, count);
    old = vq->used_idx;
    new = old + count;
    vring_used_idx_set(vq, new);
    vq->inuse -= count;
    if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old)))
        vq->signalled_used_valid = false;
}

static void virtqueue_packed_flush(VirtQueue *vq, unsigned int count)
{
    unsigned int i, ndescs = 0;

    if (unlikely(!vq->vring.desc)) {
        return;
    }

    for (i = 1; i < count; i++) {
        virtqueue_packed_fill_desc(vq, &vq->used_elems[i], i, false);
        ndescs += vq->used_elems[i].ndescs;
    }
    virtqueue_packed_fill_desc(vq, &vq->used_elems[0], 0, true);
    ndescs += vq->used_elems[0].ndescs;

    vq->inuse -= ndescs;
    vq->used_idx += ndescs;
    if (vq->used_idx >= vq->vring.num) {
        vq->used_idx -= vq->vring.num;
        vq->used_wrap_counter ^= 1;
        vq->signalled_used_valid = false;
    }
}

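/*
 * Note the ordering above: elements 1..count-1 are written first with
 * relaxed ordering, and element 0 (the batch head the driver polls) is
 * written last with strict_order = true, so flipping its flags
 * publishes the whole batch to the driver at once.
 */
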
void virtqueue_flush(VirtQueue *vq, unsigned int count)
{
    if (virtio_device_disabled(vq->vdev)) {
        vq->inuse -= count;
        return;
    }

    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        virtqueue_packed_flush(vq, count);
    } else {
        virtqueue_split_flush(vq, count);
    }
}

void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len)
{
    RCU_READ_LOCK_GUARD();
    virtqueue_fill(vq, elem, len, 0);
    virtqueue_flush(vq, 1);
}

/* Called within rcu_read_lock().  */
static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
{
    uint16_t avail_idx, num_heads;

    /* Use shadow index whenever possible. */
    avail_idx = (vq->shadow_avail_idx != idx) ? vq->shadow_avail_idx
                                              : vring_avail_idx(vq);
    num_heads = avail_idx - idx;

    /* Check it isn't doing very strange things with descriptor numbers. */
    if (num_heads > vq->vring.num) {
        virtio_error(vq->vdev, "Guest moved used index from %u to %u",
                     idx, vq->shadow_avail_idx);
        return -EINVAL;
    }
    if (num_heads) {
        /* On success, callers read a descriptor at vq->last_avail_idx.
         * Make sure descriptor read does not bypass avail index read.
         *
         * This is necessary even if we are using a shadow index, since
         * the shadow index could have been initialized by calling
         * vring_avail_idx() outside of this function, i.e., by a guest
         * memory read not accompanied by a barrier.
         */
        smp_rmb();
    }

    return num_heads;
}

/* Called within rcu_read_lock().  */
static bool virtqueue_get_head(VirtQueue *vq, unsigned int idx,
                               unsigned int *head)
{
    /* Grab the next descriptor number they're advertising, and increment
     * the index we've seen. */
    *head = vring_avail_ring(vq, idx % vq->vring.num);

    /* If their number is silly, that's a fatal mistake. */
    if (*head >= vq->vring.num) {
        virtio_error(vq->vdev, "Guest says index %u is available", *head);
        return false;
    }

    return true;
}

enum {
    VIRTQUEUE_READ_DESC_ERROR = -1,
    VIRTQUEUE_READ_DESC_DONE = 0,   /* end of chain */
    VIRTQUEUE_READ_DESC_MORE = 1,   /* more buffers in chain */
};

/* Reads the 'desc->next' descriptor into '*desc'. */
static int virtqueue_split_read_next_desc(VirtIODevice *vdev, VRingDesc *desc,
                                          MemoryRegionCache *desc_cache,
                                          unsigned int max)
{
    /* If this descriptor says it doesn't chain, we're done. */
    if (!(desc->flags & VRING_DESC_F_NEXT)) {
        return VIRTQUEUE_READ_DESC_DONE;
    }

    /* Check they're not leading us off end of descriptors. */
    if (desc->next >= max) {
        virtio_error(vdev, "Desc next is %u", desc->next);
        return VIRTQUEUE_READ_DESC_ERROR;
    }

    vring_split_desc_read(vdev, desc, desc_cache, desc->next);
    return VIRTQUEUE_READ_DESC_MORE;
}

/* Called within rcu_read_lock().  */
static void virtqueue_split_get_avail_bytes(VirtQueue *vq,
                            unsigned int *in_bytes, unsigned int *out_bytes,
                            unsigned max_in_bytes, unsigned max_out_bytes,
                            VRingMemoryRegionCaches *caches)
{
    VirtIODevice *vdev = vq->vdev;
    unsigned int idx;
    unsigned int total_bufs, in_total, out_total;
    MemoryRegionCache indirect_desc_cache;
    int64_t len = 0;
    int rc;

    address_space_cache_init_empty(&indirect_desc_cache);

    idx = vq->last_avail_idx;
    total_bufs = in_total = out_total = 0;

    while ((rc = virtqueue_num_heads(vq, idx)) > 0) {
        MemoryRegionCache *desc_cache = &caches->desc;
        unsigned int num_bufs;
        VRingDesc desc;
        unsigned int i;
        unsigned int max = vq->vring.num;

        num_bufs = total_bufs;

        if (!virtqueue_get_head(vq, idx++, &i)) {
            goto err;
        }

        vring_split_desc_read(vdev, &desc, desc_cache, i);

        if (desc.flags & VRING_DESC_F_INDIRECT) {
            if (!desc.len || (desc.len % sizeof(VRingDesc))) {
                virtio_error(vdev, "Invalid size for indirect buffer table");
                goto err;
            }

            /* If we've got too many, that implies a descriptor loop. */
            if (num_bufs >= max) {
                virtio_error(vdev, "Looped descriptor");
                goto err;
            }

            /* loop over the indirect descriptor table */
            len = address_space_cache_init(&indirect_desc_cache,
                                           vdev->dma_as,
                                           desc.addr, desc.len, false);
            desc_cache = &indirect_desc_cache;
            if (len < desc.len) {
                virtio_error(vdev, "Cannot map indirect buffer");
                goto err;
            }

            max = desc.len / sizeof(VRingDesc);
            num_bufs = i = 0;
            vring_split_desc_read(vdev, &desc, desc_cache, i);
        }

        do {
            /* If we've got too many, that implies a descriptor loop. */
            if (++num_bufs > max) {
                virtio_error(vdev, "Looped descriptor");
                goto err;
            }

            if (desc.flags & VRING_DESC_F_WRITE) {
                in_total += desc.len;
            } else {
                out_total += desc.len;
            }
            if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
                goto done;
            }

            rc = virtqueue_split_read_next_desc(vdev, &desc, desc_cache, max);
        } while (rc == VIRTQUEUE_READ_DESC_MORE);

        if (rc == VIRTQUEUE_READ_DESC_ERROR) {
            goto err;
        }

        if (desc_cache == &indirect_desc_cache) {
            address_space_cache_destroy(&indirect_desc_cache);
            total_bufs++;
        } else {
            total_bufs = num_bufs;
        }
    }

    if (rc < 0) {
        goto err;
    }

done:
    address_space_cache_destroy(&indirect_desc_cache);
    if (in_bytes) {
        *in_bytes = in_total;
    }
    if (out_bytes) {
        *out_bytes = out_total;
    }
    return;

err:
    in_total = out_total = 0;
    goto done;
}

static int virtqueue_packed_read_next_desc(VirtQueue *vq,
                                           VRingPackedDesc *desc,
                                           MemoryRegionCache *desc_cache,
                                           unsigned int max,
                                           unsigned int *next,
                                           bool indirect)
{
    /* If this descriptor says it doesn't chain, we're done. */
    if (!indirect && !(desc->flags & VRING_DESC_F_NEXT)) {
        return VIRTQUEUE_READ_DESC_DONE;
    }

    ++*next;
    if (*next == max) {
        if (indirect) {
            return VIRTQUEUE_READ_DESC_DONE;
        } else {
            (*next) -= vq->vring.num;
        }
    }

    vring_packed_desc_read(vq->vdev, desc, desc_cache, *next, false);
    return VIRTQUEUE_READ_DESC_MORE;
}

/* Called within rcu_read_lock().  */
static void virtqueue_packed_get_avail_bytes(VirtQueue *vq,
                                             unsigned int *in_bytes,
                                             unsigned int *out_bytes,
                                             unsigned max_in_bytes,
                                             unsigned max_out_bytes,
                                             VRingMemoryRegionCaches *caches)
{
    VirtIODevice *vdev = vq->vdev;
    unsigned int idx;
    unsigned int total_bufs, in_total, out_total;
    MemoryRegionCache indirect_desc_cache;
    MemoryRegionCache *desc_cache;
    int64_t len = 0;
    VRingPackedDesc desc;
    bool wrap_counter;
    int rc;

    address_space_cache_init_empty(&indirect_desc_cache);

    idx = vq->last_avail_idx;
    wrap_counter = vq->last_avail_wrap_counter;
    total_bufs = in_total = out_total = 0;

    for (;;) {
        unsigned int num_bufs = total_bufs;
        unsigned int i = idx;
        unsigned int max = vq->vring.num;

        desc_cache = &caches->desc;

        vring_packed_desc_read(vdev, &desc, desc_cache, idx, true);
        if (!is_desc_avail(desc.flags, wrap_counter)) {
            break;
        }

        if (desc.flags & VRING_DESC_F_INDIRECT) {
            if (desc.len % sizeof(VRingPackedDesc)) {
                virtio_error(vdev, "Invalid size for indirect buffer table");
                goto err;
            }

            /* If we've got too many, that implies a descriptor loop. */
            if (num_bufs >= max) {
                virtio_error(vdev, "Looped descriptor");
                goto err;
            }

            /* loop over the indirect descriptor table */
            len = address_space_cache_init(&indirect_desc_cache,
                                           vdev->dma_as,
                                           desc.addr, desc.len, false);
            desc_cache = &indirect_desc_cache;
            if (len < desc.len) {
                virtio_error(vdev, "Cannot map indirect buffer");
                goto err;
            }

            max = desc.len / sizeof(VRingPackedDesc);
            num_bufs = i = 0;
            vring_packed_desc_read(vdev, &desc, desc_cache, i, false);
        }

        do {
            /* If we've got too many, that implies a descriptor loop. */
            if (++num_bufs > max) {
                virtio_error(vdev, "Looped descriptor");
                goto err;
            }

            if (desc.flags & VRING_DESC_F_WRITE) {
                in_total += desc.len;
            } else {
                out_total += desc.len;
            }
            if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
                goto done;
            }

            rc = virtqueue_packed_read_next_desc(vq, &desc, desc_cache, max,
                                                 &i, desc_cache ==
                                                 &indirect_desc_cache);
        } while (rc == VIRTQUEUE_READ_DESC_MORE);

        if (desc_cache == &indirect_desc_cache) {
            address_space_cache_destroy(&indirect_desc_cache);
            total_bufs++;
            idx++;
        } else {
            idx += num_bufs - total_bufs;
            total_bufs = num_bufs;
        }

        if (idx >= vq->vring.num) {
            idx -= vq->vring.num;
            wrap_counter ^= 1;
        }
    }

    /* Record the index and wrap counter for a kick we want */
    vq->shadow_avail_idx = idx;
    vq->shadow_avail_wrap_counter = wrap_counter;

done:
    address_space_cache_destroy(&indirect_desc_cache);
    if (in_bytes) {
        *in_bytes = in_total;
    }
    if (out_bytes) {
        *out_bytes = out_total;
    }
    return;

err:
    in_total = out_total = 0;
    goto done;
}

void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
                               unsigned int *out_bytes,
                               unsigned max_in_bytes, unsigned max_out_bytes)
{
    uint16_t desc_size;
    VRingMemoryRegionCaches *caches;

    RCU_READ_LOCK_GUARD();

    if (unlikely(!vq->vring.desc)) {
        goto err;
    }

    caches = vring_get_region_caches(vq);
    if (!caches) {
        goto err;
    }

    desc_size = virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED) ?
                                sizeof(VRingPackedDesc) : sizeof(VRingDesc);
    if (caches->desc.len < vq->vring.num * desc_size) {
        virtio_error(vq->vdev, "Cannot map descriptor ring");
        goto err;
    }

    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        virtqueue_packed_get_avail_bytes(vq, in_bytes, out_bytes,
                                         max_in_bytes, max_out_bytes,
                                         caches);
    } else {
        virtqueue_split_get_avail_bytes(vq, in_bytes, out_bytes,
                                        max_in_bytes, max_out_bytes,
                                        caches);
    }

    return;
err:
    if (in_bytes) {
        *in_bytes = 0;
    }
    if (out_bytes) {
        *out_bytes = 0;
    }
}

int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes,
                          unsigned int out_bytes)
{
    unsigned int in_total, out_total;

    virtqueue_get_avail_bytes(vq, &in_total, &out_total, in_bytes, out_bytes);
    return in_bytes <= in_total && out_bytes <= out_total;
}

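/*
 * A minimal sketch of how a device model might use the helper above to
 * check, before popping, that the guest queued at least one 4 KiB
 * writable buffer (the surrounding device code is illustrative):
 *
 *     if (!virtqueue_avail_bytes(vq, 4096, 0)) {
 *         return;    // try again on the next kick
 *     }
 *     elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
 */
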
static bool virtqueue_map_desc(VirtIODevice *vdev, unsigned int *p_num_sg,
                               hwaddr *addr, struct iovec *iov,
                               unsigned int max_num_sg, bool is_write,
                               hwaddr pa, size_t sz)
{
    bool ok = false;
    unsigned num_sg = *p_num_sg;
    assert(num_sg <= max_num_sg);

    if (!sz) {
        virtio_error(vdev, "virtio: zero sized buffers are not allowed");
        goto out;
    }

    while (sz) {
        hwaddr len = sz;

        if (num_sg == max_num_sg) {
            virtio_error(vdev, "virtio: too many write descriptors in "
                               "indirect table");
            goto out;
        }

        iov[num_sg].iov_base = dma_memory_map(vdev->dma_as, pa, &len,
                                              is_write ?
                                              DMA_DIRECTION_FROM_DEVICE :
                                              DMA_DIRECTION_TO_DEVICE,
                                              MEMTXATTRS_UNSPECIFIED);
        if (!iov[num_sg].iov_base) {
            virtio_error(vdev, "virtio: bogus descriptor or out of resources");
            goto out;
        }

        iov[num_sg].iov_len = len;
        addr[num_sg] = pa;

        sz -= len;
        pa += len;
        num_sg++;
    }
    ok = true;

out:
    *p_num_sg = num_sg;
    return ok;
}

/* Only used by error code paths before we have a VirtQueueElement (therefore
 * virtqueue_unmap_sg() can't be used).  Assumes buffers weren't written to
 * yet.
 */
static void virtqueue_undo_map_desc(unsigned int out_num, unsigned int in_num,
                                    struct iovec *iov)
{
    unsigned int i;

    for (i = 0; i < out_num + in_num; i++) {
        int is_write = i >= out_num;

        cpu_physical_memory_unmap(iov->iov_base, iov->iov_len, is_write, 0);
        iov++;
    }
}

static void virtqueue_map_iovec(VirtIODevice *vdev, struct iovec *sg,
                                hwaddr *addr, unsigned int num_sg,
                                bool is_write)
{
    unsigned int i;
    hwaddr len;

    for (i = 0; i < num_sg; i++) {
        len = sg[i].iov_len;
        sg[i].iov_base = dma_memory_map(vdev->dma_as,
                                        addr[i], &len, is_write ?
                                        DMA_DIRECTION_FROM_DEVICE :
                                        DMA_DIRECTION_TO_DEVICE,
                                        MEMTXATTRS_UNSPECIFIED);
        if (!sg[i].iov_base) {
            error_report("virtio: error trying to map MMIO memory");
            exit(1);
        }
        if (len != sg[i].iov_len) {
            error_report("virtio: unexpected memory split");
            exit(1);
        }
    }
}

void virtqueue_map(VirtIODevice *vdev, VirtQueueElement *elem)
{
    virtqueue_map_iovec(vdev, elem->in_sg, elem->in_addr, elem->in_num, true);
    virtqueue_map_iovec(vdev, elem->out_sg, elem->out_addr, elem->out_num,
                        false);
}

static void *virtqueue_alloc_element(size_t sz, unsigned out_num, unsigned in_num)
{
    VirtQueueElement *elem;
    size_t in_addr_ofs = QEMU_ALIGN_UP(sz, __alignof__(elem->in_addr[0]));
    size_t out_addr_ofs = in_addr_ofs + in_num * sizeof(elem->in_addr[0]);
    size_t out_addr_end = out_addr_ofs + out_num * sizeof(elem->out_addr[0]);
    size_t in_sg_ofs = QEMU_ALIGN_UP(out_addr_end, __alignof__(elem->in_sg[0]));
    size_t out_sg_ofs = in_sg_ofs + in_num * sizeof(elem->in_sg[0]);
    size_t out_sg_end = out_sg_ofs + out_num * sizeof(elem->out_sg[0]);

    assert(sz >= sizeof(VirtQueueElement));
    elem = g_malloc(out_sg_end);
    trace_virtqueue_alloc_element(elem, sz, in_num, out_num);
    elem->out_num = out_num;
    elem->in_num = in_num;
    elem->in_addr = (void *)elem + in_addr_ofs;
    elem->out_addr = (void *)elem + out_addr_ofs;
    elem->in_sg = (void *)elem + in_sg_ofs;
    elem->out_sg = (void *)elem + out_sg_ofs;
    return elem;
}

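/*
 * The element and its four variable-length arrays live in a single
 * allocation.  For a caller-requested header of sz bytes the layout is:
 *
 *     [header (sz)] [in_addr[in_num]] [out_addr[out_num]]
 *     [in_sg[in_num]] [out_sg[out_num]]
 *
 * with each array aligned up to its element alignment, so one
 * g_free(elem) releases everything.
 */
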
static void *virtqueue_split_pop(VirtQueue *vq, size_t sz)
{
    unsigned int i, head, max;
    VRingMemoryRegionCaches *caches;
    MemoryRegionCache indirect_desc_cache;
    MemoryRegionCache *desc_cache;
    int64_t len;
    VirtIODevice *vdev = vq->vdev;
    VirtQueueElement *elem = NULL;
    unsigned out_num, in_num, elem_entries;
    hwaddr addr[VIRTQUEUE_MAX_SIZE];
    struct iovec iov[VIRTQUEUE_MAX_SIZE];
    VRingDesc desc;
    int rc;

    address_space_cache_init_empty(&indirect_desc_cache);

    RCU_READ_LOCK_GUARD();
    if (virtio_queue_empty_rcu(vq)) {
        goto done;
    }
    /* Needed after virtio_queue_empty(), see comment in
     * virtqueue_num_heads(). */
    smp_rmb();

    /* When we start there are neither input nor output buffers. */
    out_num = in_num = elem_entries = 0;

    max = vq->vring.num;

    if (vq->inuse >= vq->vring.num) {
        virtio_error(vdev, "Virtqueue size exceeded");
        goto done;
    }

    if (!virtqueue_get_head(vq, vq->last_avail_idx++, &head)) {
        goto done;
    }

    if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vq->last_avail_idx);
    }

    i = head;

    caches = vring_get_region_caches(vq);
    if (!caches) {
        virtio_error(vdev, "Region caches not initialized");
        goto done;
    }

    if (caches->desc.len < max * sizeof(VRingDesc)) {
        virtio_error(vdev, "Cannot map descriptor ring");
        goto done;
    }

    desc_cache = &caches->desc;
    vring_split_desc_read(vdev, &desc, desc_cache, i);
    if (desc.flags & VRING_DESC_F_INDIRECT) {
        if (!desc.len || (desc.len % sizeof(VRingDesc))) {
            virtio_error(vdev, "Invalid size for indirect buffer table");
            goto done;
        }

        /* loop over the indirect descriptor table */
        len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as,
                                       desc.addr, desc.len, false);
        desc_cache = &indirect_desc_cache;
        if (len < desc.len) {
            virtio_error(vdev, "Cannot map indirect buffer");
            goto done;
        }

        max = desc.len / sizeof(VRingDesc);
        i = 0;
        vring_split_desc_read(vdev, &desc, desc_cache, i);
    }

    /* Collect all the descriptors */
    do {
        bool map_ok;

        if (desc.flags & VRING_DESC_F_WRITE) {
            map_ok = virtqueue_map_desc(vdev, &in_num, addr + out_num,
                                        iov + out_num,
                                        VIRTQUEUE_MAX_SIZE - out_num, true,
                                        desc.addr, desc.len);
        } else {
            if (in_num) {
                virtio_error(vdev, "Incorrect order for descriptors");
                goto err_undo_map;
            }
            map_ok = virtqueue_map_desc(vdev, &out_num, addr, iov,
                                        VIRTQUEUE_MAX_SIZE, false,
                                        desc.addr, desc.len);
        }
        if (!map_ok) {
            goto err_undo_map;
        }

        /* If we've got too many, that implies a descriptor loop. */
        if (++elem_entries > max) {
            virtio_error(vdev, "Looped descriptor");
            goto err_undo_map;
        }

        rc = virtqueue_split_read_next_desc(vdev, &desc, desc_cache, max);
    } while (rc == VIRTQUEUE_READ_DESC_MORE);

    if (rc == VIRTQUEUE_READ_DESC_ERROR) {
        goto err_undo_map;
    }

    /* Now copy what we have collected and mapped */
    elem = virtqueue_alloc_element(sz, out_num, in_num);
    elem->index = head;
    elem->ndescs = 1;
    for (i = 0; i < out_num; i++) {
        elem->out_addr[i] = addr[i];
        elem->out_sg[i] = iov[i];
    }
    for (i = 0; i < in_num; i++) {
        elem->in_addr[i] = addr[out_num + i];
        elem->in_sg[i] = iov[out_num + i];
    }

    vq->inuse++;

    trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
done:
    address_space_cache_destroy(&indirect_desc_cache);

    return elem;

err_undo_map:
    virtqueue_undo_map_desc(out_num, in_num, iov);
    goto done;
}

static void *virtqueue_packed_pop(VirtQueue *vq, size_t sz)
{
    unsigned int i, max;
    VRingMemoryRegionCaches *caches;
    MemoryRegionCache indirect_desc_cache;
    MemoryRegionCache *desc_cache;
    int64_t len;
    VirtIODevice *vdev = vq->vdev;
    VirtQueueElement *elem = NULL;
    unsigned out_num, in_num, elem_entries;
    hwaddr addr[VIRTQUEUE_MAX_SIZE];
    struct iovec iov[VIRTQUEUE_MAX_SIZE];
    VRingPackedDesc desc;
    uint16_t id;
    int rc;

    address_space_cache_init_empty(&indirect_desc_cache);

    RCU_READ_LOCK_GUARD();
    if (virtio_queue_packed_empty_rcu(vq)) {
        goto done;
    }

    /* When we start there are neither input nor output buffers. */
    out_num = in_num = elem_entries = 0;

    max = vq->vring.num;

    if (vq->inuse >= vq->vring.num) {
        virtio_error(vdev, "Virtqueue size exceeded");
        goto done;
    }

    i = vq->last_avail_idx;

    caches = vring_get_region_caches(vq);
    if (!caches) {
        virtio_error(vdev, "Region caches not initialized");
        goto done;
    }

    if (caches->desc.len < max * sizeof(VRingDesc)) {
        virtio_error(vdev, "Cannot map descriptor ring");
        goto done;
    }

    desc_cache = &caches->desc;
    vring_packed_desc_read(vdev, &desc, desc_cache, i, true);
    id = desc.id;
    if (desc.flags & VRING_DESC_F_INDIRECT) {
        if (desc.len % sizeof(VRingPackedDesc)) {
            virtio_error(vdev, "Invalid size for indirect buffer table");
            goto done;
        }

        /* loop over the indirect descriptor table */
        len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as,
                                       desc.addr, desc.len, false);
        desc_cache = &indirect_desc_cache;
        if (len < desc.len) {
            virtio_error(vdev, "Cannot map indirect buffer");
            goto done;
        }

        max = desc.len / sizeof(VRingPackedDesc);
        i = 0;
        vring_packed_desc_read(vdev, &desc, desc_cache, i, false);
    }

    /* Collect all the descriptors */
    do {
        bool map_ok;

        if (desc.flags & VRING_DESC_F_WRITE) {
            map_ok = virtqueue_map_desc(vdev, &in_num, addr + out_num,
                                        iov + out_num,
                                        VIRTQUEUE_MAX_SIZE - out_num, true,
                                        desc.addr, desc.len);
        } else {
            if (in_num) {
                virtio_error(vdev, "Incorrect order for descriptors");
                goto err_undo_map;
            }
            map_ok = virtqueue_map_desc(vdev, &out_num, addr, iov,
                                        VIRTQUEUE_MAX_SIZE, false,
                                        desc.addr, desc.len);
        }
        if (!map_ok) {
            goto err_undo_map;
        }

        /* If we've got too many, that implies a descriptor loop. */
        if (++elem_entries > max) {
            virtio_error(vdev, "Looped descriptor");
            goto err_undo_map;
        }

        rc = virtqueue_packed_read_next_desc(vq, &desc, desc_cache, max, &i,
                                             desc_cache ==
                                             &indirect_desc_cache);
    } while (rc == VIRTQUEUE_READ_DESC_MORE);

    /* Now copy what we have collected and mapped */
    elem = virtqueue_alloc_element(sz, out_num, in_num);
    for (i = 0; i < out_num; i++) {
        elem->out_addr[i] = addr[i];
        elem->out_sg[i] = iov[i];
    }
    for (i = 0; i < in_num; i++) {
        elem->in_addr[i] = addr[out_num + i];
        elem->in_sg[i] = iov[out_num + i];
    }

    elem->index = id;
    elem->ndescs = (desc_cache == &indirect_desc_cache) ? 1 : elem_entries;
    vq->last_avail_idx += elem->ndescs;
    vq->inuse += elem->ndescs;

    if (vq->last_avail_idx >= vq->vring.num) {
        vq->last_avail_idx -= vq->vring.num;
        vq->last_avail_wrap_counter ^= 1;
    }

    vq->shadow_avail_idx = vq->last_avail_idx;
    vq->shadow_avail_wrap_counter = vq->last_avail_wrap_counter;

    trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
done:
    address_space_cache_destroy(&indirect_desc_cache);

    return elem;

err_undo_map:
    virtqueue_undo_map_desc(out_num, in_num, iov);
    goto done;
}

void *virtqueue_pop(VirtQueue *vq, size_t sz)
{
    if (virtio_device_disabled(vq->vdev)) {
        return NULL;
    }

    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        return virtqueue_packed_pop(vq, sz);
    } else {
        return virtqueue_split_pop(vq, sz);
    }
}

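/*
 * A minimal sketch of the element lifecycle a device handler follows,
 * assuming a request type MyReq that embeds a VirtQueueElement as its
 * first field (MyReq and process() are illustrative names only):
 *
 *     MyReq *req = virtqueue_pop(vq, sizeof(MyReq));
 *     if (!req) {
 *         return;    // ring empty or device broken
 *     }
 *     len = process(req->elem.out_sg, req->elem.out_num,
 *                   req->elem.in_sg, req->elem.in_num);
 *     virtqueue_push(vq, &req->elem, len);  // unmaps and fills used ring
 *     virtio_notify(vdev, vq);
 *     g_free(req);
 */
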
static unsigned int virtqueue_packed_drop_all(VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches;
    MemoryRegionCache *desc_cache;
    unsigned int dropped = 0;
    VirtQueueElement elem = {};
    VirtIODevice *vdev = vq->vdev;
    VRingPackedDesc desc;

    RCU_READ_LOCK_GUARD();

    caches = vring_get_region_caches(vq);
    if (!caches) {
        return 0;
    }

    desc_cache = &caches->desc;

    virtio_queue_set_notification(vq, 0);

    while (vq->inuse < vq->vring.num) {
        unsigned int idx = vq->last_avail_idx;
        /*
         * works similarly to virtqueue_pop but does not map buffers
         * and does not allocate any memory.
         */
        vring_packed_desc_read(vdev, &desc, desc_cache,
                               vq->last_avail_idx, true);
        if (!is_desc_avail(desc.flags, vq->last_avail_wrap_counter)) {
            break;
        }
        elem.index = desc.id;
        elem.ndescs = 1;
        while (virtqueue_packed_read_next_desc(vq, &desc, desc_cache,
                                               vq->vring.num, &idx, false)) {
            ++elem.ndescs;
        }
        /*
         * immediately push the element, nothing to unmap
         * as both in_num and out_num are set to 0.
         */
        virtqueue_push(vq, &elem, 0);
        dropped++;
        vq->last_avail_idx += elem.ndescs;
        if (vq->last_avail_idx >= vq->vring.num) {
            vq->last_avail_idx -= vq->vring.num;
            vq->last_avail_wrap_counter ^= 1;
        }
    }

    return dropped;
}

static unsigned int virtqueue_split_drop_all(VirtQueue *vq)
{
    unsigned int dropped = 0;
    VirtQueueElement elem = {};
    VirtIODevice *vdev = vq->vdev;
    bool fEventIdx = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

    while (!virtio_queue_empty(vq) && vq->inuse < vq->vring.num) {
        /* works similarly to virtqueue_pop but does not map buffers
         * and does not allocate any memory */
        smp_rmb();
        if (!virtqueue_get_head(vq, vq->last_avail_idx, &elem.index)) {
            break;
        }
        vq->inuse++;
        vq->last_avail_idx++;
        if (fEventIdx) {
            vring_set_avail_event(vq, vq->last_avail_idx);
        }
        /* immediately push the element, nothing to unmap
         * as both in_num and out_num are set to 0 */
        virtqueue_push(vq, &elem, 0);
        dropped++;
    }

    return dropped;
}

/* virtqueue_drop_all:
 * @vq: The #VirtQueue
 * Drops all queued buffers and indicates them to the guest
 * as if they are done. Useful when buffers can not be
 * processed but must be returned to the guest.
 */
unsigned int virtqueue_drop_all(VirtQueue *vq)
{
    struct VirtIODevice *vdev = vq->vdev;

    if (virtio_device_disabled(vq->vdev)) {
        return 0;
    }

    if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
        return virtqueue_packed_drop_all(vq);
    } else {
        return virtqueue_split_drop_all(vq);
    }
}

/* Reading and writing a structure directly to QEMUFile is *awful*, but
 * it is what QEMU has always done by mistake.  We can change it sooner
 * or later by bumping the version number of the affected vm states.
 * In the meanwhile, since the in-memory layout of VirtQueueElement
 * has changed, we need to marshal to and from the layout that was
 * used before the change.
 */
typedef struct VirtQueueElementOld {
    unsigned int index;
    unsigned int out_num;
    unsigned int in_num;
    hwaddr in_addr[VIRTQUEUE_MAX_SIZE];
    hwaddr out_addr[VIRTQUEUE_MAX_SIZE];
    struct iovec in_sg[VIRTQUEUE_MAX_SIZE];
    struct iovec out_sg[VIRTQUEUE_MAX_SIZE];
} VirtQueueElementOld;

void *qemu_get_virtqueue_element(VirtIODevice *vdev, QEMUFile *f, size_t sz)
{
    VirtQueueElement *elem;
    VirtQueueElementOld data;
    int i;

    qemu_get_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld));

    /* TODO: teach all callers that this can fail, and return failure instead
     * of asserting here.
     * This is just one thing (there are probably more) that must be
     * fixed before we can allow NDEBUG compilation.
     */
    assert(ARRAY_SIZE(data.in_addr) >= data.in_num);
    assert(ARRAY_SIZE(data.out_addr) >= data.out_num);

    elem = virtqueue_alloc_element(sz, data.out_num, data.in_num);
    elem->index = data.index;

    for (i = 0; i < elem->in_num; i++) {
        elem->in_addr[i] = data.in_addr[i];
    }

    for (i = 0; i < elem->out_num; i++) {
        elem->out_addr[i] = data.out_addr[i];
    }

    for (i = 0; i < elem->in_num; i++) {
        /* Base is overwritten by virtqueue_map.  */
        elem->in_sg[i].iov_base = 0;
        elem->in_sg[i].iov_len = data.in_sg[i].iov_len;
    }

    for (i = 0; i < elem->out_num; i++) {
        /* Base is overwritten by virtqueue_map.  */
        elem->out_sg[i].iov_base = 0;
        elem->out_sg[i].iov_len = data.out_sg[i].iov_len;
    }

    if (virtio_host_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
        qemu_get_be32s(f, &elem->ndescs);
    }

    virtqueue_map(vdev, elem);
    return elem;
}

void qemu_put_virtqueue_element(VirtIODevice *vdev, QEMUFile *f,
                                VirtQueueElement *elem)
{
    VirtQueueElementOld data;
    int i;

    memset(&data, 0, sizeof(data));
    data.index = elem->index;
    data.in_num = elem->in_num;
    data.out_num = elem->out_num;

    for (i = 0; i < elem->in_num; i++) {
        data.in_addr[i] = elem->in_addr[i];
    }

    for (i = 0; i < elem->out_num; i++) {
        data.out_addr[i] = elem->out_addr[i];
    }

    for (i = 0; i < elem->in_num; i++) {
        /* Base is overwritten by virtqueue_map when loading.  Do not
         * save it, as it would leak the QEMU address space layout. */
        data.in_sg[i].iov_len = elem->in_sg[i].iov_len;
    }

    for (i = 0; i < elem->out_num; i++) {
        /* Do not save iov_base as above.  */
        data.out_sg[i].iov_len = elem->out_sg[i].iov_len;
    }

    if (virtio_host_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
        qemu_put_be32s(f, &elem->ndescs);
    }

    qemu_put_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld));
}

static void virtio_notify_vector(VirtIODevice *vdev, uint16_t vector)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    if (virtio_device_disabled(vdev)) {
        return;
    }

    if (k->notify) {
        k->notify(qbus->parent, vector);
    }
}

void virtio_update_irq(VirtIODevice *vdev)
{
    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
}

static int virtio_validate_features(VirtIODevice *vdev)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    if (virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM) &&
        !virtio_vdev_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM)) {
        return -EFAULT;
    }

    if (k->validate_features) {
        return k->validate_features(vdev);
    } else {
        return 0;
    }
}

int virtio_set_status(VirtIODevice *vdev, uint8_t val)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    trace_virtio_set_status(vdev, val);

    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        if (!(vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) &&
            val & VIRTIO_CONFIG_S_FEATURES_OK) {
            int ret = virtio_validate_features(vdev);

            if (ret) {
                return ret;
            }
        }
    }

    if ((vdev->status & VIRTIO_CONFIG_S_DRIVER_OK) !=
        (val & VIRTIO_CONFIG_S_DRIVER_OK)) {
        virtio_set_started(vdev, val & VIRTIO_CONFIG_S_DRIVER_OK);
    }

    if (k->set_status) {
        k->set_status(vdev, val);
    }
    vdev->status = val;

    return 0;
}

static enum virtio_device_endian virtio_default_endian(void)
{
    if (target_words_bigendian()) {
        return VIRTIO_DEVICE_ENDIAN_BIG;
    } else {
        return VIRTIO_DEVICE_ENDIAN_LITTLE;
    }
}

static enum virtio_device_endian virtio_current_cpu_endian(void)
{
    if (cpu_virtio_is_big_endian(current_cpu)) {
        return VIRTIO_DEVICE_ENDIAN_BIG;
    } else {
        return VIRTIO_DEVICE_ENDIAN_LITTLE;
    }
}

static void __virtio_queue_reset(VirtIODevice *vdev, uint32_t i)
{
    vdev->vq[i].vring.desc = 0;
    vdev->vq[i].vring.avail = 0;
    vdev->vq[i].vring.used = 0;
    vdev->vq[i].last_avail_idx = 0;
    vdev->vq[i].shadow_avail_idx = 0;
    vdev->vq[i].used_idx = 0;
    vdev->vq[i].last_avail_wrap_counter = true;
    vdev->vq[i].shadow_avail_wrap_counter = true;
    vdev->vq[i].used_wrap_counter = true;
    virtio_queue_set_vector(vdev, i, VIRTIO_NO_VECTOR);
    vdev->vq[i].signalled_used = 0;
    vdev->vq[i].signalled_used_valid = false;
    vdev->vq[i].notification = true;
    vdev->vq[i].vring.num = vdev->vq[i].vring.num_default;
    vdev->vq[i].inuse = 0;
    virtio_virtqueue_reset_region_cache(&vdev->vq[i]);
}

void virtio_queue_reset(VirtIODevice *vdev, uint32_t queue_index)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    if (k->queue_reset) {
        k->queue_reset(vdev, queue_index);
    }

    __virtio_queue_reset(vdev, queue_index);
}

void virtio_queue_enable(VirtIODevice *vdev, uint32_t queue_index)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    /*
     * TODO: Seabios is currently out of spec and triggering this error.
     * So this needs to be fixed in Seabios, then this can
     * be re-enabled for new machine types only, and also after
     * being converted to LOG_GUEST_ERROR.
     *
    if (!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        error_report("queue_enable is only supported in devices of virtio "
                     "1.0 or later.");
    }
    */

    if (k->queue_enable) {
        k->queue_enable(vdev, queue_index);
    }
}

void virtio_reset(void *opaque)
{
    VirtIODevice *vdev = opaque;
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    int i;

    virtio_set_status(vdev, 0);
    if (current_cpu) {
        /* Guest initiated reset */
        vdev->device_endian = virtio_current_cpu_endian();
    } else {
        /* System reset */
        vdev->device_endian = virtio_default_endian();
    }

    if (vdev->vhost_started && k->get_vhost) {
        vhost_reset_device(k->get_vhost(vdev));
    }

    if (k->reset) {
        k->reset(vdev);
    }

    vdev->start_on_kick = false;
    vdev->started = false;
    vdev->broken = false;
    vdev->guest_features = 0;
    vdev->queue_sel = 0;
    vdev->status = 0;
    vdev->disabled = false;
    qatomic_set(&vdev->isr, 0);
    vdev->config_vector = VIRTIO_NO_VECTOR;
    virtio_notify_vector(vdev, vdev->config_vector);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        __virtio_queue_reset(vdev, i);
    }
}

void virtio_queue_set_addr(VirtIODevice *vdev, int n, hwaddr addr)
{
    if (!vdev->vq[n].vring.num) {
        return;
    }
    vdev->vq[n].vring.desc = addr;
    virtio_queue_update_rings(vdev, n);
}

hwaddr virtio_queue_get_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

void virtio_queue_set_rings(VirtIODevice *vdev, int n, hwaddr desc,
                            hwaddr avail, hwaddr used)
{
    if (!vdev->vq[n].vring.num) {
        return;
    }
    vdev->vq[n].vring.desc = desc;
    vdev->vq[n].vring.avail = avail;
    vdev->vq[n].vring.used = used;
    virtio_init_region_cache(vdev, n);
}

void virtio_queue_set_num(VirtIODevice *vdev, int n, int num)
{
    /* Don't allow guest to flip queue between existent and
     * nonexistent states, or to set it to an invalid size.
     */
    if (!!num != !!vdev->vq[n].vring.num ||
        num > VIRTQUEUE_MAX_SIZE ||
        num < 0) {
        return;
    }
    vdev->vq[n].vring.num = num;
}

VirtQueue *virtio_vector_first_queue(VirtIODevice *vdev, uint16_t vector)
{
    return QLIST_FIRST(&vdev->vector_queues[vector]);
}

VirtQueue *virtio_vector_next_queue(VirtQueue *vq)
{
    return QLIST_NEXT(vq, node);
}

int virtio_queue_get_num(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.num;
}

int virtio_queue_get_max_num(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.num_default;
}

int virtio_get_num_queues(VirtIODevice *vdev)
{
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (!virtio_queue_get_num(vdev, i)) {
            break;
        }
    }

    return i;
}

void virtio_queue_set_align(VirtIODevice *vdev, int n, int align)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    /* virtio-1 compliant devices cannot change the alignment */
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        error_report("tried to modify queue alignment for virtio-1 device");
        return;
    }
    /* Check that the transport told us it was going to do this
     * (so a buggy transport will immediately assert rather than
     * silently failing to migrate this state)
     */
    assert(k->has_variable_vring_alignment);

    if (align) {
        vdev->vq[n].vring.align = align;
        virtio_queue_update_rings(vdev, n);
    }
}

static void virtio_queue_notify_vq(VirtQueue *vq)
{
    if (vq->vring.desc && vq->handle_output) {
        VirtIODevice *vdev = vq->vdev;

        if (unlikely(vdev->broken)) {
            return;
        }

        trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
        vq->handle_output(vdev, vq);

        if (unlikely(vdev->start_on_kick)) {
            virtio_set_started(vdev, true);
        }
    }
}

void virtio_queue_notify(VirtIODevice *vdev, int n)
{
    VirtQueue *vq = &vdev->vq[n];

    if (unlikely(!vq->vring.desc || vdev->broken)) {
        return;
    }

    trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
    if (vq->host_notifier_enabled) {
        event_notifier_set(&vq->host_notifier);
    } else if (vq->handle_output) {
        vq->handle_output(vdev, vq);

        if (unlikely(vdev->start_on_kick)) {
            virtio_set_started(vdev, true);
        }
    }
}

uint16_t virtio_queue_vector(VirtIODevice *vdev, int n)
{
    return n < VIRTIO_QUEUE_MAX ? vdev->vq[n].vector :
        VIRTIO_NO_VECTOR;
}

void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector)
{
    VirtQueue *vq = &vdev->vq[n];

    if (n < VIRTIO_QUEUE_MAX) {
        if (vdev->vector_queues &&
            vdev->vq[n].vector != VIRTIO_NO_VECTOR) {
            QLIST_REMOVE(vq, node);
        }
        vdev->vq[n].vector = vector;
        if (vdev->vector_queues &&
            vector != VIRTIO_NO_VECTOR) {
            QLIST_INSERT_HEAD(&vdev->vector_queues[vector], vq, node);
        }
    }
}

VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
                            VirtIOHandleOutput handle_output)
{
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0) {
            break;
        }
    }

    if (i == VIRTIO_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE) {
        abort();
    }

    vdev->vq[i].vring.num = queue_size;
    vdev->vq[i].vring.num_default = queue_size;
    vdev->vq[i].vring.align = VIRTIO_PCI_VRING_ALIGN;
    vdev->vq[i].handle_output = handle_output;
    vdev->vq[i].used_elems = g_new0(VirtQueueElement, queue_size);

    return &vdev->vq[i];
}

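/*
 * A minimal sketch of how a device realize function typically pairs
 * with the helper above (my_dev_handle_rx is an illustrative name):
 *
 *     static void my_dev_handle_rx(VirtIODevice *vdev, VirtQueue *vq);
 *
 *     s->rx_vq = virtio_add_queue(vdev, 256, my_dev_handle_rx);
 *
 * The matching cleanup in unrealize is virtio_delete_queue(s->rx_vq),
 * or virtio_del_queue(vdev, n) by index.
 */
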
void virtio_delete_queue(VirtQueue *vq)
{
    vq->vring.num = 0;
    vq->vring.num_default = 0;
    vq->handle_output = NULL;
    g_free(vq->used_elems);
    vq->used_elems = NULL;
    virtio_virtqueue_reset_region_cache(vq);
}

void virtio_del_queue(VirtIODevice *vdev, int n)
{
    if (n < 0 || n >= VIRTIO_QUEUE_MAX) {
        abort();
    }

    virtio_delete_queue(&vdev->vq[n]);
}

static void virtio_set_isr(VirtIODevice *vdev, int value)
{
    uint8_t old = qatomic_read(&vdev->isr);

    /* Do not write ISR if it does not change, so that its cacheline remains
     * shared in the common case where the guest does not read it.
     */
    if ((old & value) != value) {
        qatomic_or(&vdev->isr, value);
    }
}
/* Called within rcu_read_lock(). */
static bool virtio_split_should_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    uint16_t old, new;
    bool v;
    /* We need to expose used array entries before checking used event. */
    smp_mb();
    /* Always notify when queue is empty (when feature acknowledge) */
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
        !vq->inuse && virtio_queue_empty(vq)) {
        return true;
    }

    if (!virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
        return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
    }

    v = vq->signalled_used_valid;
    vq->signalled_used_valid = true;
    old = vq->signalled_used;
    new = vq->signalled_used = vq->used_idx;
    return !v || vring_need_event(vring_get_used_event(vq), new, old);
}
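
/*
 * Illustrative note (not part of the original logic): vring_need_event(),
 * from the standard virtio ring header, reduces the check above to
 * unsigned 16-bit arithmetic:
 *
 *     (uint16_t)(new - event_idx - 1) < (uint16_t)(new - old)
 *
 * For example, with old = 5, new = 8 and a guest-written used_event of 6,
 * (8 - 6 - 1) = 1 < (8 - 5) = 3 holds and the guest is notified; with
 * used_event = 9 the left-hand side wraps to 0xFFFE and no interrupt is
 * raised until the used index passes the event index.
 */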
static bool vring_packed_need_event(VirtQueue *vq, bool wrap,
                                    uint16_t off_wrap, uint16_t new,
                                    uint16_t old)
{
    int off = off_wrap & ~(1 << 15);

    if (wrap != off_wrap >> 15) {
        off -= vq->vring.num;
    }

    return vring_need_event(off, new, old);
}
/* Called within rcu_read_lock(). */
static bool virtio_packed_should_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    VRingPackedDescEvent e;
    uint16_t old, new;
    bool v;
    VRingMemoryRegionCaches *caches;

    caches = vring_get_region_caches(vq);
    if (!caches) {
        return false;
    }

    vring_packed_event_read(vdev, &caches->avail, &e);

    old = vq->signalled_used;
    new = vq->signalled_used = vq->used_idx;
    v = vq->signalled_used_valid;
    vq->signalled_used_valid = true;

    if (e.flags == VRING_PACKED_EVENT_FLAG_DISABLE) {
        return false;
    } else if (e.flags == VRING_PACKED_EVENT_FLAG_ENABLE) {
        return true;
    }

    return !v || vring_packed_need_event(vq, vq->used_wrap_counter,
                                         e.off_wrap, new, old);
}

/* Called within rcu_read_lock(). */
static bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
        return virtio_packed_should_notify(vdev, vq);
    } else {
        return virtio_split_should_notify(vdev, vq);
    }
}
/* Batch irqs while inside a defer_call_begin()/defer_call_end() section */
static void virtio_notify_irqfd_deferred_fn(void *opaque)
{
    EventNotifier *notifier = opaque;
    VirtQueue *vq = container_of(notifier, VirtQueue, guest_notifier);

    trace_virtio_notify_irqfd_deferred_fn(vq->vdev, vq);
    event_notifier_set(notifier);
}
void virtio_notify_irqfd(VirtIODevice *vdev, VirtQueue *vq)
{
    WITH_RCU_READ_LOCK_GUARD() {
        if (!virtio_should_notify(vdev, vq)) {
            return;
        }
    }

    trace_virtio_notify_irqfd(vdev, vq);

    /*
     * virtio spec 1.0 says ISR bit 0 should be ignored with MSI, but
     * windows drivers included in virtio-win 1.8.0 (circa 2015) are
     * incorrectly polling this bit during crashdump and hibernation
     * in MSI mode, causing a hang if this bit is never updated.
     * Recent releases of Windows do not really shut down, but rather
     * log out and hibernate to make the next startup faster.  Hence,
     * this manifested as a more serious hang during shutdown with
     * Windows.  The next driver release, from 2016, fixed this problem,
     * so working around it is not a must, but it's easy to do, so let's
     * do it here.
     *
     * Note: it's safe to update ISR from any thread as it was switched
     * to an atomic operation.
     */
    virtio_set_isr(vq->vdev, 0x1);
    defer_call(virtio_notify_irqfd_deferred_fn, &vq->guest_notifier);
}

static void virtio_irq(VirtQueue *vq)
{
    virtio_set_isr(vq->vdev, 0x1);
    virtio_notify_vector(vq->vdev, vq->vector);
}

void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    WITH_RCU_READ_LOCK_GUARD() {
        if (!virtio_should_notify(vdev, vq)) {
            return;
        }
    }

    trace_virtio_notify(vdev, vq);
    virtio_irq(vq);
}
void virtio_notify_config(VirtIODevice *vdev)
{
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
        return;

    virtio_set_isr(vdev, 0x3);
    vdev->generation++;
    virtio_notify_vector(vdev, vdev->config_vector);
}
static bool virtio_device_endian_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    assert(vdev->device_endian != VIRTIO_DEVICE_ENDIAN_UNKNOWN);
    if (!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        return vdev->device_endian != virtio_default_endian();
    }
    /* Devices conforming to VIRTIO 1.0 or later are always LE. */
    return vdev->device_endian != VIRTIO_DEVICE_ENDIAN_LITTLE;
}

static bool virtio_64bit_features_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    return (vdev->host_features >> 32) != 0;
}

static bool virtio_virtqueue_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    return virtio_host_has_feature(vdev, VIRTIO_F_VERSION_1);
}

static bool virtio_packed_virtqueue_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    return virtio_host_has_feature(vdev, VIRTIO_F_RING_PACKED);
}
static bool virtio_ringsize_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num != vdev->vq[i].vring.num_default) {
            return true;
        }
    }
    return false;
}
static bool virtio_extra_state_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    return k->has_extra_state &&
        k->has_extra_state(qbus->parent);
}

static bool virtio_broken_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    return vdev->broken;
}

static bool virtio_started_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    return vdev->started;
}

static bool virtio_disabled_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    return vdev->disabled;
}
static const VMStateDescription vmstate_virtqueue = {
    .name = "virtqueue_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(vring.avail, struct VirtQueue),
        VMSTATE_UINT64(vring.used, struct VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_packed_virtqueue = {
    .name = "packed_virtqueue_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT16(last_avail_idx, struct VirtQueue),
        VMSTATE_BOOL(last_avail_wrap_counter, struct VirtQueue),
        VMSTATE_UINT16(used_idx, struct VirtQueue),
        VMSTATE_BOOL(used_wrap_counter, struct VirtQueue),
        VMSTATE_UINT32(inuse, struct VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_virtqueues = {
    .name = "virtio/virtqueues",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_virtqueue_needed,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
                      VIRTIO_QUEUE_MAX, 0, vmstate_virtqueue, VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_packed_virtqueues = {
    .name = "virtio/packed_virtqueues",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_packed_virtqueue_needed,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
                      VIRTIO_QUEUE_MAX, 0, vmstate_packed_virtqueue, VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_ringsize = {
    .name = "ringsize_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(vring.num_default, struct VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_ringsize = {
    .name = "virtio/ringsize",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_ringsize_needed,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
                      VIRTIO_QUEUE_MAX, 0, vmstate_ringsize, VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};
static int get_extra_state(QEMUFile *f, void *pv, size_t size,
                           const VMStateField *field)
{
    VirtIODevice *vdev = pv;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    if (!k->load_extra_state) {
        return -1;
    } else {
        return k->load_extra_state(qbus->parent, f);
    }
}

static int put_extra_state(QEMUFile *f, void *pv, size_t size,
                           const VMStateField *field, JSONWriter *vmdesc)
{
    VirtIODevice *vdev = pv;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    k->save_extra_state(qbus->parent, f);
    return 0;
}
static const VMStateInfo vmstate_info_extra_state = {
    .name = "virtqueue_extra_state",
    .get = get_extra_state,
    .put = put_extra_state,
};

static const VMStateDescription vmstate_virtio_extra_state = {
    .name = "virtio/extra_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_extra_state_needed,
    .fields = (VMStateField[]) {
        {
            .name         = "extra_state",
            .version_id   = 0,
            .field_exists = NULL,
            .size         = 0,
            .info         = &vmstate_info_extra_state,
            .flags        = VMS_SINGLE,
            .offset       = 0,
        },
        VMSTATE_END_OF_LIST()
    }
};
static const VMStateDescription vmstate_virtio_device_endian = {
    .name = "virtio/device_endian",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_device_endian_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(device_endian, VirtIODevice),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_64bit_features = {
    .name = "virtio/64bit_features",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_64bit_features_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(guest_features, VirtIODevice),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_broken = {
    .name = "virtio/broken",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_broken_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(broken, VirtIODevice),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_started = {
    .name = "virtio/started",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_started_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(started, VirtIODevice),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_disabled = {
    .name = "virtio/disabled",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_disabled_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(disabled, VirtIODevice),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio = {
    .name = "virtio",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription *[]) {
        &vmstate_virtio_device_endian,
        &vmstate_virtio_64bit_features,
        &vmstate_virtio_virtqueues,
        &vmstate_virtio_ringsize,
        &vmstate_virtio_broken,
        &vmstate_virtio_extra_state,
        &vmstate_virtio_started,
        &vmstate_virtio_packed_virtqueues,
        &vmstate_virtio_disabled,
        NULL
    }
};
int virtio_save(VirtIODevice *vdev, QEMUFile *f)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t guest_features_lo = (vdev->guest_features & 0xffffffff);
    int i;

    if (k->save_config) {
        k->save_config(qbus->parent, f);
    }

    qemu_put_8s(f, &vdev->status);
    qemu_put_8s(f, &vdev->isr);
    qemu_put_be16s(f, &vdev->queue_sel);
    qemu_put_be32s(f, &guest_features_lo);
    qemu_put_be32(f, vdev->config_len);
    qemu_put_buffer(f, vdev->config, vdev->config_len);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;
    }

    qemu_put_be32(f, i);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;

        qemu_put_be32(f, vdev->vq[i].vring.num);
        if (k->has_variable_vring_alignment) {
            qemu_put_be32(f, vdev->vq[i].vring.align);
        }
        /*
         * Save desc now, the rest of the ring addresses are saved in
         * subsections for VIRTIO-1 devices.
         */
        qemu_put_be64(f, vdev->vq[i].vring.desc);
        qemu_put_be16s(f, &vdev->vq[i].last_avail_idx);
        if (k->save_queue) {
            k->save_queue(qbus->parent, i, f);
        }
    }

    if (vdc->save != NULL) {
        vdc->save(vdev, f);
    }

    if (vdc->vmsd) {
        int ret = vmstate_save_state(f, vdc->vmsd, vdev, NULL);
        if (ret) {
            return ret;
        }
    }

    /* Subsections */
    return vmstate_save_state(f, &vmstate_virtio, vdev, NULL);
}
/* A wrapper for use as a VMState .put function */
static int virtio_device_put(QEMUFile *f, void *opaque, size_t size,
                             const VMStateField *field, JSONWriter *vmdesc)
{
    return virtio_save(VIRTIO_DEVICE(opaque), f);
}

/* A wrapper for use as a VMState .get function */
static int coroutine_mixed_fn
virtio_device_get(QEMUFile *f, void *opaque, size_t size,
                  const VMStateField *field)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(opaque);
    DeviceClass *dc = DEVICE_CLASS(VIRTIO_DEVICE_GET_CLASS(vdev));

    return virtio_load(vdev, f, dc->vmsd->version_id);
}
const VMStateInfo virtio_vmstate_info = {
    .name = "virtio",
    .get = virtio_device_get,
    .put = virtio_device_put,
};
static int virtio_set_features_nocheck(VirtIODevice *vdev, uint64_t val)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    bool bad = (val & ~(vdev->host_features)) != 0;

    val &= vdev->host_features;
    if (k->set_features) {
        k->set_features(vdev, val);
    }
    vdev->guest_features = val;
    return bad ? -1 : 0;
}
typedef struct VirtioSetFeaturesNocheckData {
    Coroutine *co;
    VirtIODevice *vdev;
    uint64_t val;
    int ret;
} VirtioSetFeaturesNocheckData;
static void virtio_set_features_nocheck_bh(void *opaque)
{
    VirtioSetFeaturesNocheckData *data = opaque;

    data->ret = virtio_set_features_nocheck(data->vdev, data->val);
    aio_co_wake(data->co);
}
static int coroutine_mixed_fn
virtio_set_features_nocheck_maybe_co(VirtIODevice *vdev, uint64_t val)
{
    if (qemu_in_coroutine()) {
        VirtioSetFeaturesNocheckData data = {
            .co = qemu_coroutine_self(),
            .vdev = vdev,
            .val = val,
        };
        aio_bh_schedule_oneshot(qemu_get_current_aio_context(),
                                virtio_set_features_nocheck_bh, &data);
        qemu_coroutine_yield();
        return data.ret;
    } else {
        return virtio_set_features_nocheck(vdev, val);
    }
}
int virtio_set_features(VirtIODevice *vdev, uint64_t val)
{
    int ret;
    /*
     * The driver must not attempt to set features after feature negotiation
     * has finished.
     */
    if (vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) {
        return -EINVAL;
    }

    if (val & (1ull << VIRTIO_F_BAD_FEATURE)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: guest driver for %s has enabled UNUSED(30) feature bit!\n",
                      __func__, vdev->name);
    }

    ret = virtio_set_features_nocheck(vdev, val);
    if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
        /* VIRTIO_RING_F_EVENT_IDX changes the size of the caches. */
        int i;
        for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
            if (vdev->vq[i].vring.num != 0) {
                virtio_init_region_cache(vdev, i);
            }
        }
    }
    if (!ret) {
        if (!virtio_device_started(vdev, vdev->status) &&
            !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
            vdev->start_on_kick = true;
        }
    }
    return ret;
}
size_t virtio_get_config_size(const VirtIOConfigSizeParams *params,
                              uint64_t host_features)
{
    size_t config_size = params->min_size;
    const VirtIOFeature *feature_sizes = params->feature_sizes;
    size_t i;

    for (i = 0; feature_sizes[i].flags != 0; i++) {
        if (host_features & feature_sizes[i].flags) {
            config_size = MAX(feature_sizes[i].end, config_size);
        }
    }

    assert(config_size <= params->max_size);
    return config_size;
}
int coroutine_mixed_fn
virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
{
    int i, ret;
    int32_t config_len;
    uint32_t num;
    uint32_t features;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);

    /*
     * We poison the endianness to ensure it does not get used before
     * subsections have been loaded.
     */
    vdev->device_endian = VIRTIO_DEVICE_ENDIAN_UNKNOWN;

    if (k->load_config) {
        ret = k->load_config(qbus->parent, f);
        if (ret) {
            return ret;
        }
    }

    qemu_get_8s(f, &vdev->status);
    qemu_get_8s(f, &vdev->isr);
    qemu_get_be16s(f, &vdev->queue_sel);
    if (vdev->queue_sel >= VIRTIO_QUEUE_MAX) {
        return -1;
    }
    qemu_get_be32s(f, &features);

    /*
     * Temporarily set guest_features low bits - needed by
     * virtio net load code testing for VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,
     * VIRTIO_NET_F_GUEST_ANNOUNCE and VIRTIO_NET_F_CTRL_VQ.
     *
     * Note: devices should always test host features in future - don't create
     * new dependencies like this.
     */
    vdev->guest_features = features;

    config_len = qemu_get_be32(f);

    /*
     * There are cases where the incoming config can be bigger or smaller
     * than what we have; so load what we have space for, and skip
     * any excess that's in the stream.
     */
    qemu_get_buffer(f, vdev->config, MIN(config_len, vdev->config_len));

    while (config_len > vdev->config_len) {
        qemu_get_byte(f);
        config_len--;
    }

    num = qemu_get_be32(f);

    if (num > VIRTIO_QUEUE_MAX) {
        error_report("Invalid number of virtqueues: 0x%x", num);
        return -1;
    }

    for (i = 0; i < num; i++) {
        vdev->vq[i].vring.num = qemu_get_be32(f);
        if (k->has_variable_vring_alignment) {
            vdev->vq[i].vring.align = qemu_get_be32(f);
        }
        vdev->vq[i].vring.desc = qemu_get_be64(f);
        qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);
        vdev->vq[i].signalled_used_valid = false;
        vdev->vq[i].notification = true;

        if (!vdev->vq[i].vring.desc && vdev->vq[i].last_avail_idx) {
            error_report("VQ %d address 0x0 "
                         "inconsistent with Host index 0x%x",
                         i, vdev->vq[i].last_avail_idx);
            return -1;
        }
        if (k->load_queue) {
            ret = k->load_queue(qbus->parent, i, f);
            if (ret) {
                return ret;
            }
        }
    }

    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);

    if (vdc->load != NULL) {
        ret = vdc->load(vdev, f, version_id);
        if (ret) {
            return ret;
        }
    }

    if (vdc->vmsd) {
        ret = vmstate_load_state(f, vdc->vmsd, vdev, version_id);
        if (ret) {
            return ret;
        }
    }

    /* Subsections */
    ret = vmstate_load_state(f, &vmstate_virtio, vdev, 1);
    if (ret) {
        return ret;
    }

    if (vdev->device_endian == VIRTIO_DEVICE_ENDIAN_UNKNOWN) {
        vdev->device_endian = virtio_default_endian();
    }

    if (virtio_64bit_features_needed(vdev)) {
        /*
         * Subsection load filled vdev->guest_features.  Run them
         * through virtio_set_features to sanity-check them against
         * host_features.
         */
        uint64_t features64 = vdev->guest_features;
        if (virtio_set_features_nocheck_maybe_co(vdev, features64) < 0) {
            error_report("Features 0x%" PRIx64 " unsupported. "
                         "Allowed features: 0x%" PRIx64,
                         features64, vdev->host_features);
            return -1;
        }
    } else {
        if (virtio_set_features_nocheck_maybe_co(vdev, features) < 0) {
            error_report("Features 0x%x unsupported. "
                         "Allowed features: 0x%" PRIx64,
                         features, vdev->host_features);
            return -1;
        }
    }

    if (!virtio_device_started(vdev, vdev->status) &&
        !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        vdev->start_on_kick = true;
    }

    RCU_READ_LOCK_GUARD();
    for (i = 0; i < num; i++) {
        if (vdev->vq[i].vring.desc) {
            uint16_t nheads;

            /*
             * VIRTIO-1 devices migrate desc, used, and avail ring addresses so
             * only the region cache needs to be set up.  Legacy devices need
             * to calculate used and avail ring addresses based on the desc
             * address.
             */
            if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
                virtio_init_region_cache(vdev, i);
            } else {
                virtio_queue_update_rings(vdev, i);
            }

            if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
                vdev->vq[i].shadow_avail_idx = vdev->vq[i].last_avail_idx;
                vdev->vq[i].shadow_avail_wrap_counter =
                                        vdev->vq[i].last_avail_wrap_counter;
                continue;
            }

            nheads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx;
            /* Check it isn't doing strange things with descriptor numbers. */
            if (nheads > vdev->vq[i].vring.num) {
                virtio_error(vdev, "VQ %d size 0x%x Guest index 0x%x "
                             "inconsistent with Host index 0x%x: delta 0x%x",
                             i, vdev->vq[i].vring.num,
                             vring_avail_idx(&vdev->vq[i]),
                             vdev->vq[i].last_avail_idx, nheads);
                vdev->vq[i].used_idx = 0;
                vdev->vq[i].shadow_avail_idx = 0;
                vdev->vq[i].inuse = 0;
                continue;
            }
            vdev->vq[i].used_idx = vring_used_idx(&vdev->vq[i]);
            vdev->vq[i].shadow_avail_idx = vring_avail_idx(&vdev->vq[i]);

            /*
             * Some devices migrate VirtQueueElements that have been popped
             * from the avail ring but not yet returned to the used ring.
             * Since max ring size < UINT16_MAX it's safe to use modulo
             * UINT16_MAX + 1 subtraction.
             */
            vdev->vq[i].inuse = (uint16_t)(vdev->vq[i].last_avail_idx -
                                vdev->vq[i].used_idx);
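            /*
             * Worked example (added for clarity): with last_avail_idx = 3
             * and used_idx = 0xFFFE, the 16-bit subtraction wraps to 5,
             * i.e. five elements are in flight.  The cast keeps the result
             * well-defined precisely because ring sizes never reach
             * UINT16_MAX.
             */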
            if (vdev->vq[i].inuse > vdev->vq[i].vring.num) {
                error_report("VQ %d size 0x%x < last_avail_idx 0x%x - "
                             "used_idx 0x%x",
                             i, vdev->vq[i].vring.num,
                             vdev->vq[i].last_avail_idx,
                             vdev->vq[i].used_idx);
                return -1;
            }
        }
    }

    if (vdc->post_load) {
        ret = vdc->post_load(vdev);
        if (ret) {
            return ret;
        }
    }

    return 0;
}
void virtio_cleanup(VirtIODevice *vdev)
{
    qemu_del_vm_change_state_handler(vdev->vmstate);
}
static void virtio_vmstate_change(void *opaque, bool running, RunState state)
{
    VirtIODevice *vdev = opaque;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    bool backend_run = running && virtio_device_started(vdev, vdev->status);
    vdev->vm_running = running;

    if (backend_run) {
        virtio_set_status(vdev, vdev->status);
    }

    if (k->vmstate_change) {
        k->vmstate_change(qbus->parent, backend_run);
    }

    if (!backend_run) {
        virtio_set_status(vdev, vdev->status);
    }
}
void virtio_instance_init_common(Object *proxy_obj, void *data,
                                 size_t vdev_size, const char *vdev_name)
{
    DeviceState *vdev = data;

    object_initialize_child_with_props(proxy_obj, "virtio-backend", vdev,
                                       vdev_size, vdev_name, &error_abort,
                                       NULL);
    qdev_alias_all_properties(vdev, proxy_obj);
}
void virtio_init(VirtIODevice *vdev, uint16_t device_id, size_t config_size)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    int i;
    int nvectors = k->query_nvectors ? k->query_nvectors(qbus->parent) : 0;

    if (nvectors) {
        vdev->vector_queues =
            g_malloc0(sizeof(*vdev->vector_queues) * nvectors);
    }

    vdev->start_on_kick = false;
    vdev->started = false;
    vdev->vhost_started = false;
    vdev->device_id = device_id;
    vdev->status = 0;
    qatomic_set(&vdev->isr, 0);
    vdev->queue_sel = 0;
    vdev->config_vector = VIRTIO_NO_VECTOR;
    vdev->vq = g_new0(VirtQueue, VIRTIO_QUEUE_MAX);
    vdev->vm_running = runstate_is_running();
    vdev->broken = false;
    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        vdev->vq[i].vector = VIRTIO_NO_VECTOR;
        vdev->vq[i].vdev = vdev;
        vdev->vq[i].queue_index = i;
        vdev->vq[i].host_notifier_enabled = false;
    }

    vdev->name = virtio_id_to_name(device_id);
    vdev->config_len = config_size;
    if (vdev->config_len) {
        vdev->config = g_malloc0(config_size);
    } else {
        vdev->config = NULL;
    }
    vdev->vmstate = qdev_add_vm_change_state_handler(DEVICE(vdev),
            virtio_vmstate_change, vdev);
    vdev->device_endian = virtio_default_endian();
    vdev->use_guest_notifier_mask = true;
}
/*
 * Only devices that have already been around prior to defining the virtio
 * standard support legacy mode; this includes devices not specified in the
 * standard.  All newer devices conform to the virtio standard only.
 */
bool virtio_legacy_allowed(VirtIODevice *vdev)
{
    switch (vdev->device_id) {
    case VIRTIO_ID_NET:
    case VIRTIO_ID_BLOCK:
    case VIRTIO_ID_CONSOLE:
    case VIRTIO_ID_RNG:
    case VIRTIO_ID_BALLOON:
    case VIRTIO_ID_RPMSG:
    case VIRTIO_ID_SCSI:
    case VIRTIO_ID_9P:
    case VIRTIO_ID_RPROC_SERIAL:
    case VIRTIO_ID_CAIF:
        return true;
    default:
        return false;
    }
}

bool virtio_legacy_check_disabled(VirtIODevice *vdev)
{
    return vdev->disable_legacy_check;
}
hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

bool virtio_queue_enabled_legacy(VirtIODevice *vdev, int n)
{
    return virtio_queue_get_desc_addr(vdev, n) != 0;
}

bool virtio_queue_enabled(VirtIODevice *vdev, int n)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    if (k->queue_enabled) {
        return k->queue_enabled(qbus->parent, n);
    }
    return virtio_queue_enabled_legacy(vdev, n);
}

hwaddr virtio_queue_get_avail_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.avail;
}

hwaddr virtio_queue_get_used_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.used;
}

hwaddr virtio_queue_get_desc_size(VirtIODevice *vdev, int n)
{
    return sizeof(VRingDesc) * vdev->vq[n].vring.num;
}
hwaddr virtio_queue_get_avail_size(VirtIODevice *vdev, int n)
{
    int s;

    if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
        return sizeof(struct VRingPackedDescEvent);
    }

    s = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
    return offsetof(VRingAvail, ring) +
        sizeof(uint16_t) * vdev->vq[n].vring.num + s;
}

hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n)
{
    int s;

    if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
        return sizeof(struct VRingPackedDescEvent);
    }

    s = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
    return offsetof(VRingUsed, ring) +
        sizeof(VRingUsedElem) * vdev->vq[n].vring.num + s;
}
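
/*
 * Worked example (added for clarity): for a split ring of 256 entries
 * with VIRTIO_RING_F_EVENT_IDX negotiated, the avail area is
 * 4 + 2 * 256 + 2 = 518 bytes and the used area is 4 + 8 * 256 + 2 =
 * 2054 bytes; the trailing 2 bytes in each hold the event suppression
 * index.
 */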
static unsigned int virtio_queue_packed_get_last_avail_idx(VirtIODevice *vdev,
                                                           int n)
{
    unsigned int avail, used;

    avail = vdev->vq[n].last_avail_idx;
    avail |= ((uint16_t)vdev->vq[n].last_avail_wrap_counter) << 15;

    used = vdev->vq[n].used_idx;
    used |= ((uint16_t)vdev->vq[n].used_wrap_counter) << 15;

    return avail | used << 16;
}
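
/*
 * Layout note (added for clarity): the returned value packs two 16-bit
 * words:
 *
 *     bits  0-14  last_avail_idx      bit 15  last_avail_wrap_counter
 *     bits 16-30  used_idx            bit 31  used_wrap_counter
 *
 * virtio_queue_packed_set_last_avail_idx() below consumes the same
 * encoding when restoring the indices.
 */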
static uint16_t virtio_queue_split_get_last_avail_idx(VirtIODevice *vdev,
                                                      int n)
{
    return vdev->vq[n].last_avail_idx;
}

unsigned int virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
{
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
        return virtio_queue_packed_get_last_avail_idx(vdev, n);
    } else {
        return virtio_queue_split_get_last_avail_idx(vdev, n);
    }
}
static void virtio_queue_packed_set_last_avail_idx(VirtIODevice *vdev,
                                                   int n, unsigned int idx)
{
    struct VirtQueue *vq = &vdev->vq[n];

    vq->last_avail_idx = vq->shadow_avail_idx = idx & 0x7fff;
    vq->last_avail_wrap_counter =
        vq->shadow_avail_wrap_counter = !!(idx & 0x8000);
    idx >>= 16;
    vq->used_idx = idx & 0x7fff;
    vq->used_wrap_counter = !!(idx & 0x8000);
}

static void virtio_queue_split_set_last_avail_idx(VirtIODevice *vdev,
                                                  int n, unsigned int idx)
{
    vdev->vq[n].last_avail_idx = idx;
    vdev->vq[n].shadow_avail_idx = idx;
}

void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n,
                                     unsigned int idx)
{
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
        virtio_queue_packed_set_last_avail_idx(vdev, n, idx);
    } else {
        virtio_queue_split_set_last_avail_idx(vdev, n, idx);
    }
}
static void virtio_queue_packed_restore_last_avail_idx(VirtIODevice *vdev,
                                                       int n)
{
    /* We don't have a reference like avail idx in shared memory */
    return;
}

static void virtio_queue_split_restore_last_avail_idx(VirtIODevice *vdev,
                                                      int n)
{
    RCU_READ_LOCK_GUARD();
    if (vdev->vq[n].vring.desc) {
        vdev->vq[n].last_avail_idx = vring_used_idx(&vdev->vq[n]);
        vdev->vq[n].shadow_avail_idx = vdev->vq[n].last_avail_idx;
    }
}

void virtio_queue_restore_last_avail_idx(VirtIODevice *vdev, int n)
{
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
        virtio_queue_packed_restore_last_avail_idx(vdev, n);
    } else {
        virtio_queue_split_restore_last_avail_idx(vdev, n);
    }
}
static void virtio_queue_packed_update_used_idx(VirtIODevice *vdev, int n)
{
    /* used idx was updated through set_last_avail_idx() */
    return;
}

static void virtio_split_packed_update_used_idx(VirtIODevice *vdev, int n)
{
    RCU_READ_LOCK_GUARD();
    if (vdev->vq[n].vring.desc) {
        vdev->vq[n].used_idx = vring_used_idx(&vdev->vq[n]);
    }
}

void virtio_queue_update_used_idx(VirtIODevice *vdev, int n)
{
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
        return virtio_queue_packed_update_used_idx(vdev, n);
    }

    return virtio_split_packed_update_used_idx(vdev, n);
}
void virtio_queue_invalidate_signalled_used(VirtIODevice *vdev, int n)
{
    vdev->vq[n].signalled_used_valid = false;
}

VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n)
{
    return vdev->vq + n;
}

uint16_t virtio_get_queue_index(VirtQueue *vq)
{
    return vq->queue_index;
}
static void virtio_queue_guest_notifier_read(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, guest_notifier);
    if (event_notifier_test_and_clear(n)) {
        virtio_irq(vq);
    }
}

static void virtio_config_guest_notifier_read(EventNotifier *n)
{
    VirtIODevice *vdev = container_of(n, VirtIODevice, config_notifier);

    if (event_notifier_test_and_clear(n)) {
        virtio_notify_config(vdev);
    }
}
*vq
, bool assign
,
3492 if (assign
&& !with_irqfd
) {
3493 event_notifier_set_handler(&vq
->guest_notifier
,
3494 virtio_queue_guest_notifier_read
);
3496 event_notifier_set_handler(&vq
->guest_notifier
, NULL
);
3499 /* Test and clear notifier before closing it,
3500 * in case poll callback didn't have time to run. */
3501 virtio_queue_guest_notifier_read(&vq
->guest_notifier
);
3505 void virtio_config_set_guest_notifier_fd_handler(VirtIODevice
*vdev
,
3506 bool assign
, bool with_irqfd
)
3509 n
= &vdev
->config_notifier
;
3510 if (assign
&& !with_irqfd
) {
3511 event_notifier_set_handler(n
, virtio_config_guest_notifier_read
);
3513 event_notifier_set_handler(n
, NULL
);
3516 /* Test and clear notifier before closing it,*/
3517 /* in case poll callback didn't have time to run. */
3518 virtio_config_guest_notifier_read(n
);
EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq)
{
    return &vq->guest_notifier;
}

static void virtio_queue_host_notifier_aio_poll_begin(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);

    virtio_queue_set_notification(vq, 0);
}

static bool virtio_queue_host_notifier_aio_poll(void *opaque)
{
    EventNotifier *n = opaque;
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);

    return vq->vring.desc && !virtio_queue_empty(vq);
}

static void virtio_queue_host_notifier_aio_poll_ready(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);

    virtio_queue_notify_vq(vq);
}

static void virtio_queue_host_notifier_aio_poll_end(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);

    /* Caller polls once more after this to catch requests that race with us */
    virtio_queue_set_notification(vq, 1);
}

void virtio_queue_aio_attach_host_notifier(VirtQueue *vq, AioContext *ctx)
{
    aio_set_event_notifier(ctx, &vq->host_notifier,
                           virtio_queue_host_notifier_read,
                           virtio_queue_host_notifier_aio_poll,
                           virtio_queue_host_notifier_aio_poll_ready);
    aio_set_event_notifier_poll(ctx, &vq->host_notifier,
                                virtio_queue_host_notifier_aio_poll_begin,
                                virtio_queue_host_notifier_aio_poll_end);
}
/*
 * Same as virtio_queue_aio_attach_host_notifier() but without polling. Use
 * this for rx virtqueues and similar cases where the virtqueue handler
 * function does not pop all elements. When the virtqueue is left non-empty,
 * polling consumes CPU cycles and should not be used.
 */
void virtio_queue_aio_attach_host_notifier_no_poll(VirtQueue *vq, AioContext *ctx)
{
    aio_set_event_notifier(ctx, &vq->host_notifier,
                           virtio_queue_host_notifier_read,
                           NULL, NULL);
}

void virtio_queue_aio_detach_host_notifier(VirtQueue *vq, AioContext *ctx)
{
    aio_set_event_notifier(ctx, &vq->host_notifier, NULL, NULL, NULL);
}
void virtio_queue_host_notifier_read(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
    if (event_notifier_test_and_clear(n)) {
        virtio_queue_notify_vq(vq);
    }
}

EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq)
{
    return &vq->host_notifier;
}

EventNotifier *virtio_config_get_guest_notifier(VirtIODevice *vdev)
{
    return &vdev->config_notifier;
}

void virtio_queue_set_host_notifier_enabled(VirtQueue *vq, bool enabled)
{
    vq->host_notifier_enabled = enabled;
}
int virtio_queue_set_host_notifier_mr(VirtIODevice *vdev, int n,
                                      MemoryRegion *mr, bool assign)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    if (k->set_host_notifier_mr) {
        return k->set_host_notifier_mr(qbus->parent, n, mr, assign);
    }

    return -1;
}
void virtio_device_set_child_bus_name(VirtIODevice *vdev, char *bus_name)
{
    g_free(vdev->bus_name);
    vdev->bus_name = g_strdup(bus_name);
}
void G_GNUC_PRINTF(2, 3) virtio_error(VirtIODevice *vdev, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    error_vreport(fmt, ap);
    va_end(ap);

    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        vdev->status = vdev->status | VIRTIO_CONFIG_S_NEEDS_RESET;
        virtio_notify_config(vdev);
    }

    vdev->broken = true;
}
static void virtio_memory_listener_commit(MemoryListener *listener)
{
    VirtIODevice *vdev = container_of(listener, VirtIODevice, listener);
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0) {
            break;
        }
        virtio_init_region_cache(vdev, i);
    }
}
static void virtio_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
    Error *err = NULL;

    /* Devices should either use vmsd or the load/save methods */
    assert(!vdc->vmsd || !vdc->load);

    if (vdc->realize != NULL) {
        vdc->realize(dev, &err);
        if (err != NULL) {
            error_propagate(errp, err);
            return;
        }
    }

    virtio_bus_device_plugged(vdev, &err);
    if (err != NULL) {
        error_propagate(errp, err);
        vdc->unrealize(dev);
        return;
    }

    vdev->listener.commit = virtio_memory_listener_commit;
    vdev->listener.name = "virtio";
    memory_listener_register(&vdev->listener, vdev->dma_as);
}
static void virtio_device_unrealize(DeviceState *dev)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);

    memory_listener_unregister(&vdev->listener);
    virtio_bus_device_unplugged(vdev);

    if (vdc->unrealize != NULL) {
        vdc->unrealize(dev);
    }

    g_free(vdev->bus_name);
    vdev->bus_name = NULL;
}
static void virtio_device_free_virtqueues(VirtIODevice *vdev)
{
    int i;

    if (!vdev->vq) {
        return;
    }

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0) {
            break;
        }
        virtio_virtqueue_reset_region_cache(&vdev->vq[i]);
    }
    g_free(vdev->vq);
}
static void virtio_device_instance_finalize(Object *obj)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(obj);

    virtio_device_free_virtqueues(vdev);

    g_free(vdev->config);
    g_free(vdev->vector_queues);
}

static Property virtio_properties[] = {
    DEFINE_VIRTIO_COMMON_FEATURES(VirtIODevice, host_features),
    DEFINE_PROP_BOOL("use-started", VirtIODevice, use_started, true),
    DEFINE_PROP_BOOL("use-disabled-flag", VirtIODevice, use_disabled_flag, true),
    DEFINE_PROP_BOOL("x-disable-legacy-check", VirtIODevice,
                     disable_legacy_check, false),
    DEFINE_PROP_END_OF_LIST(),
};
static int virtio_device_start_ioeventfd_impl(VirtIODevice *vdev)
{
    VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev)));
    int i, n, r, err;

    /*
     * Batch all the host notifiers in a single transaction to avoid
     * quadratic time complexity in address_space_update_ioeventfds().
     */
    memory_region_transaction_begin();
    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
        VirtQueue *vq = &vdev->vq[n];
        if (!virtio_queue_get_num(vdev, n)) {
            continue;
        }
        r = virtio_bus_set_host_notifier(qbus, n, true);
        if (r < 0) {
            err = r;
            goto assign_error;
        }
        event_notifier_set_handler(&vq->host_notifier,
                                   virtio_queue_host_notifier_read);
    }

    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
        /* Kick right away to begin processing requests already in vring */
        VirtQueue *vq = &vdev->vq[n];
        if (!vq->vring.num) {
            continue;
        }
        event_notifier_set(&vq->host_notifier);
    }
    memory_region_transaction_commit();
    return 0;

assign_error:
    i = n; /* save n for a second iteration after transaction is committed. */
    while (--n >= 0) {
        VirtQueue *vq = &vdev->vq[n];
        if (!virtio_queue_get_num(vdev, n)) {
            continue;
        }

        event_notifier_set_handler(&vq->host_notifier, NULL);
        r = virtio_bus_set_host_notifier(qbus, n, false);
        assert(r >= 0);
    }
    /*
     * The transaction expects the ioeventfds to be open when it
     * commits. Do it now, before the cleanup loop.
     */
    memory_region_transaction_commit();

    while (--i >= 0) {
        if (!virtio_queue_get_num(vdev, i)) {
            continue;
        }
        virtio_bus_cleanup_host_notifier(qbus, i);
    }
    return err;
}
int virtio_device_start_ioeventfd(VirtIODevice *vdev)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);

    return virtio_bus_start_ioeventfd(vbus);
}
static void virtio_device_stop_ioeventfd_impl(VirtIODevice *vdev)
{
    VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev)));
    int n, r;

    /*
     * Batch all the host notifiers in a single transaction to avoid
     * quadratic time complexity in address_space_update_ioeventfds().
     */
    memory_region_transaction_begin();
    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
        VirtQueue *vq = &vdev->vq[n];

        if (!virtio_queue_get_num(vdev, n)) {
            continue;
        }
        event_notifier_set_handler(&vq->host_notifier, NULL);
        r = virtio_bus_set_host_notifier(qbus, n, false);
        assert(r >= 0);
    }
    /*
     * The transaction expects the ioeventfds to be open when it
     * commits. Do it now, before the cleanup loop.
     */
    memory_region_transaction_commit();

    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            continue;
        }
        virtio_bus_cleanup_host_notifier(qbus, n);
    }
}
int virtio_device_grab_ioeventfd(VirtIODevice *vdev)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);

    return virtio_bus_grab_ioeventfd(vbus);
}

void virtio_device_release_ioeventfd(VirtIODevice *vdev)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);

    virtio_bus_release_ioeventfd(vbus);
}
static void virtio_device_class_init(ObjectClass *klass, void *data)
{
    /* Set the default value here. */
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = virtio_device_realize;
    dc->unrealize = virtio_device_unrealize;
    dc->bus_type = TYPE_VIRTIO_BUS;
    device_class_set_props(dc, virtio_properties);
    vdc->start_ioeventfd = virtio_device_start_ioeventfd_impl;
    vdc->stop_ioeventfd = virtio_device_stop_ioeventfd_impl;

    vdc->legacy_features |= VIRTIO_LEGACY_FEATURES;
}

bool virtio_device_ioeventfd_enabled(VirtIODevice *vdev)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);

    return virtio_bus_ioeventfd_enabled(vbus);
}
VirtQueueStatus *qmp_x_query_virtio_queue_status(const char *path,
                                                 uint16_t queue,
                                                 Error **errp)
{
    VirtIODevice *vdev;
    VirtQueueStatus *status;

    vdev = qmp_find_virtio_device(path);
    if (vdev == NULL) {
        error_setg(errp, "Path %s is not a VirtIODevice", path);
        return NULL;
    }

    if (queue >= VIRTIO_QUEUE_MAX || !virtio_queue_get_num(vdev, queue)) {
        error_setg(errp, "Invalid virtqueue number %d", queue);
        return NULL;
    }

    status = g_new0(VirtQueueStatus, 1);
    status->name = g_strdup(vdev->name);
    status->queue_index = vdev->vq[queue].queue_index;
    status->inuse = vdev->vq[queue].inuse;
    status->vring_num = vdev->vq[queue].vring.num;
    status->vring_num_default = vdev->vq[queue].vring.num_default;
    status->vring_align = vdev->vq[queue].vring.align;
    status->vring_desc = vdev->vq[queue].vring.desc;
    status->vring_avail = vdev->vq[queue].vring.avail;
    status->vring_used = vdev->vq[queue].vring.used;
    status->used_idx = vdev->vq[queue].used_idx;
    status->signalled_used = vdev->vq[queue].signalled_used;
    status->signalled_used_valid = vdev->vq[queue].signalled_used_valid;

    if (vdev->vhost_started) {
        VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
        struct vhost_dev *hdev = vdc->get_vhost(vdev);

        /* check if vq index exists for vhost as well */
        if (queue >= hdev->vq_index && queue < hdev->vq_index + hdev->nvqs) {
            status->has_last_avail_idx = true;

            int vhost_vq_index =
                hdev->vhost_ops->vhost_get_vq_index(hdev, queue);
            struct vhost_vring_state state = {
                .index = vhost_vq_index,
            };

            status->last_avail_idx =
                hdev->vhost_ops->vhost_get_vring_base(hdev, &state);
        }
    } else {
        status->has_shadow_avail_idx = true;
        status->has_last_avail_idx = true;
        status->last_avail_idx = vdev->vq[queue].last_avail_idx;
        status->shadow_avail_idx = vdev->vq[queue].shadow_avail_idx;
    }

    return status;
}
static strList *qmp_decode_vring_desc_flags(uint16_t flags)
{
    strList *list = NULL;
    strList *node;
    int i;

    struct {
        uint16_t flag;
        const char *value;
    } map[] = {
        { VRING_DESC_F_NEXT, "next" },
        { VRING_DESC_F_WRITE, "write" },
        { VRING_DESC_F_INDIRECT, "indirect" },
        { 1 << VRING_PACKED_DESC_F_AVAIL, "avail" },
        { 1 << VRING_PACKED_DESC_F_USED, "used" },
        { 0, "" }
    };

    for (i = 0; map[i].flag; i++) {
        if ((map[i].flag & flags) == 0) {
            continue;
        }
        node = g_malloc0(sizeof(strList));
        node->value = g_strdup(map[i].value);
        node->next = list;
        list = node;
    }

    return list;
}
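
/*
 * Example (added for clarity): flags = VRING_DESC_F_NEXT |
 * VRING_DESC_F_WRITE decodes to the string list ["write", "next"];
 * entries appear in reverse table order because the list is built by
 * prepending.
 */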
VirtioQueueElement *qmp_x_query_virtio_queue_element(const char *path,
                                                     uint16_t queue,
                                                     bool has_index,
                                                     uint16_t index,
                                                     Error **errp)
{
    VirtIODevice *vdev;
    VirtQueue *vq;
    VirtioQueueElement *element = NULL;

    vdev = qmp_find_virtio_device(path);
    if (vdev == NULL) {
        error_setg(errp, "Path %s is not a VirtIO device", path);
        return NULL;
    }

    if (queue >= VIRTIO_QUEUE_MAX || !virtio_queue_get_num(vdev, queue)) {
        error_setg(errp, "Invalid virtqueue number %d", queue);
        return NULL;
    }
    vq = &vdev->vq[queue];

    if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
        error_setg(errp, "Packed ring not supported");
        return NULL;
    } else {
        unsigned int head, i, max;
        VRingMemoryRegionCaches *caches;
        MemoryRegionCache indirect_desc_cache;
        MemoryRegionCache *desc_cache;
        VRingDesc desc;
        VirtioRingDescList *list = NULL;
        VirtioRingDescList *node;
        int rc;
        int ndescs;

        address_space_cache_init_empty(&indirect_desc_cache);

        RCU_READ_LOCK_GUARD();

        max = vq->vring.num;

        if (!has_index) {
            head = vring_avail_ring(vq, vq->last_avail_idx % vq->vring.num);
        } else {
            head = vring_avail_ring(vq, index % vq->vring.num);
        }
        i = head;

        caches = vring_get_region_caches(vq);
        if (!caches) {
            error_setg(errp, "Region caches not initialized");
            return NULL;
        }
        if (caches->desc.len < max * sizeof(VRingDesc)) {
            error_setg(errp, "Cannot map descriptor ring");
            return NULL;
        }

        desc_cache = &caches->desc;
        vring_split_desc_read(vdev, &desc, desc_cache, i);
        if (desc.flags & VRING_DESC_F_INDIRECT) {
            int64_t len;
            len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as,
                                           desc.addr, desc.len, false);
            desc_cache = &indirect_desc_cache;
            if (len < desc.len) {
                error_setg(errp, "Cannot map indirect buffer");
                goto done;
            }

            max = desc.len / sizeof(VRingDesc);
            i = 0;
            vring_split_desc_read(vdev, &desc, desc_cache, i);
        }

        element = g_new0(VirtioQueueElement, 1);
        element->avail = g_new0(VirtioRingAvail, 1);
        element->used = g_new0(VirtioRingUsed, 1);
        element->name = g_strdup(vdev->name);
        element->index = head;
        element->avail->flags = vring_avail_flags(vq);
        element->avail->idx = vring_avail_idx(vq);
        element->avail->ring = head;
        element->used->flags = vring_used_flags(vq);
        element->used->idx = vring_used_idx(vq);
        ndescs = 0;

        do {
            /* A buggy driver may produce an infinite loop */
            if (ndescs >= max) {
                break;
            }
            node = g_new0(VirtioRingDescList, 1);
            node->value = g_new0(VirtioRingDesc, 1);
            node->value->addr = desc.addr;
            node->value->len = desc.len;
            node->value->flags = qmp_decode_vring_desc_flags(desc.flags);
            node->next = list;
            list = node;

            ndescs++;
            rc = virtqueue_split_read_next_desc(vdev, &desc, desc_cache, max);
        } while (rc == VIRTQUEUE_READ_DESC_MORE);
        element->descs = list;
done:
        address_space_cache_destroy(&indirect_desc_cache);
    }

    return element;
}
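
/*
 * Illustrative QMP exchange (hypothetical path and values, shown for
 * orientation only):
 *
 *   -> { "execute": "x-query-virtio-queue-element",
 *        "arguments": { "path": "/machine/peripheral/vblk0/virtio-backend",
 *                       "queue": 0 } }
 *   <- { "return": { "name": "virtio-blk", "index": 5,
 *                    "avail": { ... }, "used": { ... },
 *                    "descs": [ ... ] } }
 */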
static const TypeInfo virtio_device_info = {
    .name = TYPE_VIRTIO_DEVICE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(VirtIODevice),
    .class_init = virtio_device_class_init,
    .instance_finalize = virtio_device_instance_finalize,
    .abstract = true,
    .class_size = sizeof(VirtioDeviceClass),
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_device_info);
}

type_init(virtio_register_types)