/*
 * Virtio Support
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "cpu.h"
#include "trace.h"
#include "exec/address-spaces.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "hw/virtio/virtio.h"
#include "qemu/atomic.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
#include "sysemu/dma.h"
/*
 * The alignment to use between consumer and producer parts of vring.
 * x86 pagesize again. This is the default, used by transports like PCI
 * which don't provide a means for the guest to tell the host the alignment.
 */
#define VIRTIO_PCI_VRING_ALIGN         4096
typedef struct VRingDesc
{
    uint64_t addr;
    uint32_t len;
    uint16_t flags;
    uint16_t next;
} VRingDesc;

typedef struct VRingAvail
{
    uint16_t flags;
    uint16_t idx;
    uint16_t ring[0];
} VRingAvail;

typedef struct VRingUsedElem
{
    uint32_t id;
    uint32_t len;
} VRingUsedElem;

typedef struct VRingUsed
{
    uint16_t flags;
    uint16_t idx;
    VRingUsedElem ring[0];
} VRingUsed;

typedef struct VRingMemoryRegionCaches {
    struct rcu_head rcu;
    MemoryRegionCache desc;
    MemoryRegionCache avail;
    MemoryRegionCache used;
} VRingMemoryRegionCaches;

typedef struct VRing
{
    unsigned int num;
    unsigned int num_default;
    unsigned int align;
    hwaddr desc;
    hwaddr avail;
    hwaddr used;
    VRingMemoryRegionCaches *caches;
} VRing;
struct VirtQueue
{
    VRing vring;

    /* Next head to pop */
    uint16_t last_avail_idx;

    /* Last avail_idx read from VQ. */
    uint16_t shadow_avail_idx;

    uint16_t used_idx;

    /* Last used index value we have signalled on */
    uint16_t signalled_used;

    /* Is the above signalled_used value valid? */
    bool signalled_used_valid;

    /* Notification enabled? */
    bool notification;

    uint16_t queue_index;

    unsigned int inuse;

    uint16_t vector;
    VirtIOHandleOutput handle_output;
    VirtIOHandleAIOOutput handle_aio_output;
    VirtIODevice *vdev;
    EventNotifier guest_notifier;
    EventNotifier host_notifier;
    QLIST_ENTRY(VirtQueue) node;
};
static void virtio_free_region_cache(VRingMemoryRegionCaches *caches)
{
    if (!caches) {
        return;
    }

    address_space_cache_destroy(&caches->desc);
    address_space_cache_destroy(&caches->avail);
    address_space_cache_destroy(&caches->used);
    g_free(caches);
}

static void virtio_virtqueue_reset_region_cache(struct VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches;

    caches = atomic_read(&vq->vring.caches);
    atomic_rcu_set(&vq->vring.caches, NULL);
    if (caches) {
        call_rcu(caches, virtio_free_region_cache, rcu);
    }
}
static void virtio_init_region_cache(VirtIODevice *vdev, int n)
{
    VirtQueue *vq = &vdev->vq[n];
    VRingMemoryRegionCaches *old = vq->vring.caches;
    VRingMemoryRegionCaches *new = NULL;
    hwaddr addr, size;
    int64_t len;
    int event_size;

    event_size = virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;

    addr = vq->vring.desc;
    if (!addr) {
        goto out_no_cache;
    }
    new = g_new0(VRingMemoryRegionCaches, 1);
    size = virtio_queue_get_desc_size(vdev, n);
    len = address_space_cache_init(&new->desc, vdev->dma_as,
                                   addr, size, false);
    if (len < size) {
        virtio_error(vdev, "Cannot map desc");
        goto err_desc;
    }

    size = virtio_queue_get_used_size(vdev, n) + event_size;
    len = address_space_cache_init(&new->used, vdev->dma_as,
                                   vq->vring.used, size, true);
    if (len < size) {
        virtio_error(vdev, "Cannot map used");
        goto err_used;
    }

    size = virtio_queue_get_avail_size(vdev, n) + event_size;
    len = address_space_cache_init(&new->avail, vdev->dma_as,
                                   vq->vring.avail, size, false);
    if (len < size) {
        virtio_error(vdev, "Cannot map avail");
        goto err_avail;
    }

    atomic_rcu_set(&vq->vring.caches, new);
    if (old) {
        call_rcu(old, virtio_free_region_cache, rcu);
    }
    return;

err_avail:
    address_space_cache_destroy(&new->avail);
err_used:
    address_space_cache_destroy(&new->used);
err_desc:
    address_space_cache_destroy(&new->desc);
out_no_cache:
    g_free(new);
    virtio_virtqueue_reset_region_cache(vq);
}
/* virt queue functions */
void virtio_queue_update_rings(VirtIODevice *vdev, int n)
{
    VRing *vring = &vdev->vq[n].vring;

    if (!vring->num || !vring->desc || !vring->align) {
        /* not yet setup -> nothing to do */
        return;
    }
    vring->avail = vring->desc + vring->num * sizeof(VRingDesc);
    vring->used = vring_align(vring->avail +
                              offsetof(VRingAvail, ring[vring->num]),
                              vring->align);
    virtio_init_region_cache(vdev, n);
}
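/*
 * Worked example (a sketch for illustration, not part of the device model):
 * for a legacy queue with num = 256 descriptors and the default 4096-byte
 * alignment, the layout computed above is
 *
 *     avail = desc + 256 * sizeof(VRingDesc)         = desc + 0x1000
 *     used  = vring_align(avail + 0x204, 4096)
 *
 * since offsetof(VRingAvail, ring[256]) is 2 + 2 + 2 * 256 = 0x204 bytes
 * (flags, idx, then 256 16-bit ring entries).  The used ring therefore
 * starts on the next page boundary after the avail ring.
 */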
/* Called within rcu_read_lock().  */
static void vring_desc_read(VirtIODevice *vdev, VRingDesc *desc,
                            MemoryRegionCache *cache, int i)
{
    address_space_read_cached(cache, i * sizeof(VRingDesc),
                              desc, sizeof(VRingDesc));
    virtio_tswap64s(vdev, &desc->addr);
    virtio_tswap32s(vdev, &desc->len);
    virtio_tswap16s(vdev, &desc->flags);
    virtio_tswap16s(vdev, &desc->next);
}

static VRingMemoryRegionCaches *vring_get_region_caches(struct VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
    assert(caches != NULL);
    return caches;
}
/* Called within rcu_read_lock().  */
static inline uint16_t vring_avail_flags(VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingAvail, flags);
    return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
}

/* Called within rcu_read_lock().  */
static inline uint16_t vring_avail_idx(VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingAvail, idx);
    vq->shadow_avail_idx = virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
    return vq->shadow_avail_idx;
}

/* Called within rcu_read_lock().  */
static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingAvail, ring[i]);
    return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
}

/* Called within rcu_read_lock().  */
static inline uint16_t vring_get_used_event(VirtQueue *vq)
{
    return vring_avail_ring(vq, vq->vring.num);
}
/* Called within rcu_read_lock().  */
static inline void vring_used_write(VirtQueue *vq, VRingUsedElem *uelem,
                                    int i)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingUsed, ring[i]);
    virtio_tswap32s(vq->vdev, &uelem->id);
    virtio_tswap32s(vq->vdev, &uelem->len);
    address_space_write_cached(&caches->used, pa, uelem, sizeof(VRingUsedElem));
    address_space_cache_invalidate(&caches->used, pa, sizeof(VRingUsedElem));
}

/* Called within rcu_read_lock().  */
static uint16_t vring_used_idx(VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingUsed, idx);
    return virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
}

/* Called within rcu_read_lock().  */
static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingUsed, idx);
    virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
    address_space_cache_invalidate(&caches->used, pa, sizeof(val));
    vq->used_idx = val;
}
/* Called within rcu_read_lock().  */
static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    VirtIODevice *vdev = vq->vdev;
    hwaddr pa = offsetof(VRingUsed, flags);
    uint16_t flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);

    virtio_stw_phys_cached(vdev, &caches->used, pa, flags | mask);
    address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
}

/* Called within rcu_read_lock().  */
static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    VirtIODevice *vdev = vq->vdev;
    hwaddr pa = offsetof(VRingUsed, flags);
    uint16_t flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);

    virtio_stw_phys_cached(vdev, &caches->used, pa, flags & ~mask);
    address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
}
/* Called within rcu_read_lock().  */
static inline void vring_set_avail_event(VirtQueue *vq, uint16_t val)
{
    VRingMemoryRegionCaches *caches;
    hwaddr pa;
    if (!vq->notification) {
        return;
    }

    caches = vring_get_region_caches(vq);
    pa = offsetof(VRingUsed, ring[vq->vring.num]);
    virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
    address_space_cache_invalidate(&caches->used, pa, sizeof(val));
}
void virtio_queue_set_notification(VirtQueue *vq, int enable)
{
    vq->notification = enable;

    if (!vq->vring.desc) {
        return;
    }

    rcu_read_lock();
    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vring_avail_idx(vq));
    } else if (enable) {
        vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
    } else {
        vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
    }
    if (enable) {
        /* Expose avail event/used flags before caller checks the avail idx. */
        smp_mb();
    }
    rcu_read_unlock();
}

int virtio_queue_ready(VirtQueue *vq)
{
    return vq->vring.avail != 0;
}
/* Fetch avail_idx from VQ memory only when we really need to know if
 * guest has added some buffers.
 * Called within rcu_read_lock().  */
static int virtio_queue_empty_rcu(VirtQueue *vq)
{
    if (unlikely(vq->vdev->broken)) {
        return 1;
    }

    if (unlikely(!vq->vring.avail)) {
        return 1;
    }

    if (vq->shadow_avail_idx != vq->last_avail_idx) {
        return 0;
    }

    return vring_avail_idx(vq) == vq->last_avail_idx;
}

int virtio_queue_empty(VirtQueue *vq)
{
    bool empty;

    if (unlikely(vq->vdev->broken)) {
        return 1;
    }

    if (unlikely(!vq->vring.avail)) {
        return 1;
    }

    if (vq->shadow_avail_idx != vq->last_avail_idx) {
        return 0;
    }

    rcu_read_lock();
    empty = vring_avail_idx(vq) == vq->last_avail_idx;
    rcu_read_unlock();
    return empty;
}
static void virtqueue_unmap_sg(VirtQueue *vq, const VirtQueueElement *elem,
                               unsigned int len)
{
    AddressSpace *dma_as = vq->vdev->dma_as;
    unsigned int offset;
    int i;

    offset = 0;
    for (i = 0; i < elem->in_num; i++) {
        size_t size = MIN(len - offset, elem->in_sg[i].iov_len);

        dma_memory_unmap(dma_as, elem->in_sg[i].iov_base,
                         elem->in_sg[i].iov_len,
                         DMA_DIRECTION_FROM_DEVICE, size);

        offset += size;
    }

    for (i = 0; i < elem->out_num; i++)
        dma_memory_unmap(dma_as, elem->out_sg[i].iov_base,
                         elem->out_sg[i].iov_len,
                         DMA_DIRECTION_TO_DEVICE,
                         elem->out_sg[i].iov_len);
}
/* virtqueue_detach_element:
 * @vq: The #VirtQueue
 * @elem: The #VirtQueueElement
 * @len: number of bytes written
 *
 * Detach the element from the virtqueue.  This function is suitable for device
 * reset or other situations where a #VirtQueueElement is simply freed and will
 * not be pushed or discarded.
 */
void virtqueue_detach_element(VirtQueue *vq, const VirtQueueElement *elem,
                              unsigned int len)
{
    vq->inuse--;
    virtqueue_unmap_sg(vq, elem, len);
}

/* virtqueue_unpop:
 * @vq: The #VirtQueue
 * @elem: The #VirtQueueElement
 * @len: number of bytes written
 *
 * Pretend the most recent element wasn't popped from the virtqueue.  The next
 * call to virtqueue_pop() will refetch the element.
 */
void virtqueue_unpop(VirtQueue *vq, const VirtQueueElement *elem,
                     unsigned int len)
{
    vq->last_avail_idx--;
    virtqueue_detach_element(vq, elem, len);
}

/* virtqueue_rewind:
 * @vq: The #VirtQueue
 * @num: Number of elements to push back
 *
 * Pretend that elements weren't popped from the virtqueue.  The next
 * virtqueue_pop() will refetch the oldest element.
 *
 * Use virtqueue_unpop() instead if you have a VirtQueueElement.
 *
 * Returns: true on success, false if @num is greater than the number of in use
 * elements.
 */
bool virtqueue_rewind(VirtQueue *vq, unsigned int num)
{
    if (num > vq->inuse) {
        return false;
    }
    vq->last_avail_idx -= num;
    vq->inuse -= num;
    return true;
}
/* Called within rcu_read_lock().  */
void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len, unsigned int idx)
{
    VRingUsedElem uelem;

    trace_virtqueue_fill(vq, elem, len, idx);

    virtqueue_unmap_sg(vq, elem, len);

    if (unlikely(vq->vdev->broken)) {
        return;
    }

    if (unlikely(!vq->vring.used)) {
        return;
    }

    idx = (idx + vq->used_idx) % vq->vring.num;

    uelem.id = elem->index;
    uelem.len = len;
    vring_used_write(vq, &uelem, idx);
}

/* Called within rcu_read_lock().  */
void virtqueue_flush(VirtQueue *vq, unsigned int count)
{
    uint16_t old, new;

    if (unlikely(vq->vdev->broken)) {
        vq->inuse -= count;
        return;
    }

    if (unlikely(!vq->vring.used)) {
        return;
    }

    /* Make sure buffer is written before we update index. */
    smp_wmb();
    trace_virtqueue_flush(vq, count);
    old = vq->used_idx;
    new = (uint16_t)(old + count);
    vring_used_idx_set(vq, new);
    vq->inuse -= count;
    if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old)))
        vq->signalled_used_valid = false;
}

void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len)
{
    rcu_read_lock();
    virtqueue_fill(vq, elem, len, 0);
    virtqueue_flush(vq, 1);
    rcu_read_unlock();
}
/* Called within rcu_read_lock().  */
static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
{
    uint16_t num_heads = vring_avail_idx(vq) - idx;

    /* Check it isn't doing very strange things with descriptor numbers. */
    if (num_heads > vq->vring.num) {
        virtio_error(vq->vdev, "Guest moved used index from %u to %u",
                     idx, vq->shadow_avail_idx);
        return -EINVAL;
    }
    /* On success, callers read a descriptor at vq->last_avail_idx.
     * Make sure descriptor read does not bypass avail index read. */
    if (num_heads) {
        smp_rmb();
    }

    return num_heads;
}

/* Called within rcu_read_lock().  */
static bool virtqueue_get_head(VirtQueue *vq, unsigned int idx,
                               unsigned int *head)
{
    /* Grab the next descriptor number they're advertising, and increment
     * the index we've seen. */
    *head = vring_avail_ring(vq, idx % vq->vring.num);

    /* If their number is silly, that's a fatal mistake. */
    if (*head >= vq->vring.num) {
        virtio_error(vq->vdev, "Guest says index %u is available", *head);
        return false;
    }

    return true;
}

enum {
    VIRTQUEUE_READ_DESC_ERROR = -1,
    VIRTQUEUE_READ_DESC_DONE = 0,   /* end of chain */
    VIRTQUEUE_READ_DESC_MORE = 1,   /* more buffers in chain */
};
static int virtqueue_read_next_desc(VirtIODevice *vdev, VRingDesc *desc,
                                    MemoryRegionCache *desc_cache, unsigned int max,
                                    unsigned int *next)
{
    /* If this descriptor says it doesn't chain, we're done. */
    if (!(desc->flags & VRING_DESC_F_NEXT)) {
        return VIRTQUEUE_READ_DESC_DONE;
    }

    /* Check they're not leading us off end of descriptors. */
    *next = desc->next;
    /* Make sure compiler knows to grab that: we don't want it changing! */
    smp_wmb();

    if (*next >= max) {
        virtio_error(vdev, "Desc next is %u", *next);
        return VIRTQUEUE_READ_DESC_ERROR;
    }

    vring_desc_read(vdev, desc, desc_cache, *next);
    return VIRTQUEUE_READ_DESC_MORE;
}
void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
                               unsigned int *out_bytes,
                               unsigned max_in_bytes, unsigned max_out_bytes)
{
    VirtIODevice *vdev = vq->vdev;
    unsigned int max, idx;
    unsigned int total_bufs, in_total, out_total;
    VRingMemoryRegionCaches *caches;
    MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID;
    int64_t len = 0;
    int rc;

    if (unlikely(!vq->vring.desc)) {
        goto err;
    }

    rcu_read_lock();
    idx = vq->last_avail_idx;
    total_bufs = in_total = out_total = 0;

    max = vq->vring.num;
    caches = vring_get_region_caches(vq);
    if (caches->desc.len < max * sizeof(VRingDesc)) {
        virtio_error(vdev, "Cannot map descriptor ring");
        goto err;
    }

    while ((rc = virtqueue_num_heads(vq, idx)) > 0) {
        MemoryRegionCache *desc_cache = &caches->desc;
        unsigned int num_bufs;
        VRingDesc desc;
        unsigned int i;

        num_bufs = total_bufs;

        if (!virtqueue_get_head(vq, idx++, &i)) {
            goto err;
        }

        vring_desc_read(vdev, &desc, desc_cache, i);

        if (desc.flags & VRING_DESC_F_INDIRECT) {
            if (!desc.len || (desc.len % sizeof(VRingDesc))) {
                virtio_error(vdev, "Invalid size for indirect buffer table");
                goto err;
            }

            /* If we've got too many, that implies a descriptor loop. */
            if (num_bufs >= max) {
                virtio_error(vdev, "Looped descriptor");
                goto err;
            }

            /* loop over the indirect descriptor table */
            len = address_space_cache_init(&indirect_desc_cache,
                                           vdev->dma_as,
                                           desc.addr, desc.len, false);
            desc_cache = &indirect_desc_cache;
            if (len < desc.len) {
                virtio_error(vdev, "Cannot map indirect buffer");
                goto err;
            }

            max = desc.len / sizeof(VRingDesc);
            num_bufs = i = 0;
            vring_desc_read(vdev, &desc, desc_cache, i);
        }

        do {
            /* If we've got too many, that implies a descriptor loop. */
            if (++num_bufs > max) {
                virtio_error(vdev, "Looped descriptor");
                goto err;
            }

            if (desc.flags & VRING_DESC_F_WRITE) {
                in_total += desc.len;
            } else {
                out_total += desc.len;
            }
            if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
                goto done;
            }

            rc = virtqueue_read_next_desc(vdev, &desc, desc_cache, max, &i);
        } while (rc == VIRTQUEUE_READ_DESC_MORE);

        if (rc == VIRTQUEUE_READ_DESC_ERROR) {
            goto err;
        }

        if (desc_cache == &indirect_desc_cache) {
            address_space_cache_destroy(&indirect_desc_cache);
            total_bufs++;
        } else {
            total_bufs = num_bufs;
        }
    }

    if (rc < 0) {
        goto err;
    }

done:
    address_space_cache_destroy(&indirect_desc_cache);
    if (in_bytes) {
        *in_bytes = in_total;
    }
    if (out_bytes) {
        *out_bytes = out_total;
    }
    rcu_read_unlock();
    return;

err:
    in_total = out_total = 0;
    goto done;
}
int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes,
                          unsigned int out_bytes)
{
    unsigned int in_total, out_total;

    virtqueue_get_avail_bytes(vq, &in_total, &out_total, in_bytes, out_bytes);
    return in_bytes <= in_total && out_bytes <= out_total;
}
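/*
 * Usage sketch (hypothetical caller, not from this file): a device can check
 * whether the next available chain is big enough before popping it, for
 * instance demanding a 12-byte device-writable area and no driver-writable
 * bytes:
 *
 *     if (!virtqueue_avail_bytes(vq, 12, 0)) {
 *         return;    // wait for the guest to post larger buffers
 *     }
 */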
static bool virtqueue_map_desc(VirtIODevice *vdev, unsigned int *p_num_sg,
                               hwaddr *addr, struct iovec *iov,
                               unsigned int max_num_sg, bool is_write,
                               hwaddr pa, size_t sz)
{
    bool ok = false;
    unsigned num_sg = *p_num_sg;
    assert(num_sg <= max_num_sg);

    if (!sz) {
        virtio_error(vdev, "virtio: zero sized buffers are not allowed");
        goto out;
    }

    while (sz) {
        hwaddr len = sz;

        if (num_sg == max_num_sg) {
            virtio_error(vdev, "virtio: too many write descriptors in "
                               "indirect table");
            goto out;
        }

        iov[num_sg].iov_base = dma_memory_map(vdev->dma_as, pa, &len,
                                              is_write ?
                                              DMA_DIRECTION_FROM_DEVICE :
                                              DMA_DIRECTION_TO_DEVICE);
        if (!iov[num_sg].iov_base) {
            virtio_error(vdev, "virtio: bogus descriptor or out of resources");
            goto out;
        }

        iov[num_sg].iov_len = len;
        addr[num_sg] = pa;

        sz -= len;
        pa += len;
        num_sg++;
    }
    ok = true;

out:
    *p_num_sg = num_sg;
    return ok;
}
/* Only used by error code paths before we have a VirtQueueElement (therefore
 * virtqueue_unmap_sg() can't be used).  Assumes buffers weren't written to
 * yet.
 */
static void virtqueue_undo_map_desc(unsigned int out_num, unsigned int in_num,
                                    struct iovec *iov)
{
    unsigned int i;

    for (i = 0; i < out_num + in_num; i++) {
        int is_write = i >= out_num;

        cpu_physical_memory_unmap(iov->iov_base, iov->iov_len, is_write, 0);
        iov++;
    }
}

static void virtqueue_map_iovec(VirtIODevice *vdev, struct iovec *sg,
                                hwaddr *addr, unsigned int num_sg,
                                int is_write)
{
    unsigned int i;
    hwaddr len;

    for (i = 0; i < num_sg; i++) {
        len = sg[i].iov_len;
        sg[i].iov_base = dma_memory_map(vdev->dma_as,
                                        addr[i], &len, is_write ?
                                        DMA_DIRECTION_FROM_DEVICE :
                                        DMA_DIRECTION_TO_DEVICE);
        if (!sg[i].iov_base) {
            error_report("virtio: error trying to map MMIO memory");
            exit(1);
        }
        if (len != sg[i].iov_len) {
            error_report("virtio: unexpected memory split");
            exit(1);
        }
    }
}
void virtqueue_map(VirtIODevice *vdev, VirtQueueElement *elem)
{
    virtqueue_map_iovec(vdev, elem->in_sg, elem->in_addr, elem->in_num, 1);
    virtqueue_map_iovec(vdev, elem->out_sg, elem->out_addr, elem->out_num, 0);
}
static void *virtqueue_alloc_element(size_t sz, unsigned out_num, unsigned in_num)
{
    VirtQueueElement *elem;
    size_t in_addr_ofs = QEMU_ALIGN_UP(sz, __alignof__(elem->in_addr[0]));
    size_t out_addr_ofs = in_addr_ofs + in_num * sizeof(elem->in_addr[0]);
    size_t out_addr_end = out_addr_ofs + out_num * sizeof(elem->out_addr[0]);
    size_t in_sg_ofs = QEMU_ALIGN_UP(out_addr_end, __alignof__(elem->in_sg[0]));
    size_t out_sg_ofs = in_sg_ofs + in_num * sizeof(elem->in_sg[0]);
    size_t out_sg_end = out_sg_ofs + out_num * sizeof(elem->out_sg[0]);

    assert(sz >= sizeof(VirtQueueElement));
    elem = g_malloc(out_sg_end);
    trace_virtqueue_alloc_element(elem, sz, in_num, out_num);
    elem->out_num = out_num;
    elem->in_num = in_num;
    elem->in_addr = (void *)elem + in_addr_ofs;
    elem->out_addr = (void *)elem + out_addr_ofs;
    elem->in_sg = (void *)elem + in_sg_ofs;
    elem->out_sg = (void *)elem + out_sg_ofs;
    return elem;
}
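/*
 * Layout sketch (assuming a 64-bit host, for illustration only): with
 * sz = sizeof(VirtQueueElement), out_num = 1 and in_num = 2, the single
 * allocation above is laid out as
 *
 *     [VirtQueueElement][in_addr[2]][out_addr[1]][in_sg[2]][out_sg[1]]
 *
 * with each array suitably aligned.  Keeping the variable-size arrays in
 * one g_malloc() block is what lets qemu_get_virtqueue_element() below
 * rebuild an element of any size from the fixed-size VirtQueueElementOld
 * wire format.
 */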
void *virtqueue_pop(VirtQueue *vq, size_t sz)
{
    unsigned int i, head, max;
    VRingMemoryRegionCaches *caches;
    MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID;
    MemoryRegionCache *desc_cache;
    int64_t len;
    VirtIODevice *vdev = vq->vdev;
    VirtQueueElement *elem = NULL;
    unsigned out_num, in_num, elem_entries;
    hwaddr addr[VIRTQUEUE_MAX_SIZE];
    struct iovec iov[VIRTQUEUE_MAX_SIZE];
    VRingDesc desc;
    int rc;

    if (unlikely(vdev->broken)) {
        return NULL;
    }
    rcu_read_lock();
    if (virtio_queue_empty_rcu(vq)) {
        goto done;
    }
    /* Needed after virtio_queue_empty(), see comment in
     * virtqueue_num_heads(). */
    smp_rmb();

    /* When we start there are none of either input nor output. */
    out_num = in_num = elem_entries = 0;

    max = vq->vring.num;

    if (vq->inuse >= vq->vring.num) {
        virtio_error(vdev, "Virtqueue size exceeded");
        goto done;
    }

    if (!virtqueue_get_head(vq, vq->last_avail_idx++, &head)) {
        goto done;
    }

    if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vq->last_avail_idx);
    }

    i = head;

    caches = vring_get_region_caches(vq);
    if (caches->desc.len < max * sizeof(VRingDesc)) {
        virtio_error(vdev, "Cannot map descriptor ring");
        goto done;
    }

    desc_cache = &caches->desc;
    vring_desc_read(vdev, &desc, desc_cache, i);
    if (desc.flags & VRING_DESC_F_INDIRECT) {
        if (!desc.len || (desc.len % sizeof(VRingDesc))) {
            virtio_error(vdev, "Invalid size for indirect buffer table");
            goto done;
        }

        /* loop over the indirect descriptor table */
        len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as,
                                       desc.addr, desc.len, false);
        desc_cache = &indirect_desc_cache;
        if (len < desc.len) {
            virtio_error(vdev, "Cannot map indirect buffer");
            goto done;
        }

        max = desc.len / sizeof(VRingDesc);
        i = 0;
        vring_desc_read(vdev, &desc, desc_cache, i);
    }

    /* Collect all the descriptors */
    do {
        bool map_ok;

        if (desc.flags & VRING_DESC_F_WRITE) {
            map_ok = virtqueue_map_desc(vdev, &in_num, addr + out_num,
                                        iov + out_num,
                                        VIRTQUEUE_MAX_SIZE - out_num, true,
                                        desc.addr, desc.len);
        } else {
            if (in_num) {
                virtio_error(vdev, "Incorrect order for descriptors");
                goto err_undo_map;
            }
            map_ok = virtqueue_map_desc(vdev, &out_num, addr, iov,
                                        VIRTQUEUE_MAX_SIZE, false,
                                        desc.addr, desc.len);
        }
        if (!map_ok) {
            goto err_undo_map;
        }

        /* If we've got too many, that implies a descriptor loop. */
        if (++elem_entries > max) {
            virtio_error(vdev, "Looped descriptor");
            goto err_undo_map;
        }

        rc = virtqueue_read_next_desc(vdev, &desc, desc_cache, max, &i);
    } while (rc == VIRTQUEUE_READ_DESC_MORE);

    if (rc == VIRTQUEUE_READ_DESC_ERROR) {
        goto err_undo_map;
    }

    /* Now copy what we have collected and mapped */
    elem = virtqueue_alloc_element(sz, out_num, in_num);
    elem->index = head;
    for (i = 0; i < out_num; i++) {
        elem->out_addr[i] = addr[i];
        elem->out_sg[i] = iov[i];
    }
    for (i = 0; i < in_num; i++) {
        elem->in_addr[i] = addr[out_num + i];
        elem->in_sg[i] = iov[out_num + i];
    }

    vq->inuse++;

    trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
done:
    address_space_cache_destroy(&indirect_desc_cache);
    rcu_read_unlock();

    return elem;

err_undo_map:
    virtqueue_undo_map_desc(out_num, in_num, iov);
    goto done;
}
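/*
 * Typical device-side loop (a sketch, not from this file; process_request
 * is a hypothetical helper): pop elements until the queue drains, consume
 * the out_sg data, fill the in_sg buffers, then push each element back and
 * notify the guest once at the end.
 *
 *     VirtQueueElement *elem;
 *     while ((elem = virtqueue_pop(vq, sizeof(VirtQueueElement)))) {
 *         size_t written = process_request(elem);
 *         virtqueue_push(vq, elem, written);
 *         g_free(elem);
 *     }
 *     virtio_notify(vdev, vq);
 */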
/* virtqueue_drop_all:
 * @vq: The #VirtQueue
 * Drops all queued buffers and indicates them to the guest
 * as if they are done. Useful when buffers can not be
 * processed but must be returned to the guest.
 */
unsigned int virtqueue_drop_all(VirtQueue *vq)
{
    unsigned int dropped = 0;
    VirtQueueElement elem = {};
    VirtIODevice *vdev = vq->vdev;
    bool fEventIdx = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

    if (unlikely(vdev->broken)) {
        return 0;
    }

    while (!virtio_queue_empty(vq) && vq->inuse < vq->vring.num) {
        /* works similar to virtqueue_pop but does not map buffers
         * and does not allocate any memory */
        smp_rmb();
        if (!virtqueue_get_head(vq, vq->last_avail_idx, &elem.index)) {
            break;
        }
        vq->inuse++;
        vq->last_avail_idx++;
        if (fEventIdx) {
            vring_set_avail_event(vq, vq->last_avail_idx);
        }
        /* immediately push the element, nothing to unmap
         * as both in_num and out_num are set to 0 */
        virtqueue_push(vq, &elem, 0);
        dropped++;
    }

    return dropped;
}
/* Reading and writing a structure directly to QEMUFile is *awful*, but
 * it is what QEMU has always done by mistake.  We can change it sooner
 * or later by bumping the version number of the affected vm states.
 * In the meanwhile, since the in-memory layout of VirtQueueElement
 * has changed, we need to marshal to and from the layout that was
 * used before the change.
 */
typedef struct VirtQueueElementOld {
    unsigned int index;
    unsigned int out_num;
    unsigned int in_num;
    hwaddr in_addr[VIRTQUEUE_MAX_SIZE];
    hwaddr out_addr[VIRTQUEUE_MAX_SIZE];
    struct iovec in_sg[VIRTQUEUE_MAX_SIZE];
    struct iovec out_sg[VIRTQUEUE_MAX_SIZE];
} VirtQueueElementOld;
void *qemu_get_virtqueue_element(VirtIODevice *vdev, QEMUFile *f, size_t sz)
{
    VirtQueueElement *elem;
    VirtQueueElementOld data;
    int i;

    qemu_get_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld));

    /* TODO: teach all callers that this can fail, and return failure instead
     * of asserting here.
     * This is just one thing (there are probably more) that must be
     * fixed before we can allow NDEBUG compilation.
     */
    assert(ARRAY_SIZE(data.in_addr) >= data.in_num);
    assert(ARRAY_SIZE(data.out_addr) >= data.out_num);

    elem = virtqueue_alloc_element(sz, data.out_num, data.in_num);
    elem->index = data.index;

    for (i = 0; i < elem->in_num; i++) {
        elem->in_addr[i] = data.in_addr[i];
    }

    for (i = 0; i < elem->out_num; i++) {
        elem->out_addr[i] = data.out_addr[i];
    }

    for (i = 0; i < elem->in_num; i++) {
        /* Base is overwritten by virtqueue_map.  */
        elem->in_sg[i].iov_base = 0;
        elem->in_sg[i].iov_len = data.in_sg[i].iov_len;
    }

    for (i = 0; i < elem->out_num; i++) {
        /* Base is overwritten by virtqueue_map.  */
        elem->out_sg[i].iov_base = 0;
        elem->out_sg[i].iov_len = data.out_sg[i].iov_len;
    }

    virtqueue_map(vdev, elem);

    return elem;
}
void qemu_put_virtqueue_element(QEMUFile *f, VirtQueueElement *elem)
{
    VirtQueueElementOld data;
    int i;

    memset(&data, 0, sizeof(data));
    data.index = elem->index;
    data.in_num = elem->in_num;
    data.out_num = elem->out_num;

    for (i = 0; i < elem->in_num; i++) {
        data.in_addr[i] = elem->in_addr[i];
    }

    for (i = 0; i < elem->out_num; i++) {
        data.out_addr[i] = elem->out_addr[i];
    }

    for (i = 0; i < elem->in_num; i++) {
        /* Base is overwritten by virtqueue_map when loading.  Do not
         * save it, as it would leak the QEMU address space layout.  */
        data.in_sg[i].iov_len = elem->in_sg[i].iov_len;
    }

    for (i = 0; i < elem->out_num; i++) {
        /* Do not save iov_base as above.  */
        data.out_sg[i].iov_len = elem->out_sg[i].iov_len;
    }

    qemu_put_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld));
}
static void virtio_notify_vector(VirtIODevice *vdev, uint16_t vector)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    if (unlikely(vdev->broken)) {
        return;
    }

    if (k->notify) {
        k->notify(qbus->parent, vector);
    }
}

void virtio_update_irq(VirtIODevice *vdev)
{
    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
}

static int virtio_validate_features(VirtIODevice *vdev)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    if (virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM) &&
        !virtio_vdev_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM)) {
        return -EFAULT;
    }

    if (k->validate_features) {
        return k->validate_features(vdev);
    } else {
        return 0;
    }
}
*vdev
, uint8_t val
)
1152 VirtioDeviceClass
*k
= VIRTIO_DEVICE_GET_CLASS(vdev
);
1153 trace_virtio_set_status(vdev
, val
);
1155 if (virtio_vdev_has_feature(vdev
, VIRTIO_F_VERSION_1
)) {
1156 if (!(vdev
->status
& VIRTIO_CONFIG_S_FEATURES_OK
) &&
1157 val
& VIRTIO_CONFIG_S_FEATURES_OK
) {
1158 int ret
= virtio_validate_features(vdev
);
1166 virtio_set_started(vdev
, val
& VIRTIO_CONFIG_S_DRIVER_OK
);
1168 if (k
->set_status
) {
1169 k
->set_status(vdev
, val
);
static enum virtio_device_endian virtio_default_endian(void)
{
    if (target_words_bigendian()) {
        return VIRTIO_DEVICE_ENDIAN_BIG;
    } else {
        return VIRTIO_DEVICE_ENDIAN_LITTLE;
    }
}

static enum virtio_device_endian virtio_current_cpu_endian(void)
{
    CPUClass *cc = CPU_GET_CLASS(current_cpu);

    if (cc->virtio_is_big_endian(current_cpu)) {
        return VIRTIO_DEVICE_ENDIAN_BIG;
    } else {
        return VIRTIO_DEVICE_ENDIAN_LITTLE;
    }
}
void virtio_reset(void *opaque)
{
    VirtIODevice *vdev = opaque;
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    int i;

    virtio_set_status(vdev, 0);
    if (current_cpu) {
        /* Guest initiated reset */
        vdev->device_endian = virtio_current_cpu_endian();
    } else {
        /* System reset */
        vdev->device_endian = virtio_default_endian();
    }

    if (k->reset) {
        k->reset(vdev);
    }

    vdev->start_on_kick = false;
    vdev->started = false;
    vdev->broken = false;
    vdev->guest_features = 0;
    vdev->queue_sel = 0;
    vdev->status = 0;
    atomic_set(&vdev->isr, 0);
    vdev->config_vector = VIRTIO_NO_VECTOR;
    virtio_notify_vector(vdev, vdev->config_vector);

    for(i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        vdev->vq[i].vring.desc = 0;
        vdev->vq[i].vring.avail = 0;
        vdev->vq[i].vring.used = 0;
        vdev->vq[i].last_avail_idx = 0;
        vdev->vq[i].shadow_avail_idx = 0;
        vdev->vq[i].used_idx = 0;
        virtio_queue_set_vector(vdev, i, VIRTIO_NO_VECTOR);
        vdev->vq[i].signalled_used = 0;
        vdev->vq[i].signalled_used_valid = false;
        vdev->vq[i].notification = true;
        vdev->vq[i].vring.num = vdev->vq[i].vring.num_default;
        vdev->vq[i].inuse = 0;
        virtio_virtqueue_reset_region_cache(&vdev->vq[i]);
    }
}
uint32_t virtio_config_readb(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldub_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_readw(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = lduw_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_readl(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldl_p(vdev->config + addr);
    return val;
}
void virtio_config_writeb(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stb_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_writew(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stw_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_writel(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stl_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}
uint32_t virtio_config_modern_readb(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldub_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_modern_readw(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = lduw_le_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_modern_readl(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldl_le_p(vdev->config + addr);
    return val;
}
void virtio_config_modern_writeb(VirtIODevice *vdev,
                                 uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stb_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_modern_writew(VirtIODevice *vdev,
                                 uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stw_le_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_modern_writel(VirtIODevice *vdev,
                                 uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stl_le_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}
void virtio_queue_set_addr(VirtIODevice *vdev, int n, hwaddr addr)
{
    if (!vdev->vq[n].vring.num) {
        return;
    }
    vdev->vq[n].vring.desc = addr;
    virtio_queue_update_rings(vdev, n);
}

hwaddr virtio_queue_get_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

void virtio_queue_set_rings(VirtIODevice *vdev, int n, hwaddr desc,
                            hwaddr avail, hwaddr used)
{
    if (!vdev->vq[n].vring.num) {
        return;
    }
    vdev->vq[n].vring.desc = desc;
    vdev->vq[n].vring.avail = avail;
    vdev->vq[n].vring.used = used;
    virtio_init_region_cache(vdev, n);
}
void virtio_queue_set_num(VirtIODevice *vdev, int n, int num)
{
    /* Don't allow guest to flip queue between existent and
     * nonexistent states, or to set it to an invalid size.
     */
    if (!!num != !!vdev->vq[n].vring.num ||
        num > VIRTQUEUE_MAX_SIZE ||
        num < 0) {
        return;
    }
    vdev->vq[n].vring.num = num;
}

VirtQueue *virtio_vector_first_queue(VirtIODevice *vdev, uint16_t vector)
{
    return QLIST_FIRST(&vdev->vector_queues[vector]);
}

VirtQueue *virtio_vector_next_queue(VirtQueue *vq)
{
    return QLIST_NEXT(vq, node);
}

int virtio_queue_get_num(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.num;
}

int virtio_queue_get_max_num(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.num_default;
}

int virtio_get_num_queues(VirtIODevice *vdev)
{
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (!virtio_queue_get_num(vdev, i)) {
            break;
        }
    }

    return i;
}
void virtio_queue_set_align(VirtIODevice *vdev, int n, int align)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    /* virtio-1 compliant devices cannot change the alignment */
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        error_report("tried to modify queue alignment for virtio-1 device");
        return;
    }
    /* Check that the transport told us it was going to do this
     * (so a buggy transport will immediately assert rather than
     * silently failing to migrate this state)
     */
    assert(k->has_variable_vring_alignment);

    if (align) {
        vdev->vq[n].vring.align = align;
        virtio_queue_update_rings(vdev, n);
    }
}
static bool virtio_queue_notify_aio_vq(VirtQueue *vq)
{
    bool ret = false;

    if (vq->vring.desc && vq->handle_aio_output) {
        VirtIODevice *vdev = vq->vdev;

        trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
        ret = vq->handle_aio_output(vdev, vq);

        if (unlikely(vdev->start_on_kick)) {
            virtio_set_started(vdev, true);
        }
    }

    return ret;
}

static void virtio_queue_notify_vq(VirtQueue *vq)
{
    if (vq->vring.desc && vq->handle_output) {
        VirtIODevice *vdev = vq->vdev;

        if (unlikely(vdev->broken)) {
            return;
        }

        trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
        vq->handle_output(vdev, vq);

        if (unlikely(vdev->start_on_kick)) {
            virtio_set_started(vdev, true);
        }
    }
}
void virtio_queue_notify(VirtIODevice *vdev, int n)
{
    VirtQueue *vq = &vdev->vq[n];

    if (unlikely(!vq->vring.desc || vdev->broken)) {
        return;
    }

    trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
    if (vq->handle_aio_output) {
        event_notifier_set(&vq->host_notifier);
    } else if (vq->handle_output) {
        vq->handle_output(vdev, vq);

        if (unlikely(vdev->start_on_kick)) {
            virtio_set_started(vdev, true);
        }
    }
}
uint16_t virtio_queue_vector(VirtIODevice *vdev, int n)
{
    return n < VIRTIO_QUEUE_MAX ? vdev->vq[n].vector :
        VIRTIO_NO_VECTOR;
}

void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector)
{
    VirtQueue *vq = &vdev->vq[n];

    if (n < VIRTIO_QUEUE_MAX) {
        if (vdev->vector_queues &&
            vdev->vq[n].vector != VIRTIO_NO_VECTOR) {
            QLIST_REMOVE(vq, node);
        }
        vdev->vq[n].vector = vector;
        if (vdev->vector_queues &&
            vector != VIRTIO_NO_VECTOR) {
            QLIST_INSERT_HEAD(&vdev->vector_queues[vector], vq, node);
        }
    }
}
VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
                            VirtIOHandleOutput handle_output)
{
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;
    }

    if (i == VIRTIO_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE)
        abort();

    vdev->vq[i].vring.num = queue_size;
    vdev->vq[i].vring.num_default = queue_size;
    vdev->vq[i].vring.align = VIRTIO_PCI_VRING_ALIGN;
    vdev->vq[i].handle_output = handle_output;
    vdev->vq[i].handle_aio_output = NULL;

    return &vdev->vq[i];
}
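/*
 * Usage sketch (hypothetical device realize code, not from this file): a
 * device typically adds its queues once at realize time and keeps the
 * returned handles, e.g.
 *
 *     s->req_vq = virtio_add_queue(vdev, 128, my_handle_request);
 *
 * where s->req_vq and my_handle_request are assumed names.  Because
 * virtio_add_queue() takes the first free slot, queue indexes follow the
 * order of the calls.
 */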
void virtio_del_queue(VirtIODevice *vdev, int n)
{
    if (n < 0 || n >= VIRTIO_QUEUE_MAX) {
        abort();
    }

    vdev->vq[n].vring.num = 0;
    vdev->vq[n].vring.num_default = 0;
    vdev->vq[n].handle_output = NULL;
    vdev->vq[n].handle_aio_output = NULL;
}

static void virtio_set_isr(VirtIODevice *vdev, int value)
{
    uint8_t old = atomic_read(&vdev->isr);

    /* Do not write ISR if it does not change, so that its cacheline remains
     * shared in the common case where the guest does not read it.
     */
    if ((old & value) != value) {
        atomic_or(&vdev->isr, value);
    }
}
/* Called within rcu_read_lock().  */
static bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    uint16_t old, new;
    bool v;
    /* We need to expose used array entries before checking used event. */
    smp_mb();
    /* Always notify when queue is empty (when feature acknowledge) */
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
        !vq->inuse && virtio_queue_empty(vq)) {
        return true;
    }

    if (!virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
        return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
    }

    v = vq->signalled_used_valid;
    vq->signalled_used_valid = true;
    old = vq->signalled_used;
    new = vq->signalled_used = vq->used_idx;
    return !v || vring_need_event(vring_get_used_event(vq), new, old);
}
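/*
 * vring_need_event() (from the standard virtio ring headers) reduces to
 * (uint16_t)(new - event_idx - 1) < (uint16_t)(new - old): notify only if
 * the guest's used_event index falls in the window (old, new].  Worked
 * example: with old = 10 and new = 13, used_event = 11 gives 1 < 3, so an
 * interrupt is raised; used_event = 20 gives 0xfff8 < 3, which is false,
 * so the interrupt is suppressed.  The 16-bit arithmetic keeps the test
 * correct across index wrap-around.
 */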
void virtio_notify_irqfd(VirtIODevice *vdev, VirtQueue *vq)
{
    bool should_notify;
    rcu_read_lock();
    should_notify = virtio_should_notify(vdev, vq);
    rcu_read_unlock();

    if (!should_notify) {
        return;
    }

    trace_virtio_notify_irqfd(vdev, vq);

    /*
     * virtio spec 1.0 says ISR bit 0 should be ignored with MSI, but
     * windows drivers included in virtio-win 1.8.0 (circa 2015) are
     * incorrectly polling this bit during crashdump and hibernation
     * in MSI mode, causing a hang if this bit is never updated.
     * Recent releases of Windows do not really shut down, but rather
     * log out and hibernate to make the next startup faster.  Hence,
     * this manifested as a more serious hang during shutdown with
     * Windows.  Next driver release from 2016 fixed this problem, so
     * working around it is not a must, but it's easy to do so let's
     * do it here.
     *
     * Note: it's safe to update ISR from any thread as it was switched
     * to an atomic operation.
     */
    virtio_set_isr(vq->vdev, 0x1);
    event_notifier_set(&vq->guest_notifier);
}

static void virtio_irq(VirtQueue *vq)
{
    virtio_set_isr(vq->vdev, 0x1);
    virtio_notify_vector(vq->vdev, vq->vector);
}

void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    bool should_notify;
    rcu_read_lock();
    should_notify = virtio_should_notify(vdev, vq);
    rcu_read_unlock();

    if (!should_notify) {
        return;
    }

    trace_virtio_notify(vdev, vq);
    virtio_irq(vq);
}
void virtio_notify_config(VirtIODevice *vdev)
{
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
        return;

    virtio_set_isr(vdev, 0x3);
    vdev->generation++;
    virtio_notify_vector(vdev, vdev->config_vector);
}
static bool virtio_device_endian_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    assert(vdev->device_endian != VIRTIO_DEVICE_ENDIAN_UNKNOWN);
    if (!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        return vdev->device_endian != virtio_default_endian();
    }
    /* Devices conforming to VIRTIO 1.0 or later are always LE. */
    return vdev->device_endian != VIRTIO_DEVICE_ENDIAN_LITTLE;
}

static bool virtio_64bit_features_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    return (vdev->host_features >> 32) != 0;
}

static bool virtio_virtqueue_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    return virtio_host_has_feature(vdev, VIRTIO_F_VERSION_1);
}

static bool virtio_ringsize_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num != vdev->vq[i].vring.num_default) {
            return true;
        }
    }
    return false;
}

static bool virtio_extra_state_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    return k->has_extra_state &&
        k->has_extra_state(qbus->parent);
}

static bool virtio_broken_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    return vdev->broken;
}

static bool virtio_started_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    return vdev->started;
}
static const VMStateDescription vmstate_virtqueue = {
    .name = "virtqueue_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(vring.avail, struct VirtQueue),
        VMSTATE_UINT64(vring.used, struct VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_virtqueues = {
    .name = "virtio/virtqueues",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_virtqueue_needed,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
                      VIRTIO_QUEUE_MAX, 0, vmstate_virtqueue, VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_ringsize = {
    .name = "ringsize_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(vring.num_default, struct VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_ringsize = {
    .name = "virtio/ringsize",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_ringsize_needed,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
                      VIRTIO_QUEUE_MAX, 0, vmstate_ringsize, VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};
static int get_extra_state(QEMUFile *f, void *pv, size_t size,
                           const VMStateField *field)
{
    VirtIODevice *vdev = pv;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    if (!k->load_extra_state) {
        return -1;
    } else {
        return k->load_extra_state(qbus->parent, f);
    }
}

static int put_extra_state(QEMUFile *f, void *pv, size_t size,
                           const VMStateField *field, QJSON *vmdesc)
{
    VirtIODevice *vdev = pv;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    k->save_extra_state(qbus->parent, f);
    return 0;
}

static const VMStateInfo vmstate_info_extra_state = {
    .name = "virtqueue_extra_state",
    .get = get_extra_state,
    .put = put_extra_state,
};

static const VMStateDescription vmstate_virtio_extra_state = {
    .name = "virtio/extra_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_extra_state_needed,
    .fields = (VMStateField[]) {
        {
            .name         = "extra_state",
            .version_id   = 0,
            .field_exists = NULL,
            .size         = 0,
            .info         = &vmstate_info_extra_state,
            .flags        = VMS_SINGLE,
            .offset       = 0,
        },
        VMSTATE_END_OF_LIST()
    }
};
static const VMStateDescription vmstate_virtio_device_endian = {
    .name = "virtio/device_endian",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_device_endian_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(device_endian, VirtIODevice),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_64bit_features = {
    .name = "virtio/64bit_features",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_64bit_features_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(guest_features, VirtIODevice),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_broken = {
    .name = "virtio/broken",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_broken_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(broken, VirtIODevice),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_started = {
    .name = "virtio/started",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_started_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(started, VirtIODevice),
        VMSTATE_END_OF_LIST()
    }
};
static const VMStateDescription vmstate_virtio = {
    .name = "virtio",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_virtio_device_endian,
        &vmstate_virtio_64bit_features,
        &vmstate_virtio_virtqueues,
        &vmstate_virtio_ringsize,
        &vmstate_virtio_broken,
        &vmstate_virtio_extra_state,
        &vmstate_virtio_started,
        NULL
    }
};
int virtio_save(VirtIODevice *vdev, QEMUFile *f)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t guest_features_lo = (vdev->guest_features & 0xffffffff);
    int i;

    if (k->save_config) {
        k->save_config(qbus->parent, f);
    }

    qemu_put_8s(f, &vdev->status);
    qemu_put_8s(f, &vdev->isr);
    qemu_put_be16s(f, &vdev->queue_sel);
    qemu_put_be32s(f, &guest_features_lo);
    qemu_put_be32(f, vdev->config_len);
    qemu_put_buffer(f, vdev->config, vdev->config_len);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;
    }

    qemu_put_be32(f, i);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;

        qemu_put_be32(f, vdev->vq[i].vring.num);
        if (k->has_variable_vring_alignment) {
            qemu_put_be32(f, vdev->vq[i].vring.align);
        }
        /*
         * Save desc now, the rest of the ring addresses are saved in
         * subsections for VIRTIO-1 devices.
         */
        qemu_put_be64(f, vdev->vq[i].vring.desc);
        qemu_put_be16s(f, &vdev->vq[i].last_avail_idx);
        if (k->save_queue) {
            k->save_queue(qbus->parent, i, f);
        }
    }

    if (vdc->save != NULL) {
        vdc->save(vdev, f);
    }

    if (vdc->vmsd) {
        int ret = vmstate_save_state(f, vdc->vmsd, vdev, NULL);
        if (ret) {
            return ret;
        }
    }

    /* Subsections */
    return vmstate_save_state(f, &vmstate_virtio, vdev, NULL);
}
/* A wrapper for use as a VMState .put function */
static int virtio_device_put(QEMUFile *f, void *opaque, size_t size,
                             const VMStateField *field, QJSON *vmdesc)
{
    return virtio_save(VIRTIO_DEVICE(opaque), f);
}

/* A wrapper for use as a VMState .get function */
static int virtio_device_get(QEMUFile *f, void *opaque, size_t size,
                             const VMStateField *field)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(opaque);
    DeviceClass *dc = DEVICE_CLASS(VIRTIO_DEVICE_GET_CLASS(vdev));

    return virtio_load(vdev, f, dc->vmsd->version_id);
}

const VMStateInfo virtio_vmstate_info = {
    .name = "virtio",
    .get = virtio_device_get,
    .put = virtio_device_put,
};
static int virtio_set_features_nocheck(VirtIODevice *vdev, uint64_t val)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    bool bad = (val & ~(vdev->host_features)) != 0;

    val &= vdev->host_features;
    if (k->set_features) {
        k->set_features(vdev, val);
    }
    vdev->guest_features = val;
    return bad ? -1 : 0;
}

int virtio_set_features(VirtIODevice *vdev, uint64_t val)
{
    int ret;
    /*
     * The driver must not attempt to set features after feature negotiation
     * has finished.
     */
    if (vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) {
        return -EINVAL;
    }
    ret = virtio_set_features_nocheck(vdev, val);
    if (!ret) {
        if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
            /* VIRTIO_RING_F_EVENT_IDX changes the size of the caches.  */
            int i;
            for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
                if (vdev->vq[i].vring.num != 0) {
                    virtio_init_region_cache(vdev, i);
                }
            }
        }

        if (!virtio_device_started(vdev, vdev->status) &&
            !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
            vdev->start_on_kick = true;
        }
    }
    return ret;
}
size_t virtio_feature_get_config_size(VirtIOFeature *feature_sizes,
                                      uint64_t host_features)
{
    size_t config_size = 0;
    int i;

    for (i = 0; feature_sizes[i].flags != 0; i++) {
        if (host_features & feature_sizes[i].flags) {
            config_size = MAX(feature_sizes[i].end, config_size);
        }
    }

    return config_size;
}
int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
{
    int i, ret;
    int32_t config_len;
    uint32_t num;
    uint32_t features;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);

    /*
     * We poison the endianness to ensure it does not get used before
     * subsections have been loaded.
     */
    vdev->device_endian = VIRTIO_DEVICE_ENDIAN_UNKNOWN;

    if (k->load_config) {
        ret = k->load_config(qbus->parent, f);
        if (ret)
            return ret;
    }

    qemu_get_8s(f, &vdev->status);
    qemu_get_8s(f, &vdev->isr);
    qemu_get_be16s(f, &vdev->queue_sel);
    if (vdev->queue_sel >= VIRTIO_QUEUE_MAX) {
        return -1;
    }
    qemu_get_be32s(f, &features);

    /*
     * Temporarily set guest_features low bits - needed by
     * virtio net load code testing for VIRTIO_NET_F_CTRL_GUEST_OFFLOADS
     * VIRTIO_NET_F_GUEST_ANNOUNCE and VIRTIO_NET_F_CTRL_VQ.
     *
     * Note: devices should always test host features in future - don't create
     * new dependencies like this.
     */
    vdev->guest_features = features;

    config_len = qemu_get_be32(f);

    /*
     * There are cases where the incoming config can be bigger or smaller
     * than what we have; so load what we have space for, and skip
     * any excess that's in the stream.
     */
    qemu_get_buffer(f, vdev->config, MIN(config_len, vdev->config_len));

    while (config_len > vdev->config_len) {
        qemu_get_byte(f);
        config_len--;
    }

    num = qemu_get_be32(f);

    if (num > VIRTIO_QUEUE_MAX) {
        error_report("Invalid number of virtqueues: 0x%x", num);
        return -1;
    }

    for (i = 0; i < num; i++) {
        vdev->vq[i].vring.num = qemu_get_be32(f);
        if (k->has_variable_vring_alignment) {
            vdev->vq[i].vring.align = qemu_get_be32(f);
        }
        vdev->vq[i].vring.desc = qemu_get_be64(f);
        qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);
        vdev->vq[i].signalled_used_valid = false;
        vdev->vq[i].notification = true;

        if (!vdev->vq[i].vring.desc && vdev->vq[i].last_avail_idx) {
            error_report("VQ %d address 0x0 "
                         "inconsistent with Host index 0x%x",
                         i, vdev->vq[i].last_avail_idx);
            return -1;
        }
        if (k->load_queue) {
            ret = k->load_queue(qbus->parent, i, f);
            if (ret)
                return ret;
        }
    }

    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);

    if (vdc->load != NULL) {
        ret = vdc->load(vdev, f, version_id);
        if (ret) {
            return ret;
        }
    }

    if (vdc->vmsd) {
        ret = vmstate_load_state(f, vdc->vmsd, vdev, version_id);
        if (ret) {
            return ret;
        }
    }

    /* Subsections */
    ret = vmstate_load_state(f, &vmstate_virtio, vdev, 1);
    if (ret) {
        return ret;
    }

    if (vdev->device_endian == VIRTIO_DEVICE_ENDIAN_UNKNOWN) {
        vdev->device_endian = virtio_default_endian();
    }

    if (virtio_64bit_features_needed(vdev)) {
        /*
         * Subsection load filled vdev->guest_features.  Run them
         * through virtio_set_features to sanity-check them against
         * host_features.
         */
        uint64_t features64 = vdev->guest_features;
        if (virtio_set_features_nocheck(vdev, features64) < 0) {
            error_report("Features 0x%" PRIx64 " unsupported. "
                         "Allowed features: 0x%" PRIx64,
                         features64, vdev->host_features);
            return -1;
        }
    } else {
        if (virtio_set_features_nocheck(vdev, features) < 0) {
            error_report("Features 0x%x unsupported. "
                         "Allowed features: 0x%" PRIx64,
                         features, vdev->host_features);
            return -1;
        }
    }

    if (!virtio_device_started(vdev, vdev->status) &&
        !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        vdev->start_on_kick = true;
    }

    rcu_read_lock();
    for (i = 0; i < num; i++) {
        if (vdev->vq[i].vring.desc) {
            uint16_t nheads;

            /*
             * VIRTIO-1 devices migrate desc, used, and avail ring addresses so
             * only the region cache needs to be set up.  Legacy devices need
             * to calculate used and avail ring addresses based on the desc
             * address.
             */
            if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
                virtio_init_region_cache(vdev, i);
            } else {
                virtio_queue_update_rings(vdev, i);
            }

            nheads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx;
            /* Check it isn't doing strange things with descriptor numbers. */
            if (nheads > vdev->vq[i].vring.num) {
                error_report("VQ %d size 0x%x Guest index 0x%x "
                             "inconsistent with Host index 0x%x: delta 0x%x",
                             i, vdev->vq[i].vring.num,
                             vring_avail_idx(&vdev->vq[i]),
                             vdev->vq[i].last_avail_idx, nheads);
                return -1;
            }
            vdev->vq[i].used_idx = vring_used_idx(&vdev->vq[i]);
            vdev->vq[i].shadow_avail_idx = vring_avail_idx(&vdev->vq[i]);

            /*
             * Some devices migrate VirtQueueElements that have been popped
             * from the avail ring but not yet returned to the used ring.
             * Since max ring size < UINT16_MAX it's safe to use modulo
             * UINT16_MAX + 1 subtraction.
             */
            vdev->vq[i].inuse = (uint16_t)(vdev->vq[i].last_avail_idx -
                                vdev->vq[i].used_idx);
            if (vdev->vq[i].inuse > vdev->vq[i].vring.num) {
                error_report("VQ %d size 0x%x < last_avail_idx 0x%x - "
                             "used_idx 0x%x",
                             i, vdev->vq[i].vring.num,
                             vdev->vq[i].last_avail_idx,
                             vdev->vq[i].used_idx);
                return -1;
            }
        }
    }
    rcu_read_unlock();

    return 0;
}
void virtio_cleanup(VirtIODevice *vdev)
{
    qemu_del_vm_change_state_handler(vdev->vmstate);
}

static void virtio_vmstate_change(void *opaque, int running, RunState state)
{
    VirtIODevice *vdev = opaque;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    bool backend_run = running && virtio_device_started(vdev, vdev->status);
    vdev->vm_running = running;

    if (backend_run) {
        virtio_set_status(vdev, vdev->status);
    }

    if (k->vmstate_change) {
        k->vmstate_change(qbus->parent, backend_run);
    }

    if (!backend_run) {
        virtio_set_status(vdev, vdev->status);
    }
}
void virtio_instance_init_common(Object *proxy_obj, void *data,
                                 size_t vdev_size, const char *vdev_name)
{
    DeviceState *vdev = data;

    object_initialize_child(proxy_obj, "virtio-backend", vdev, vdev_size,
                            vdev_name, &error_abort, NULL);
    qdev_alias_all_properties(vdev, proxy_obj);
}
void virtio_init(VirtIODevice *vdev, const char *name,
                 uint16_t device_id, size_t config_size)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    int i;
    int nvectors = k->query_nvectors ? k->query_nvectors(qbus->parent) : 0;

    if (nvectors) {
        vdev->vector_queues =
            g_malloc0(sizeof(*vdev->vector_queues) * nvectors);
    }

    vdev->start_on_kick = false;
    vdev->started = false;
    vdev->device_id = device_id;
    vdev->status = 0;
    atomic_set(&vdev->isr, 0);
    vdev->queue_sel = 0;
    vdev->config_vector = VIRTIO_NO_VECTOR;
    vdev->vq = g_malloc0(sizeof(VirtQueue) * VIRTIO_QUEUE_MAX);
    vdev->vm_running = runstate_is_running();
    vdev->broken = false;
    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        vdev->vq[i].vector = VIRTIO_NO_VECTOR;
        vdev->vq[i].vdev = vdev;
        vdev->vq[i].queue_index = i;
    }

    vdev->name = name;
    vdev->config_len = config_size;
    if (vdev->config_len) {
        vdev->config = g_malloc0(config_size);
    } else {
        vdev->config = NULL;
    }
    vdev->vmstate = qemu_add_vm_change_state_handler(virtio_vmstate_change,
                                                     vdev);
    vdev->device_endian = virtio_default_endian();
    vdev->use_guest_notifier_mask = true;
}
hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

bool virtio_queue_enabled(VirtIODevice *vdev, int n)
{
    return virtio_queue_get_desc_addr(vdev, n) != 0;
}

hwaddr virtio_queue_get_avail_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.avail;
}

hwaddr virtio_queue_get_used_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.used;
}

hwaddr virtio_queue_get_desc_size(VirtIODevice *vdev, int n)
{
    return sizeof(VRingDesc) * vdev->vq[n].vring.num;
}

hwaddr virtio_queue_get_avail_size(VirtIODevice *vdev, int n)
{
    return offsetof(VRingAvail, ring) +
        sizeof(uint16_t) * vdev->vq[n].vring.num;
}

hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n)
{
    return offsetof(VRingUsed, ring) +
        sizeof(VRingUsedElem) * vdev->vq[n].vring.num;
}
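/*
 * Worked example of the size arithmetic above (illustrative only): with the
 * standard split-ring layouts -- a 16-byte VRingDesc, a 4-byte avail header
 * (flags + idx) followed by one uint16_t per entry, and a 4-byte used
 * header followed by one 8-byte VRingUsedElem per entry -- a queue with
 * vring.num == 256 gives:
 *
 *     desc:  16 * 256    == 4096 bytes
 *     avail: 4 + 2 * 256 ==  516 bytes
 *     used:  4 + 8 * 256 == 2052 bytes
 */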
uint16_t virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].last_avail_idx;
}

void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, uint16_t idx)
{
    vdev->vq[n].last_avail_idx = idx;
    vdev->vq[n].shadow_avail_idx = idx;
}
void virtio_queue_restore_last_avail_idx(VirtIODevice *vdev, int n)
{
    rcu_read_lock();
    if (vdev->vq[n].vring.desc) {
        vdev->vq[n].last_avail_idx = vring_used_idx(&vdev->vq[n]);
        vdev->vq[n].shadow_avail_idx = vdev->vq[n].last_avail_idx;
    }
    rcu_read_unlock();
}

void virtio_queue_update_used_idx(VirtIODevice *vdev, int n)
{
    rcu_read_lock();
    if (vdev->vq[n].vring.desc) {
        vdev->vq[n].used_idx = vring_used_idx(&vdev->vq[n]);
    }
    rcu_read_unlock();
}
void virtio_queue_invalidate_signalled_used(VirtIODevice *vdev, int n)
{
    vdev->vq[n].signalled_used_valid = false;
}

VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n)
{
    return vdev->vq + n;
}

uint16_t virtio_get_queue_index(VirtQueue *vq)
{
    return vq->queue_index;
}
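/*
 * Note (illustrative): virtio_get_queue() and virtio_get_queue_index() are
 * inverses, because virtio_init() stores each queue's own index in
 * queue_index:
 *
 *     VirtQueue *vq = virtio_get_queue(vdev, n);
 *     assert(virtio_get_queue_index(vq) == n);
 */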
static void virtio_queue_guest_notifier_read(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, guest_notifier);
    if (event_notifier_test_and_clear(n)) {
        virtio_irq(vq);
    }
}
void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
                                                bool with_irqfd)
{
    if (assign && !with_irqfd) {
        event_notifier_set_handler(&vq->guest_notifier,
                                   virtio_queue_guest_notifier_read);
    } else {
        event_notifier_set_handler(&vq->guest_notifier, NULL);
    }
    if (!assign) {
        /* Test and clear notifier before closing it,
         * in case poll callback didn't have time to run. */
        virtio_queue_guest_notifier_read(&vq->guest_notifier);
    }
}
EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq)
{
    return &vq->guest_notifier;
}

static void virtio_queue_host_notifier_aio_read(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
    if (event_notifier_test_and_clear(n)) {
        virtio_queue_notify_aio_vq(vq);
    }
}

static void virtio_queue_host_notifier_aio_poll_begin(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);

    virtio_queue_set_notification(vq, 0);
}
static bool virtio_queue_host_notifier_aio_poll(void *opaque)
{
    EventNotifier *n = opaque;
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
    bool progress;

    if (!vq->vring.desc || virtio_queue_empty(vq)) {
        return false;
    }

    progress = virtio_queue_notify_aio_vq(vq);

    /* In case the handler function re-enabled notifications */
    virtio_queue_set_notification(vq, 0);
    return progress;
}
static void virtio_queue_host_notifier_aio_poll_end(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);

    /* Caller polls once more after this to catch requests that race with us */
    virtio_queue_set_notification(vq, 1);
}
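/*
 * How the three callbacks above fit together (a summary, not a verbatim
 * contract from the AioContext documentation): while the event loop is in
 * its polling phase it calls poll_begin, which suppresses guest->host
 * notifications so the guest does not write an eventfd nobody is waiting
 * on; the poll callback then checks the avail ring directly and processes
 * requests without any eventfd round trip; poll_end re-enables
 * notifications when polling stops, and the caller polls once more to
 * close the race window.
 */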
void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx,
                                                VirtIOHandleAIOOutput handle_output)
{
    if (handle_output) {
        vq->handle_aio_output = handle_output;
        aio_set_event_notifier(ctx, &vq->host_notifier, true,
                               virtio_queue_host_notifier_aio_read,
                               virtio_queue_host_notifier_aio_poll);
        aio_set_event_notifier_poll(ctx, &vq->host_notifier,
                                    virtio_queue_host_notifier_aio_poll_begin,
                                    virtio_queue_host_notifier_aio_poll_end);
    } else {
        aio_set_event_notifier(ctx, &vq->host_notifier, true, NULL, NULL);
        /* Test and clear notifier after disabling event,
         * in case poll callback didn't have time to run. */
        virtio_queue_host_notifier_aio_read(&vq->host_notifier);
        vq->handle_aio_output = NULL;
    }
}
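/*
 * Illustrative usage (a sketch; taking the AioContext lock around the call
 * follows the dataplane pattern and is an assumption, not a requirement
 * stated here):
 *
 *     aio_context_acquire(ctx);
 *     virtio_queue_aio_set_host_notifier_handler(vq, ctx, handle_output);
 *     aio_context_release(ctx);
 *
 * Passing NULL as the handler detaches the queue from ctx again; the
 * function then drains any pending notification itself.
 */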
void virtio_queue_host_notifier_read(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
    if (event_notifier_test_and_clear(n)) {
        virtio_queue_notify_vq(vq);
    }
}

EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq)
{
    return &vq->host_notifier;
}
int virtio_queue_set_host_notifier_mr(VirtIODevice *vdev, int n,
                                      MemoryRegion *mr, bool assign)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    if (k->set_host_notifier_mr) {
        return k->set_host_notifier_mr(qbus->parent, n, mr, assign);
    }

    return -1;
}
void virtio_device_set_child_bus_name(VirtIODevice *vdev, char *bus_name)
{
    g_free(vdev->bus_name);
    vdev->bus_name = g_strdup(bus_name);
}
void GCC_FMT_ATTR(2, 3) virtio_error(VirtIODevice *vdev, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    error_vreport(fmt, ap);
    va_end(ap);

    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        vdev->status = vdev->status | VIRTIO_CONFIG_S_NEEDS_RESET;
        virtio_notify_config(vdev);
    }

    vdev->broken = true;
}
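/*
 * Illustrative usage (the message text and variables are made up for the
 * example): device code calls virtio_error() instead of aborting when the
 * guest hands it a malformed ring.  The device is marked broken and, for
 * VIRTIO 1.0 devices, a reset is requested via the NEEDS_RESET status bit:
 *
 *     if (desc_len > max_len) {
 *         virtio_error(vdev, "descriptor length %u exceeds %u",
 *                      desc_len, max_len);
 *         return;
 *     }
 */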
static void virtio_memory_listener_commit(MemoryListener *listener)
{
    VirtIODevice *vdev = container_of(listener, VirtIODevice, listener);
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0) {
            continue;
        }
        virtio_init_region_cache(vdev, i);
    }
}
static void virtio_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
    Error *err = NULL;

    /* Devices should either use vmsd or the load/save methods */
    assert(!vdc->vmsd || !vdc->load);

    if (vdc->realize != NULL) {
        vdc->realize(dev, &err);
        if (err != NULL) {
            error_propagate(errp, err);
            return;
        }
    }

    virtio_bus_device_plugged(vdev, &err);
    if (err != NULL) {
        error_propagate(errp, err);
        vdc->unrealize(dev, NULL);
        return;
    }

    vdev->listener.commit = virtio_memory_listener_commit;
    memory_listener_register(&vdev->listener, vdev->dma_as);
}
static void virtio_device_unrealize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
    Error *err = NULL;

    virtio_bus_device_unplugged(vdev);

    if (vdc->unrealize != NULL) {
        vdc->unrealize(dev, &err);
        if (err != NULL) {
            error_propagate(errp, err);
            return;
        }
    }

    g_free(vdev->bus_name);
    vdev->bus_name = NULL;
}
static void virtio_device_free_virtqueues(VirtIODevice *vdev)
{
    int i;

    if (!vdev->vq) {
        return;
    }

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0) {
            continue;
        }
        virtio_virtqueue_reset_region_cache(&vdev->vq[i]);
    }
    g_free(vdev->vq);
}
static void virtio_device_instance_finalize(Object *obj)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(obj);

    memory_listener_unregister(&vdev->listener);
    virtio_device_free_virtqueues(vdev);

    g_free(vdev->config);
    g_free(vdev->vector_queues);
}
static Property virtio_properties[] = {
    DEFINE_VIRTIO_COMMON_FEATURES(VirtIODevice, host_features),
    DEFINE_PROP_BOOL("use-started", VirtIODevice, use_started, true),
    DEFINE_PROP_END_OF_LIST(),
};
static int virtio_device_start_ioeventfd_impl(VirtIODevice *vdev)
{
    VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev)));
    int i, n, r, err;

    memory_region_transaction_begin();
    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
        VirtQueue *vq = &vdev->vq[n];
        if (!virtio_queue_get_num(vdev, n)) {
            continue;
        }
        r = virtio_bus_set_host_notifier(qbus, n, true);
        if (r < 0) {
            err = r;
            goto assign_error;
        }
        event_notifier_set_handler(&vq->host_notifier,
                                   virtio_queue_host_notifier_read);
    }

    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
        /* Kick right away to begin processing requests already in vring */
        VirtQueue *vq = &vdev->vq[n];
        if (!vq->vring.num) {
            continue;
        }
        event_notifier_set(&vq->host_notifier);
    }
    memory_region_transaction_commit();
    return 0;

assign_error:
    i = n; /* save n for a second iteration after transaction is committed. */
    while (--n >= 0) {
        VirtQueue *vq = &vdev->vq[n];
        if (!virtio_queue_get_num(vdev, n)) {
            continue;
        }

        event_notifier_set_handler(&vq->host_notifier, NULL);
        r = virtio_bus_set_host_notifier(qbus, n, false);
        assert(r >= 0);
    }
    memory_region_transaction_commit();

    while (--i >= 0) {
        if (!virtio_queue_get_num(vdev, i)) {
            continue;
        }
        virtio_bus_cleanup_host_notifier(qbus, i);
    }
    return err;
}
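/*
 * Design note (a summary, not from the original comments): notifier
 * assignment is wrapped in memory_region_transaction_begin()/commit() so
 * that the up-to-VIRTIO_QUEUE_MAX ioeventfd registrations are folded into
 * a single memory-map update rather than one per queue, which matters for
 * multiqueue devices.  The error path mirrors this: notifiers are
 * unassigned inside a transaction first and only cleaned up afterwards,
 * using the saved index i.
 */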
int virtio_device_start_ioeventfd(VirtIODevice *vdev)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);

    return virtio_bus_start_ioeventfd(vbus);
}
static void virtio_device_stop_ioeventfd_impl(VirtIODevice *vdev)
{
    VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev)));
    int n, r;

    memory_region_transaction_begin();
    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
        VirtQueue *vq = &vdev->vq[n];

        if (!virtio_queue_get_num(vdev, n)) {
            continue;
        }
        event_notifier_set_handler(&vq->host_notifier, NULL);
        r = virtio_bus_set_host_notifier(qbus, n, false);
        assert(r >= 0);
    }
    memory_region_transaction_commit();

    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            continue;
        }
        virtio_bus_cleanup_host_notifier(qbus, n);
    }
}
void virtio_device_stop_ioeventfd(VirtIODevice *vdev)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);

    virtio_bus_stop_ioeventfd(vbus);
}

int virtio_device_grab_ioeventfd(VirtIODevice *vdev)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);

    return virtio_bus_grab_ioeventfd(vbus);
}

void virtio_device_release_ioeventfd(VirtIODevice *vdev)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);

    virtio_bus_release_ioeventfd(vbus);
}
static void virtio_device_class_init(ObjectClass *klass, void *data)
{
    /* Set the default value here. */
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = virtio_device_realize;
    dc->unrealize = virtio_device_unrealize;
    dc->bus_type = TYPE_VIRTIO_BUS;
    dc->props = virtio_properties;
    vdc->start_ioeventfd = virtio_device_start_ioeventfd_impl;
    vdc->stop_ioeventfd = virtio_device_stop_ioeventfd_impl;

    vdc->legacy_features |= VIRTIO_LEGACY_FEATURES;
}
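/*
 * Illustrative subclass wiring (a sketch; the "my_virtio" names are
 * hypothetical): concrete devices override the VirtioDeviceClass hooks in
 * their own class_init rather than DeviceClass::realize directly, since
 * virtio_device_realize() above dispatches to vdc->realize:
 *
 *     static void my_virtio_class_init(ObjectClass *klass, void *data)
 *     {
 *         VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
 *
 *         vdc->realize = my_virtio_device_realize;
 *         vdc->unrealize = my_virtio_device_unrealize;
 *         vdc->get_features = my_virtio_get_features;
 *     }
 */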
bool virtio_device_ioeventfd_enabled(VirtIODevice *vdev)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);

    return virtio_bus_ioeventfd_enabled(vbus);
}
static const TypeInfo virtio_device_info = {
    .name = TYPE_VIRTIO_DEVICE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(VirtIODevice),
    .class_init = virtio_device_class_init,
    .instance_finalize = virtio_device_instance_finalize,
    .abstract = true,
    .class_size = sizeof(VirtioDeviceClass),
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_device_info);
}

type_init(virtio_register_types)