/*
 * Copyright IBM, Corp. 2007
 *
 * Anthony Liguori <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "exec/address-spaces.h"
#include "qemu/error-report.h"
#include "hw/virtio/virtio.h"
#include "qemu/atomic.h"
#include "hw/virtio/virtio-bus.h"
#include "migration/migration.h"
#include "hw/virtio/virtio-access.h"
#include "sysemu/dma.h"

/*
 * The alignment to use between consumer and producer parts of vring.
 * x86 pagesize again. This is the default, used by transports like PCI
 * which don't provide a means for the guest to tell the host the alignment.
 */
#define VIRTIO_PCI_VRING_ALIGN         4096
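/*
 * Editorial note: with this default, a legacy ring is laid out as three
 * contiguous regions (see virtio_queue_update_rings() below): num
 * descriptors of 16 bytes each, then the avail ring (flags, idx, num
 * two-byte entries), then, starting at the next 4096-byte boundary,
 * the used ring.
 */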
/* Struct bodies below are restored from the field accesses elsewhere in this
 * file (desc.addr/len/flags/next, uelem.id/len, offsetof uses). */
typedef struct VRingDesc
{
    uint64_t addr;
    uint32_t len;
    uint16_t flags;
    uint16_t next;
} VRingDesc;

typedef struct VRingAvail
{
    uint16_t flags;
    uint16_t idx;
    uint16_t ring[0];
} VRingAvail;

typedef struct VRingUsedElem
{
    uint32_t id;
    uint32_t len;
} VRingUsedElem;

typedef struct VRingUsed
{
    uint16_t flags;
    uint16_t idx;
    VRingUsedElem ring[0];
} VRingUsed;

typedef struct VRingMemoryRegionCaches {
    struct rcu_head rcu;
    MemoryRegionCache desc;
    MemoryRegionCache avail;
    MemoryRegionCache used;
} VRingMemoryRegionCaches;
typedef struct VRing
{
    unsigned int num;
    unsigned int num_default;
    unsigned int align;
    hwaddr desc;
    hwaddr avail;
    hwaddr used;
    VRingMemoryRegionCaches *caches;
} VRing;

struct VirtQueue
{
    VRing vring;

    /* Next head to pop */
    uint16_t last_avail_idx;

    /* Last avail_idx read from VQ. */
    uint16_t shadow_avail_idx;

    uint16_t used_idx;

    /* Last used index value we have signalled on */
    uint16_t signalled_used;

    /* Whether signalled_used is valid */
    bool signalled_used_valid;

    /* Notification enabled? */
    bool notification;

    uint16_t queue_index;

    unsigned int inuse;

    uint16_t vector;
    VirtIOHandleOutput handle_output;
    VirtIOHandleAIOOutput handle_aio_output;
    VirtIODevice *vdev;
    EventNotifier guest_notifier;
    EventNotifier host_notifier;
    QLIST_ENTRY(VirtQueue) node;
};
static void virtio_free_region_cache(VRingMemoryRegionCaches *caches)
{
    if (!caches) {
        return;
    }

    address_space_cache_destroy(&caches->desc);
    address_space_cache_destroy(&caches->avail);
    address_space_cache_destroy(&caches->used);
    g_free(caches);
}
static void virtio_init_region_cache(VirtIODevice *vdev, int n)
{
    VirtQueue *vq = &vdev->vq[n];
    VRingMemoryRegionCaches *old = vq->vring.caches;
    VRingMemoryRegionCaches *new;
    hwaddr addr, size;
    int event_size;

    event_size = virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;

    addr = vq->vring.desc;
    if (!addr) {
        return;
    }
    new = g_new0(VRingMemoryRegionCaches, 1);
    size = virtio_queue_get_desc_size(vdev, n);
    address_space_cache_init(&new->desc, vdev->dma_as,
                             addr, size, false);

    size = virtio_queue_get_used_size(vdev, n) + event_size;
    address_space_cache_init(&new->used, vdev->dma_as,
                             vq->vring.used, size, true);

    size = virtio_queue_get_avail_size(vdev, n) + event_size;
    address_space_cache_init(&new->avail, vdev->dma_as,
                             vq->vring.avail, size, false);

    atomic_rcu_set(&vq->vring.caches, new);
    if (old) {
        call_rcu(old, virtio_free_region_cache, rcu);
    }
}
/* virt queue functions */
void virtio_queue_update_rings(VirtIODevice *vdev, int n)
{
    VRing *vring = &vdev->vq[n].vring;

    if (!vring->num || !vring->desc || !vring->align) {
        /* not yet setup -> nothing to do */
        return;
    }
    vring->avail = vring->desc + vring->num * sizeof(VRingDesc);
    vring->used = vring_align(vring->avail +
                              offsetof(VRingAvail, ring[vring->num]),
                              vring->align);
    virtio_init_region_cache(vdev, n);
}
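/*
 * Worked example of the arithmetic above, for a legacy queue with num = 256
 * and a guest-chosen desc base of 0x10000 (illustrative addresses only):
 *   avail = 0x10000 + 256 * 16                        = 0x11000
 *   used  = vring_align(0x11000 + 4 + 2 * 256, 4096)  = 0x12000
 */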
/* Called within rcu_read_lock().  */
static void vring_desc_read(VirtIODevice *vdev, VRingDesc *desc,
                            MemoryRegionCache *cache, int i)
{
    address_space_read_cached(cache, i * sizeof(VRingDesc),
                              desc, sizeof(VRingDesc));
    virtio_tswap64s(vdev, &desc->addr);
    virtio_tswap32s(vdev, &desc->len);
    virtio_tswap16s(vdev, &desc->flags);
    virtio_tswap16s(vdev, &desc->next);
}
static VRingMemoryRegionCaches *vring_get_region_caches(struct VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
    assert(caches != NULL);
    return caches;
}
/* Called within rcu_read_lock().  */
static inline uint16_t vring_avail_flags(VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingAvail, flags);
    return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
}
/* Called within rcu_read_lock().  */
static inline uint16_t vring_avail_idx(VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingAvail, idx);
    vq->shadow_avail_idx = virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
    return vq->shadow_avail_idx;
}
/* Called within rcu_read_lock().  */
static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingAvail, ring[i]);
    return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
}

/* Called within rcu_read_lock().  */
static inline uint16_t vring_get_used_event(VirtQueue *vq)
{
    return vring_avail_ring(vq, vq->vring.num);
}
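/*
 * Editorial note: with VIRTIO_RING_F_EVENT_IDX, the driver publishes its
 * used-event value in the slot just past the avail ring
 * (avail->ring[vring.num]), which is why the read above indexes one entry
 * beyond the ring.
 */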
/* Called within rcu_read_lock().  */
static inline void vring_used_write(VirtQueue *vq, VRingUsedElem *uelem,
                                    int i)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingUsed, ring[i]);
    virtio_tswap32s(vq->vdev, &uelem->id);
    virtio_tswap32s(vq->vdev, &uelem->len);
    address_space_write_cached(&caches->used, pa, uelem, sizeof(VRingUsedElem));
    address_space_cache_invalidate(&caches->used, pa, sizeof(VRingUsedElem));
}
/* Called within rcu_read_lock().  */
static uint16_t vring_used_idx(VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingUsed, idx);
    return virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
}
/* Called within rcu_read_lock().  */
static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingUsed, idx);
    virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
    address_space_cache_invalidate(&caches->used, pa, sizeof(val));
}
/* Called within rcu_read_lock().  */
static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    VirtIODevice *vdev = vq->vdev;
    hwaddr pa = offsetof(VRingUsed, flags);
    uint16_t flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);

    virtio_stw_phys_cached(vdev, &caches->used, pa, flags | mask);
    address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
}
/* Called within rcu_read_lock().  */
static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    VirtIODevice *vdev = vq->vdev;
    hwaddr pa = offsetof(VRingUsed, flags);
    uint16_t flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);

    virtio_stw_phys_cached(vdev, &caches->used, pa, flags & ~mask);
    address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
}
/* Called within rcu_read_lock().  */
static inline void vring_set_avail_event(VirtQueue *vq, uint16_t val)
{
    VRingMemoryRegionCaches *caches;
    hwaddr pa;
    if (!vq->notification) {
        return;
    }

    caches = vring_get_region_caches(vq);
    pa = offsetof(VRingUsed, ring[vq->vring.num]);
    virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
    address_space_cache_invalidate(&caches->used, pa, sizeof(val));
}
void virtio_queue_set_notification(VirtQueue *vq, int enable)
{
    vq->notification = enable;

    if (!vq->vring.desc) {
        return;
    }

    rcu_read_lock();
    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vring_avail_idx(vq));
    } else if (enable) {
        vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
    } else {
        vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
    }
    if (enable) {
        /* Expose avail event/used flags before caller checks the avail idx. */
        smp_mb();
    }
    rcu_read_unlock();
}
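/*
 * Editorial note on the two suppression mechanisms used above: without
 * EVENT_IDX the device sets VRING_USED_F_NO_NOTIFY to ask the driver not to
 * kick; with EVENT_IDX it instead writes the current avail index into the
 * avail-event slot, so the driver only kicks once it moves past that index.
 */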
int virtio_queue_ready(VirtQueue *vq)
{
    return vq->vring.avail != 0;
}
/* Fetch avail_idx from VQ memory only when we really need to know if
 * guest has added some buffers.
 * Called within rcu_read_lock().  */
static int virtio_queue_empty_rcu(VirtQueue *vq)
{
    if (unlikely(!vq->vring.avail)) {
        return 1;
    }

    if (vq->shadow_avail_idx != vq->last_avail_idx) {
        return 0;
    }

    return vring_avail_idx(vq) == vq->last_avail_idx;
}
int virtio_queue_empty(VirtQueue *vq)
{
    bool empty;

    if (unlikely(!vq->vring.avail)) {
        return 1;
    }

    if (vq->shadow_avail_idx != vq->last_avail_idx) {
        return 0;
    }

    rcu_read_lock();
    empty = vring_avail_idx(vq) == vq->last_avail_idx;
    rcu_read_unlock();
    return empty;
}
static void virtqueue_unmap_sg(VirtQueue *vq, const VirtQueueElement *elem,
                               unsigned int len)
{
    AddressSpace *dma_as = vq->vdev->dma_as;
    unsigned int offset;
    int i;

    offset = 0;
    for (i = 0; i < elem->in_num; i++) {
        size_t size = MIN(len - offset, elem->in_sg[i].iov_len);

        dma_memory_unmap(dma_as, elem->in_sg[i].iov_base,
                         elem->in_sg[i].iov_len,
                         DMA_DIRECTION_FROM_DEVICE, size);

        offset += size;
    }

    for (i = 0; i < elem->out_num; i++)
        dma_memory_unmap(dma_as, elem->out_sg[i].iov_base,
                         elem->out_sg[i].iov_len,
                         DMA_DIRECTION_TO_DEVICE,
                         elem->out_sg[i].iov_len);
}
/* virtqueue_detach_element:
 * @vq: The #VirtQueue
 * @elem: The #VirtQueueElement
 * @len: number of bytes written
 *
 * Detach the element from the virtqueue.  This function is suitable for device
 * reset or other situations where a #VirtQueueElement is simply freed and will
 * not be pushed or discarded.
 */
void virtqueue_detach_element(VirtQueue *vq, const VirtQueueElement *elem,
                              unsigned int len)
{
    vq->inuse--;
    virtqueue_unmap_sg(vq, elem, len);
}
/* virtqueue_unpop:
 * @vq: The #VirtQueue
 * @elem: The #VirtQueueElement
 * @len: number of bytes written
 *
 * Pretend the most recent element wasn't popped from the virtqueue.  The next
 * call to virtqueue_pop() will refetch the element.
 */
void virtqueue_unpop(VirtQueue *vq, const VirtQueueElement *elem,
                     unsigned int len)
{
    vq->last_avail_idx--;
    virtqueue_detach_element(vq, elem, len);
}
/* virtqueue_rewind:
 * @vq: The #VirtQueue
 * @num: Number of elements to push back
 *
 * Pretend that elements weren't popped from the virtqueue.  The next
 * virtqueue_pop() will refetch the oldest element.
 *
 * Use virtqueue_unpop() instead if you have a VirtQueueElement.
 *
 * Returns: true on success, false if @num is greater than the number of in use
 * elements.
 */
bool virtqueue_rewind(VirtQueue *vq, unsigned int num)
{
    if (num > vq->inuse) {
        return false;
    }
    vq->last_avail_idx -= num;
    vq->inuse -= num;
    return true;
}
/* Called within rcu_read_lock().  */
void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len, unsigned int idx)
{
    VRingUsedElem uelem;

    trace_virtqueue_fill(vq, elem, len, idx);

    virtqueue_unmap_sg(vq, elem, len);

    if (unlikely(vq->vdev->broken)) {
        return;
    }

    if (unlikely(!vq->vring.used)) {
        return;
    }

    idx = (idx + vq->used_idx) % vq->vring.num;

    uelem.id = elem->index;
    uelem.len = len;
    vring_used_write(vq, &uelem, idx);
}
/* Called within rcu_read_lock().  */
void virtqueue_flush(VirtQueue *vq, unsigned int count)
{
    uint16_t old, new;

    if (unlikely(vq->vdev->broken)) {
        vq->inuse -= count;
        return;
    }

    if (unlikely(!vq->vring.used)) {
        return;
    }

    /* Make sure buffer is written before we update index. */
    smp_wmb();
    trace_virtqueue_flush(vq, count);
    old = vq->used_idx;
    new = old + count;
    vring_used_idx_set(vq, new);
    vq->inuse -= count;
    if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old)))
        vq->signalled_used_valid = false;
}
void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len)
{
    rcu_read_lock();
    virtqueue_fill(vq, elem, len, 0);
    virtqueue_flush(vq, 1);
    rcu_read_unlock();
}
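/*
 * Typical device-side pattern built on pop/push/notify (editor's sketch;
 * process_request() is a hypothetical placeholder, not part of this file):
 *
 *   VirtQueueElement *elem;
 *
 *   while ((elem = virtqueue_pop(vq, sizeof(VirtQueueElement)))) {
 *       size_t written = process_request(elem->out_sg, elem->out_num,
 *                                        elem->in_sg, elem->in_num);
 *       virtqueue_push(vq, elem, written);
 *       g_free(elem);
 *   }
 *   virtio_notify(vdev, vq);
 */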
/* Called within rcu_read_lock().  */
static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
{
    uint16_t num_heads = vring_avail_idx(vq) - idx;

    /* Check it isn't doing very strange things with descriptor numbers. */
    if (num_heads > vq->vring.num) {
        virtio_error(vq->vdev, "Guest moved used index from %u to %u",
                     idx, vq->shadow_avail_idx);
        return -EINVAL;
    }
    /* On success, callers read a descriptor at vq->last_avail_idx.
     * Make sure descriptor read does not bypass avail index read. */
    if (num_heads) {
        smp_rmb();
    }

    return num_heads;
}
/* Called within rcu_read_lock().  */
static bool virtqueue_get_head(VirtQueue *vq, unsigned int idx,
                               unsigned int *head)
{
    /* Grab the next descriptor number they're advertising, and increment
     * the index we've seen. */
    *head = vring_avail_ring(vq, idx % vq->vring.num);

    /* If their number is silly, that's a fatal mistake. */
    if (*head >= vq->vring.num) {
        virtio_error(vq->vdev, "Guest says index %u is available", *head);
        return false;
    }

    return true;
}

enum {
    VIRTQUEUE_READ_DESC_ERROR = -1,
    VIRTQUEUE_READ_DESC_DONE = 0,   /* end of chain */
    VIRTQUEUE_READ_DESC_MORE = 1,   /* more buffers in chain */
};
static int virtqueue_read_next_desc(VirtIODevice *vdev, VRingDesc *desc,
                                    MemoryRegionCache *desc_cache, unsigned int max,
                                    unsigned int *next)
{
    /* If this descriptor says it doesn't chain, we're done. */
    if (!(desc->flags & VRING_DESC_F_NEXT)) {
        return VIRTQUEUE_READ_DESC_DONE;
    }

    /* Check they're not leading us off end of descriptors. */
    *next = desc->next;
    /* Make sure compiler knows to grab that: we don't want it changing! */
    smp_wmb();

    if (*next >= max) {
        virtio_error(vdev, "Desc next is %u", *next);
        return VIRTQUEUE_READ_DESC_ERROR;
    }

    vring_desc_read(vdev, desc, desc_cache, *next);
    return VIRTQUEUE_READ_DESC_MORE;
}
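/*
 * Editorial note: callers walk a descriptor chain with a loop of this shape
 * (compare virtqueue_get_avail_bytes() and virtqueue_pop() below):
 *
 *   do {
 *       ... account for or map desc ...
 *       rc = virtqueue_read_next_desc(vdev, &desc, desc_cache, max, &i);
 *   } while (rc == VIRTQUEUE_READ_DESC_MORE);
 */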
void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
                               unsigned int *out_bytes,
                               unsigned max_in_bytes, unsigned max_out_bytes)
{
    VirtIODevice *vdev = vq->vdev;
    unsigned int max, idx;
    unsigned int total_bufs, in_total, out_total;
    VRingMemoryRegionCaches *caches;
    MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID;
    int64_t len = 0;
    int rc;

    if (unlikely(!vq->vring.desc)) {
        if (in_bytes) {
            *in_bytes = 0;
        }
        if (out_bytes) {
            *out_bytes = 0;
        }
        return;
    }

    rcu_read_lock();
    idx = vq->last_avail_idx;
    total_bufs = in_total = out_total = 0;

    max = vq->vring.num;
    caches = vring_get_region_caches(vq);
    if (caches->desc.len < max * sizeof(VRingDesc)) {
        virtio_error(vdev, "Cannot map descriptor ring");
        goto err;
    }

    while ((rc = virtqueue_num_heads(vq, idx)) > 0) {
        MemoryRegionCache *desc_cache = &caches->desc;
        unsigned int num_bufs;
        VRingDesc desc;
        unsigned int i;

        num_bufs = total_bufs;

        if (!virtqueue_get_head(vq, idx++, &i)) {
            goto err;
        }

        vring_desc_read(vdev, &desc, desc_cache, i);

        if (desc.flags & VRING_DESC_F_INDIRECT) {
            if (desc.len % sizeof(VRingDesc)) {
                virtio_error(vdev, "Invalid size for indirect buffer table");
                goto err;
            }

            /* If we've got too many, that implies a descriptor loop. */
            if (num_bufs >= max) {
                virtio_error(vdev, "Looped descriptor");
                goto err;
            }

            /* loop over the indirect descriptor table */
            len = address_space_cache_init(&indirect_desc_cache,
                                           vdev->dma_as,
                                           desc.addr, desc.len, false);
            desc_cache = &indirect_desc_cache;
            if (len < desc.len) {
                virtio_error(vdev, "Cannot map indirect buffer");
                goto err;
            }

            max = desc.len / sizeof(VRingDesc);
            num_bufs = i = 0;
            vring_desc_read(vdev, &desc, desc_cache, i);
        }

        do {
            /* If we've got too many, that implies a descriptor loop. */
            if (++num_bufs > max) {
                virtio_error(vdev, "Looped descriptor");
                goto err;
            }

            if (desc.flags & VRING_DESC_F_WRITE) {
                in_total += desc.len;
            } else {
                out_total += desc.len;
            }
            if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
                goto done;
            }

            rc = virtqueue_read_next_desc(vdev, &desc, desc_cache, max, &i);
        } while (rc == VIRTQUEUE_READ_DESC_MORE);

        if (rc == VIRTQUEUE_READ_DESC_ERROR) {
            goto err;
        }

        if (desc_cache == &indirect_desc_cache) {
            address_space_cache_destroy(&indirect_desc_cache);
            max = vq->vring.num;
        }

        total_bufs = num_bufs;
    }

    if (rc < 0) {
        goto err;
    }

done:
    address_space_cache_destroy(&indirect_desc_cache);
    if (in_bytes) {
        *in_bytes = in_total;
    }
    if (out_bytes) {
        *out_bytes = out_total;
    }
    rcu_read_unlock();
    return;

err:
    in_total = out_total = 0;
    goto done;
}
int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes,
                          unsigned int out_bytes)
{
    unsigned int in_total, out_total;

    virtqueue_get_avail_bytes(vq, &in_total, &out_total, in_bytes, out_bytes);
    return in_bytes <= in_total && out_bytes <= out_total;
}
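/*
 * Usage sketch (editorial; hypothetical device code): a device that needs a
 * 4KiB device-writable buffer before popping can check
 *
 *   if (!virtqueue_avail_bytes(vq, 4096, 0)) {
 *       return;  // wait until the guest posts larger buffers
 *   }
 *
 * in_bytes counts device-writable (VRING_DESC_F_WRITE) space and out_bytes
 * counts device-readable space, matching virtqueue_get_avail_bytes().
 */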
static bool virtqueue_map_desc(VirtIODevice *vdev, unsigned int *p_num_sg,
                               hwaddr *addr, struct iovec *iov,
                               unsigned int max_num_sg, bool is_write,
                               hwaddr pa, size_t sz)
{
    bool ok = false;
    unsigned num_sg = *p_num_sg;
    assert(num_sg <= max_num_sg);

    if (!sz) {
        virtio_error(vdev, "virtio: zero sized buffers are not allowed");
        goto out;
    }

    while (sz) {
        hwaddr len = sz;

        if (num_sg == max_num_sg) {
            virtio_error(vdev, "virtio: too many write descriptors in "
                               "indirect table");
            goto out;
        }

        iov[num_sg].iov_base = dma_memory_map(vdev->dma_as, pa, &len,
                                              is_write ?
                                              DMA_DIRECTION_FROM_DEVICE :
                                              DMA_DIRECTION_TO_DEVICE);
        if (!iov[num_sg].iov_base) {
            virtio_error(vdev, "virtio: bogus descriptor or out of resources");
            goto out;
        }

        iov[num_sg].iov_len = len;
        addr[num_sg] = pa;

        sz -= len;
        pa += len;
        num_sg++;
    }
    ok = true;

out:
    *p_num_sg = num_sg;
    return ok;
}
/* Only used by error code paths before we have a VirtQueueElement (therefore
 * virtqueue_unmap_sg() can't be used).  Assumes buffers weren't written to
 * guest memory. */
static void virtqueue_undo_map_desc(unsigned int out_num, unsigned int in_num,
                                    struct iovec *iov)
{
    unsigned int i;

    for (i = 0; i < out_num + in_num; i++) {
        int is_write = i >= out_num;

        cpu_physical_memory_unmap(iov->iov_base, iov->iov_len, is_write, 0);
        iov++;
    }
}
static void virtqueue_map_iovec(VirtIODevice *vdev, struct iovec *sg,
                                hwaddr *addr, unsigned int *num_sg,
                                int is_write)
{
    unsigned int i;
    hwaddr len;

    for (i = 0; i < *num_sg; i++) {
        len = sg[i].iov_len;
        sg[i].iov_base = dma_memory_map(vdev->dma_as,
                                        addr[i], &len, is_write ?
                                        DMA_DIRECTION_FROM_DEVICE :
                                        DMA_DIRECTION_TO_DEVICE);
        if (!sg[i].iov_base) {
            error_report("virtio: error trying to map MMIO memory");
            exit(1);
        }
        if (len != sg[i].iov_len) {
            error_report("virtio: unexpected memory split");
            exit(1);
        }
    }
}

void virtqueue_map(VirtIODevice *vdev, VirtQueueElement *elem)
{
    virtqueue_map_iovec(vdev, elem->in_sg, elem->in_addr, &elem->in_num, 1);
    virtqueue_map_iovec(vdev, elem->out_sg, elem->out_addr, &elem->out_num, 0);
}
static void *virtqueue_alloc_element(size_t sz, unsigned out_num, unsigned in_num)
{
    VirtQueueElement *elem;
    size_t in_addr_ofs = QEMU_ALIGN_UP(sz, __alignof__(elem->in_addr[0]));
    size_t out_addr_ofs = in_addr_ofs + in_num * sizeof(elem->in_addr[0]);
    size_t out_addr_end = out_addr_ofs + out_num * sizeof(elem->out_addr[0]);
    size_t in_sg_ofs = QEMU_ALIGN_UP(out_addr_end, __alignof__(elem->in_sg[0]));
    size_t out_sg_ofs = in_sg_ofs + in_num * sizeof(elem->in_sg[0]);
    size_t out_sg_end = out_sg_ofs + out_num * sizeof(elem->out_sg[0]);

    assert(sz >= sizeof(VirtQueueElement));
    elem = g_malloc(out_sg_end);
    elem->out_num = out_num;
    elem->in_num = in_num;
    elem->in_addr = (void *)elem + in_addr_ofs;
    elem->out_addr = (void *)elem + out_addr_ofs;
    elem->in_sg = (void *)elem + in_sg_ofs;
    elem->out_sg = (void *)elem + out_sg_ofs;
    return elem;
}
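/*
 * Resulting single-allocation layout (illustration of the offsets computed
 * above):
 *
 *   [VirtQueueElement, sz bytes (possibly a larger device-specific struct)]
 *   [in_addr:  in_num  x hwaddr]
 *   [out_addr: out_num x hwaddr]
 *   [in_sg:    in_num  x struct iovec]
 *   [out_sg:   out_num x struct iovec]
 *
 * so a single g_malloc()/g_free() pair covers the element and its arrays.
 */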
void *virtqueue_pop(VirtQueue *vq, size_t sz)
{
    unsigned int i, head, max;
    VRingMemoryRegionCaches *caches;
    MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID;
    MemoryRegionCache *desc_cache;
    int64_t len;
    VirtIODevice *vdev = vq->vdev;
    VirtQueueElement *elem = NULL;
    unsigned out_num, in_num;
    hwaddr addr[VIRTQUEUE_MAX_SIZE];
    struct iovec iov[VIRTQUEUE_MAX_SIZE];
    VRingDesc desc;
    int rc;

    if (unlikely(vdev->broken)) {
        return NULL;
    }
    rcu_read_lock();
    if (virtio_queue_empty_rcu(vq)) {
        goto done;
    }
    /* Needed after virtio_queue_empty(), see comment in
     * virtqueue_num_heads(). */
    smp_rmb();

    /* When we start there are none of either input nor output. */
    out_num = in_num = 0;

    max = vq->vring.num;

    if (vq->inuse >= vq->vring.num) {
        virtio_error(vdev, "Virtqueue size exceeded");
        goto done;
    }

    if (!virtqueue_get_head(vq, vq->last_avail_idx++, &head)) {
        goto done;
    }

    if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vq->last_avail_idx);
    }

    i = head;

    caches = vring_get_region_caches(vq);
    if (caches->desc.len < max * sizeof(VRingDesc)) {
        virtio_error(vdev, "Cannot map descriptor ring");
        goto done;
    }

    desc_cache = &caches->desc;
    vring_desc_read(vdev, &desc, desc_cache, i);
    if (desc.flags & VRING_DESC_F_INDIRECT) {
        if (desc.len % sizeof(VRingDesc)) {
            virtio_error(vdev, "Invalid size for indirect buffer table");
            goto done;
        }

        /* loop over the indirect descriptor table */
        len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as,
                                       desc.addr, desc.len, false);
        desc_cache = &indirect_desc_cache;
        if (len < desc.len) {
            virtio_error(vdev, "Cannot map indirect buffer");
            goto done;
        }

        max = desc.len / sizeof(VRingDesc);
        i = 0;
        vring_desc_read(vdev, &desc, desc_cache, i);
    }

    /* Collect all the descriptors */
    do {
        bool map_ok;

        if (desc.flags & VRING_DESC_F_WRITE) {
            map_ok = virtqueue_map_desc(vdev, &in_num, addr + out_num,
                                        iov + out_num,
                                        VIRTQUEUE_MAX_SIZE - out_num, true,
                                        desc.addr, desc.len);
        } else {
            if (in_num) {
                virtio_error(vdev, "Incorrect order for descriptors");
                goto err_undo_map;
            }
            map_ok = virtqueue_map_desc(vdev, &out_num, addr, iov,
                                        VIRTQUEUE_MAX_SIZE, false,
                                        desc.addr, desc.len);
        }
        if (!map_ok) {
            goto err_undo_map;
        }

        /* If we've got too many, that implies a descriptor loop. */
        if ((in_num + out_num) > max) {
            virtio_error(vdev, "Looped descriptor");
            goto err_undo_map;
        }

        rc = virtqueue_read_next_desc(vdev, &desc, desc_cache, max, &i);
    } while (rc == VIRTQUEUE_READ_DESC_MORE);

    if (rc == VIRTQUEUE_READ_DESC_ERROR) {
        goto err_undo_map;
    }

    /* Now copy what we have collected and mapped */
    elem = virtqueue_alloc_element(sz, out_num, in_num);
    elem->index = head;
    for (i = 0; i < out_num; i++) {
        elem->out_addr[i] = addr[i];
        elem->out_sg[i] = iov[i];
    }
    for (i = 0; i < in_num; i++) {
        elem->in_addr[i] = addr[out_num + i];
        elem->in_sg[i] = iov[out_num + i];
    }

    vq->inuse++;

    trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
done:
    rcu_read_unlock();
    address_space_cache_destroy(&indirect_desc_cache);

    return elem;

err_undo_map:
    virtqueue_undo_map_desc(out_num, in_num, iov);
    goto done;
}
/* virtqueue_drop_all:
 * @vq: The #VirtQueue
 * Drops all queued buffers and indicates them to the guest
 * as if they are done. Useful when buffers can not be
 * processed but must be returned to the guest.
 */
unsigned int virtqueue_drop_all(VirtQueue *vq)
{
    unsigned int dropped = 0;
    VirtQueueElement elem = {};
    VirtIODevice *vdev = vq->vdev;
    bool fEventIdx = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

    if (unlikely(vdev->broken)) {
        return 0;
    }

    while (!virtio_queue_empty(vq) && vq->inuse < vq->vring.num) {
        /* works similar to virtqueue_pop but does not map buffers
         * and does not allocate any memory */
        smp_rmb();
        if (!virtqueue_get_head(vq, vq->last_avail_idx, &elem.index)) {
            break;
        }
        vq->inuse++;
        vq->last_avail_idx++;
        if (fEventIdx) {
            vring_set_avail_event(vq, vq->last_avail_idx);
        }
        /* immediately push the element, nothing to unmap
         * as both in_num and out_num are set to 0 */
        virtqueue_push(vq, &elem, 0);
        dropped++;
    }

    return dropped;
}
/* Reading and writing a structure directly to QEMUFile is *awful*, but
 * it is what QEMU has always done by mistake.  We can change it sooner
 * or later by bumping the version number of the affected vm states.
 * In the meanwhile, since the in-memory layout of VirtQueueElement
 * has changed, we need to marshal to and from the layout that was
 * used before the change.
 */
typedef struct VirtQueueElementOld {
    unsigned int index;
    unsigned int out_num;
    unsigned int in_num;
    hwaddr in_addr[VIRTQUEUE_MAX_SIZE];
    hwaddr out_addr[VIRTQUEUE_MAX_SIZE];
    struct iovec in_sg[VIRTQUEUE_MAX_SIZE];
    struct iovec out_sg[VIRTQUEUE_MAX_SIZE];
} VirtQueueElementOld;
void *qemu_get_virtqueue_element(VirtIODevice *vdev, QEMUFile *f, size_t sz)
{
    VirtQueueElement *elem;
    VirtQueueElementOld data;
    int i;

    qemu_get_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld));

    /* TODO: teach all callers that this can fail, and return failure instead
     * of asserting here.
     * When we do, we might be able to re-enable NDEBUG below.
     */
#ifdef NDEBUG
#error building with NDEBUG is not supported
#endif
    assert(ARRAY_SIZE(data.in_addr) >= data.in_num);
    assert(ARRAY_SIZE(data.out_addr) >= data.out_num);

    elem = virtqueue_alloc_element(sz, data.out_num, data.in_num);
    elem->index = data.index;

    for (i = 0; i < elem->in_num; i++) {
        elem->in_addr[i] = data.in_addr[i];
    }

    for (i = 0; i < elem->out_num; i++) {
        elem->out_addr[i] = data.out_addr[i];
    }

    for (i = 0; i < elem->in_num; i++) {
        /* Base is overwritten by virtqueue_map.  */
        elem->in_sg[i].iov_base = 0;
        elem->in_sg[i].iov_len = data.in_sg[i].iov_len;
    }

    for (i = 0; i < elem->out_num; i++) {
        /* Base is overwritten by virtqueue_map.  */
        elem->out_sg[i].iov_base = 0;
        elem->out_sg[i].iov_len = data.out_sg[i].iov_len;
    }

    virtqueue_map(vdev, elem);
    return elem;
}
*f
, VirtQueueElement
*elem
)
1044 VirtQueueElementOld data
;
1047 memset(&data
, 0, sizeof(data
));
1048 data
.index
= elem
->index
;
1049 data
.in_num
= elem
->in_num
;
1050 data
.out_num
= elem
->out_num
;
1052 for (i
= 0; i
< elem
->in_num
; i
++) {
1053 data
.in_addr
[i
] = elem
->in_addr
[i
];
1056 for (i
= 0; i
< elem
->out_num
; i
++) {
1057 data
.out_addr
[i
] = elem
->out_addr
[i
];
1060 for (i
= 0; i
< elem
->in_num
; i
++) {
1061 /* Base is overwritten by virtqueue_map when loading. Do not
1062 * save it, as it would leak the QEMU address space layout. */
1063 data
.in_sg
[i
].iov_len
= elem
->in_sg
[i
].iov_len
;
1066 for (i
= 0; i
< elem
->out_num
; i
++) {
1067 /* Do not save iov_base as above. */
1068 data
.out_sg
[i
].iov_len
= elem
->out_sg
[i
].iov_len
;
1070 qemu_put_buffer(f
, (uint8_t *)&data
, sizeof(VirtQueueElementOld
));
static void virtio_notify_vector(VirtIODevice *vdev, uint16_t vector)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    if (unlikely(vdev->broken)) {
        return;
    }

    if (k->notify) {
        k->notify(qbus->parent, vector);
    }
}

void virtio_update_irq(VirtIODevice *vdev)
{
    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
}
static int virtio_validate_features(VirtIODevice *vdev)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    if (virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM) &&
        !virtio_vdev_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM)) {
        return -EFAULT;
    }

    if (k->validate_features) {
        return k->validate_features(vdev);
    } else {
        return 0;
    }
}
*vdev
, uint8_t val
)
1111 VirtioDeviceClass
*k
= VIRTIO_DEVICE_GET_CLASS(vdev
);
1112 trace_virtio_set_status(vdev
, val
);
1114 if (virtio_vdev_has_feature(vdev
, VIRTIO_F_VERSION_1
)) {
1115 if (!(vdev
->status
& VIRTIO_CONFIG_S_FEATURES_OK
) &&
1116 val
& VIRTIO_CONFIG_S_FEATURES_OK
) {
1117 int ret
= virtio_validate_features(vdev
);
1124 if (k
->set_status
) {
1125 k
->set_status(vdev
, val
);
bool target_words_bigendian(void);
static enum virtio_device_endian virtio_default_endian(void)
{
    if (target_words_bigendian()) {
        return VIRTIO_DEVICE_ENDIAN_BIG;
    } else {
        return VIRTIO_DEVICE_ENDIAN_LITTLE;
    }
}

static enum virtio_device_endian virtio_current_cpu_endian(void)
{
    CPUClass *cc = CPU_GET_CLASS(current_cpu);

    if (cc->virtio_is_big_endian(current_cpu)) {
        return VIRTIO_DEVICE_ENDIAN_BIG;
    } else {
        return VIRTIO_DEVICE_ENDIAN_LITTLE;
    }
}
static void virtio_virtqueue_reset_region_cache(struct VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches;

    caches = atomic_read(&vq->vring.caches);
    atomic_rcu_set(&vq->vring.caches, NULL);
    if (caches) {
        call_rcu(caches, virtio_free_region_cache, rcu);
    }
}
void virtio_reset(void *opaque)
{
    VirtIODevice *vdev = opaque;
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    int i;

    virtio_set_status(vdev, 0);
    if (current_cpu) {
        /* Guest initiated reset */
        vdev->device_endian = virtio_current_cpu_endian();
    } else {
        /* System reset */
        vdev->device_endian = virtio_default_endian();
    }

    if (k->reset) {
        k->reset(vdev);
    }

    vdev->broken = false;
    vdev->guest_features = 0;
    vdev->queue_sel = 0;
    vdev->status = 0;
    atomic_set(&vdev->isr, 0);
    vdev->config_vector = VIRTIO_NO_VECTOR;
    virtio_notify_vector(vdev, vdev->config_vector);

    for(i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        vdev->vq[i].vring.desc = 0;
        vdev->vq[i].vring.avail = 0;
        vdev->vq[i].vring.used = 0;
        vdev->vq[i].last_avail_idx = 0;
        vdev->vq[i].shadow_avail_idx = 0;
        vdev->vq[i].used_idx = 0;
        virtio_queue_set_vector(vdev, i, VIRTIO_NO_VECTOR);
        vdev->vq[i].signalled_used = 0;
        vdev->vq[i].signalled_used_valid = false;
        vdev->vq[i].notification = true;
        vdev->vq[i].vring.num = vdev->vq[i].vring.num_default;
        vdev->vq[i].inuse = 0;
        virtio_virtqueue_reset_region_cache(&vdev->vq[i]);
    }
}
uint32_t virtio_config_readb(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldub_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_readw(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = lduw_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_readl(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldl_p(vdev->config + addr);
    return val;
}

void virtio_config_writeb(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stb_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_writew(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stw_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_writel(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stl_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}
uint32_t virtio_config_modern_readb(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldub_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_modern_readw(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = lduw_le_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_modern_readl(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldl_le_p(vdev->config + addr);
    return val;
}

void virtio_config_modern_writeb(VirtIODevice *vdev,
                                 uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stb_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_modern_writew(VirtIODevice *vdev,
                                 uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stw_le_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_modern_writel(VirtIODevice *vdev,
                                 uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stl_le_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}
void virtio_queue_set_addr(VirtIODevice *vdev, int n, hwaddr addr)
{
    vdev->vq[n].vring.desc = addr;
    virtio_queue_update_rings(vdev, n);
}

hwaddr virtio_queue_get_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

void virtio_queue_set_rings(VirtIODevice *vdev, int n, hwaddr desc,
                            hwaddr avail, hwaddr used)
{
    vdev->vq[n].vring.desc = desc;
    vdev->vq[n].vring.avail = avail;
    vdev->vq[n].vring.used = used;
    virtio_init_region_cache(vdev, n);
}
void virtio_queue_set_num(VirtIODevice *vdev, int n, int num)
{
    /* Don't allow guest to flip queue between existent and
     * nonexistent states, or to set it to an invalid size.
     */
    if (!!num != !!vdev->vq[n].vring.num ||
        num > VIRTQUEUE_MAX_SIZE ||
        num < 0) {
        return;
    }
    vdev->vq[n].vring.num = num;
}
*virtio_vector_first_queue(VirtIODevice
*vdev
, uint16_t vector
)
1431 return QLIST_FIRST(&vdev
->vector_queues
[vector
]);
1434 VirtQueue
*virtio_vector_next_queue(VirtQueue
*vq
)
1436 return QLIST_NEXT(vq
, node
);
1439 int virtio_queue_get_num(VirtIODevice
*vdev
, int n
)
1441 return vdev
->vq
[n
].vring
.num
;
1444 int virtio_queue_get_max_num(VirtIODevice
*vdev
, int n
)
1446 return vdev
->vq
[n
].vring
.num_default
;
1449 int virtio_get_num_queues(VirtIODevice
*vdev
)
1453 for (i
= 0; i
< VIRTIO_QUEUE_MAX
; i
++) {
1454 if (!virtio_queue_get_num(vdev
, i
)) {
1462 void virtio_queue_set_align(VirtIODevice
*vdev
, int n
, int align
)
1464 BusState
*qbus
= qdev_get_parent_bus(DEVICE(vdev
));
1465 VirtioBusClass
*k
= VIRTIO_BUS_GET_CLASS(qbus
);
1467 /* virtio-1 compliant devices cannot change the alignment */
1468 if (virtio_vdev_has_feature(vdev
, VIRTIO_F_VERSION_1
)) {
1469 error_report("tried to modify queue alignment for virtio-1 device");
1472 /* Check that the transport told us it was going to do this
1473 * (so a buggy transport will immediately assert rather than
1474 * silently failing to migrate this state)
1476 assert(k
->has_variable_vring_alignment
);
1478 vdev
->vq
[n
].vring
.align
= align
;
1479 virtio_queue_update_rings(vdev
, n
);
static bool virtio_queue_notify_aio_vq(VirtQueue *vq)
{
    if (vq->vring.desc && vq->handle_aio_output) {
        VirtIODevice *vdev = vq->vdev;

        trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
        return vq->handle_aio_output(vdev, vq);
    }

    return false;
}

static void virtio_queue_notify_vq(VirtQueue *vq)
{
    if (vq->vring.desc && vq->handle_output) {
        VirtIODevice *vdev = vq->vdev;

        if (unlikely(vdev->broken)) {
            return;
        }

        trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
        vq->handle_output(vdev, vq);
    }
}

void virtio_queue_notify(VirtIODevice *vdev, int n)
{
    virtio_queue_notify_vq(&vdev->vq[n]);
}
uint16_t virtio_queue_vector(VirtIODevice *vdev, int n)
{
    return n < VIRTIO_QUEUE_MAX ? vdev->vq[n].vector :
        VIRTIO_NO_VECTOR;
}

void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector)
{
    VirtQueue *vq = &vdev->vq[n];

    if (n < VIRTIO_QUEUE_MAX) {
        if (vdev->vector_queues &&
            vdev->vq[n].vector != VIRTIO_NO_VECTOR) {
            QLIST_REMOVE(vq, node);
        }
        vdev->vq[n].vector = vector;
        if (vdev->vector_queues &&
            vector != VIRTIO_NO_VECTOR) {
            QLIST_INSERT_HEAD(&vdev->vector_queues[vector], vq, node);
        }
    }
}
*virtio_add_queue(VirtIODevice
*vdev
, int queue_size
,
1537 VirtIOHandleOutput handle_output
)
1541 for (i
= 0; i
< VIRTIO_QUEUE_MAX
; i
++) {
1542 if (vdev
->vq
[i
].vring
.num
== 0)
1546 if (i
== VIRTIO_QUEUE_MAX
|| queue_size
> VIRTQUEUE_MAX_SIZE
)
1549 vdev
->vq
[i
].vring
.num
= queue_size
;
1550 vdev
->vq
[i
].vring
.num_default
= queue_size
;
1551 vdev
->vq
[i
].vring
.align
= VIRTIO_PCI_VRING_ALIGN
;
1552 vdev
->vq
[i
].handle_output
= handle_output
;
1553 vdev
->vq
[i
].handle_aio_output
= NULL
;
1555 return &vdev
->vq
[i
];
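/*
 * Usage sketch (editorial; names hypothetical): a device realize function
 * typically creates its queues like
 *
 *   vdev->req_vq = virtio_add_queue(vdev, 128, my_handle_request);
 *
 * where 128 becomes both vring.num and vring.num_default, and
 * my_handle_request is the VirtIOHandleOutput callback run on guest kicks.
 */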
void virtio_del_queue(VirtIODevice *vdev, int n)
{
    if (n < 0 || n >= VIRTIO_QUEUE_MAX) {
        abort();
    }

    vdev->vq[n].vring.num = 0;
    vdev->vq[n].vring.num_default = 0;
}
static void virtio_set_isr(VirtIODevice *vdev, int value)
{
    uint8_t old = atomic_read(&vdev->isr);

    /* Do not write ISR if it does not change, so that its cacheline remains
     * shared in the common case where the guest does not read it.
     */
    if ((old & value) != value) {
        atomic_or(&vdev->isr, value);
    }
}
/* Called within rcu_read_lock().  */
static bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    uint16_t old, new;
    bool v;
    /* We need to expose used array entries before checking used event. */
    smp_mb();
    /* Always notify when queue is empty (when feature acknowledge) */
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
        !vq->inuse && virtio_queue_empty(vq)) {
        return true;
    }

    if (!virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
        return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
    }

    v = vq->signalled_used_valid;
    vq->signalled_used_valid = true;
    old = vq->signalled_used;
    new = vq->signalled_used = vq->used_idx;
    return !v || vring_need_event(vring_get_used_event(vq), new, old);
}
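/*
 * vring_need_event() (shared virtio ring header) implements the EVENT_IDX
 * rule used above: signal only if the used index crossed the driver's
 * used-event value since the last notification, i.e.
 *
 *   (uint16_t)(new - event_idx - 1) < (uint16_t)(new - old)
 *
 * with all arithmetic modulo 2^16.
 */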
void virtio_notify_irqfd(VirtIODevice *vdev, VirtQueue *vq)
{
    bool should_notify;
    rcu_read_lock();
    should_notify = virtio_should_notify(vdev, vq);
    rcu_read_unlock();

    if (!should_notify) {
        return;
    }

    trace_virtio_notify_irqfd(vdev, vq);

    /*
     * virtio spec 1.0 says ISR bit 0 should be ignored with MSI, but
     * windows drivers included in virtio-win 1.8.0 (circa 2015) are
     * incorrectly polling this bit during crashdump and hibernation
     * in MSI mode, causing a hang if this bit is never updated.
     * Recent releases of Windows do not really shut down, but rather
     * log out and hibernate to make the next startup faster.  Hence,
     * this manifested as a more serious hang during shutdown with
     *
     * Next driver release from 2016 fixed this problem, so working around it
     * is not a must, but it's easy to do so let's do it here.
     *
     * Note: it's safe to update ISR from any thread as it was switched
     * to an atomic operation.
     */
    virtio_set_isr(vq->vdev, 0x1);
    event_notifier_set(&vq->guest_notifier);
}
static void virtio_irq(VirtQueue *vq)
{
    virtio_set_isr(vq->vdev, 0x1);
    virtio_notify_vector(vq->vdev, vq->vector);
}

void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    bool should_notify;
    rcu_read_lock();
    should_notify = virtio_should_notify(vdev, vq);
    rcu_read_unlock();

    if (!should_notify) {
        return;
    }

    trace_virtio_notify(vdev, vq);
    virtio_irq(vq);
}
void virtio_notify_config(VirtIODevice *vdev)
{
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
        return;

    virtio_set_isr(vdev, 0x3);
    vdev->generation++;
    virtio_notify_vector(vdev, vdev->config_vector);
}
static bool virtio_device_endian_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    assert(vdev->device_endian != VIRTIO_DEVICE_ENDIAN_UNKNOWN);
    if (!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        return vdev->device_endian != virtio_default_endian();
    }
    /* Devices conforming to VIRTIO 1.0 or later are always LE. */
    return vdev->device_endian != VIRTIO_DEVICE_ENDIAN_LITTLE;
}

static bool virtio_64bit_features_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    return (vdev->host_features >> 32) != 0;
}

static bool virtio_virtqueue_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    return virtio_host_has_feature(vdev, VIRTIO_F_VERSION_1);
}

static bool virtio_ringsize_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num != vdev->vq[i].vring.num_default) {
            return true;
        }
    }
    return false;
}

static bool virtio_extra_state_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    return k->has_extra_state &&
           k->has_extra_state(qbus->parent);
}

static bool virtio_broken_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    return vdev->broken;
}
static const VMStateDescription vmstate_virtqueue = {
    .name = "virtqueue_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(vring.avail, struct VirtQueue),
        VMSTATE_UINT64(vring.used, struct VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_virtqueues = {
    .name = "virtio/virtqueues",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_virtqueue_needed,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
                      VIRTIO_QUEUE_MAX, 0, vmstate_virtqueue, VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_ringsize = {
    .name = "ringsize_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(vring.num_default, struct VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_ringsize = {
    .name = "virtio/ringsize",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_ringsize_needed,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
                      VIRTIO_QUEUE_MAX, 0, vmstate_ringsize, VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};

static int get_extra_state(QEMUFile *f, void *pv, size_t size,
                           VMStateField *field)
{
    VirtIODevice *vdev = pv;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    if (!k->load_extra_state) {
        return -1;
    } else {
        return k->load_extra_state(qbus->parent, f);
    }
}

static int put_extra_state(QEMUFile *f, void *pv, size_t size,
                           VMStateField *field, QJSON *vmdesc)
{
    VirtIODevice *vdev = pv;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    k->save_extra_state(qbus->parent, f);
    return 0;
}

static const VMStateInfo vmstate_info_extra_state = {
    .name = "virtqueue_extra_state",
    .get = get_extra_state,
    .put = put_extra_state,
};

static const VMStateDescription vmstate_virtio_extra_state = {
    .name = "virtio/extra_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_extra_state_needed,
    .fields = (VMStateField[]) {
        {
            .name         = "extra_state",
            .version_id   = 0,
            .field_exists = NULL,
            .size         = 0,
            .info         = &vmstate_info_extra_state,
            .flags        = VMS_SINGLE,
            .offset       = 0,
        },
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_device_endian = {
    .name = "virtio/device_endian",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_device_endian_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(device_endian, VirtIODevice),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_64bit_features = {
    .name = "virtio/64bit_features",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_64bit_features_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(guest_features, VirtIODevice),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_broken = {
    .name = "virtio/broken",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_broken_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(broken, VirtIODevice),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio = {
    .name = "virtio",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_virtio_device_endian,
        &vmstate_virtio_64bit_features,
        &vmstate_virtio_virtqueues,
        &vmstate_virtio_ringsize,
        &vmstate_virtio_broken,
        &vmstate_virtio_extra_state,
        NULL
    }
};
void virtio_save(VirtIODevice *vdev, QEMUFile *f)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t guest_features_lo = (vdev->guest_features & 0xffffffff);
    int i;

    if (k->save_config) {
        k->save_config(qbus->parent, f);
    }

    qemu_put_8s(f, &vdev->status);
    qemu_put_8s(f, &vdev->isr);
    qemu_put_be16s(f, &vdev->queue_sel);
    qemu_put_be32s(f, &guest_features_lo);
    qemu_put_be32(f, vdev->config_len);
    qemu_put_buffer(f, vdev->config, vdev->config_len);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;
    }

    qemu_put_be32(f, i);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;

        qemu_put_be32(f, vdev->vq[i].vring.num);
        if (k->has_variable_vring_alignment) {
            qemu_put_be32(f, vdev->vq[i].vring.align);
        }
        /*
         * Save desc now, the rest of the ring addresses are saved in
         * subsections for VIRTIO-1 devices.
         */
        qemu_put_be64(f, vdev->vq[i].vring.desc);
        qemu_put_be16s(f, &vdev->vq[i].last_avail_idx);
        if (k->save_queue) {
            k->save_queue(qbus->parent, i, f);
        }
    }

    if (vdc->save != NULL) {
        vdc->save(vdev, f);
    }

    if (vdc->vmsd) {
        vmstate_save_state(f, vdc->vmsd, vdev, NULL);
    }

    /* Subsections */
    vmstate_save_state(f, &vmstate_virtio, vdev, NULL);
}
/* A wrapper for use as a VMState .put function */
static int virtio_device_put(QEMUFile *f, void *opaque, size_t size,
                             VMStateField *field, QJSON *vmdesc)
{
    virtio_save(VIRTIO_DEVICE(opaque), f);

    return 0;
}

/* A wrapper for use as a VMState .get function */
static int virtio_device_get(QEMUFile *f, void *opaque, size_t size,
                             VMStateField *field)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(opaque);
    DeviceClass *dc = DEVICE_CLASS(VIRTIO_DEVICE_GET_CLASS(vdev));

    return virtio_load(vdev, f, dc->vmsd->version_id);
}

const VMStateInfo virtio_vmstate_info = {
    .name = "virtio",
    .get = virtio_device_get,
    .put = virtio_device_put,
};
static int virtio_set_features_nocheck(VirtIODevice *vdev, uint64_t val)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    bool bad = (val & ~(vdev->host_features)) != 0;

    val &= vdev->host_features;
    if (k->set_features) {
        k->set_features(vdev, val);
    }
    vdev->guest_features = val;
    return bad ? -1 : 0;
}

int virtio_set_features(VirtIODevice *vdev, uint64_t val)
{
    /*
     * The driver must not attempt to set features after feature negotiation
     * has finished.
     */
    if (vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) {
        return -EINVAL;
    }
    return virtio_set_features_nocheck(vdev, val);
}
int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
{
    int i, ret;
    int32_t config_len;
    uint32_t num;
    uint32_t features;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);

    /*
     * We poison the endianness to ensure it does not get used before
     * subsections have been loaded.
     */
    vdev->device_endian = VIRTIO_DEVICE_ENDIAN_UNKNOWN;

    if (k->load_config) {
        ret = k->load_config(qbus->parent, f);
        if (ret)
            return ret;
    }

    qemu_get_8s(f, &vdev->status);
    qemu_get_8s(f, &vdev->isr);
    qemu_get_be16s(f, &vdev->queue_sel);
    if (vdev->queue_sel >= VIRTIO_QUEUE_MAX) {
        return -1;
    }
    qemu_get_be32s(f, &features);

    /*
     * Temporarily set guest_features low bits - needed by
     * virtio net load code testing for VIRTIO_NET_F_CTRL_GUEST_OFFLOADS
     * VIRTIO_NET_F_GUEST_ANNOUNCE and VIRTIO_NET_F_CTRL_VQ.
     *
     * Note: devices should always test host features in future - don't create
     * new dependencies like this.
     */
    vdev->guest_features = features;

    config_len = qemu_get_be32(f);

    /*
     * There are cases where the incoming config can be bigger or smaller
     * than what we have; so load what we have space for, and skip
     * any excess that's in the stream.
     */
    qemu_get_buffer(f, vdev->config, MIN(config_len, vdev->config_len));

    while (config_len > vdev->config_len) {
        qemu_get_byte(f);
        config_len--;
    }

    num = qemu_get_be32(f);

    if (num > VIRTIO_QUEUE_MAX) {
        error_report("Invalid number of virtqueues: 0x%x", num);
        return -1;
    }

    for (i = 0; i < num; i++) {
        vdev->vq[i].vring.num = qemu_get_be32(f);
        if (k->has_variable_vring_alignment) {
            vdev->vq[i].vring.align = qemu_get_be32(f);
        }
        vdev->vq[i].vring.desc = qemu_get_be64(f);
        qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);
        vdev->vq[i].signalled_used_valid = false;
        vdev->vq[i].notification = true;

        if (!vdev->vq[i].vring.desc && vdev->vq[i].last_avail_idx) {
            error_report("VQ %d address 0x0 "
                         "inconsistent with Host index 0x%x",
                         i, vdev->vq[i].last_avail_idx);
            return -1;
        }
        if (k->load_queue) {
            ret = k->load_queue(qbus->parent, i, f);
            if (ret)
                return ret;
        }
    }

    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);

    if (vdc->load != NULL) {
        ret = vdc->load(vdev, f, version_id);
        if (ret) {
            return ret;
        }
    }

    if (vdc->vmsd) {
        ret = vmstate_load_state(f, vdc->vmsd, vdev, version_id);
        if (ret) {
            return ret;
        }
    }

    /* Subsections */
    ret = vmstate_load_state(f, &vmstate_virtio, vdev, 1);
    if (ret) {
        return ret;
    }

    if (vdev->device_endian == VIRTIO_DEVICE_ENDIAN_UNKNOWN) {
        vdev->device_endian = virtio_default_endian();
    }

    if (virtio_64bit_features_needed(vdev)) {
        /*
         * Subsection load filled vdev->guest_features.  Run them
         * through virtio_set_features to sanity-check them against
         * host_features.
         */
        uint64_t features64 = vdev->guest_features;
        if (virtio_set_features_nocheck(vdev, features64) < 0) {
            error_report("Features 0x%" PRIx64 " unsupported. "
                         "Allowed features: 0x%" PRIx64,
                         features64, vdev->host_features);
            return -1;
        }
    } else {
        if (virtio_set_features_nocheck(vdev, features) < 0) {
            error_report("Features 0x%x unsupported. "
                         "Allowed features: 0x%" PRIx64,
                         features, vdev->host_features);
            return -1;
        }
    }

    rcu_read_lock();
    for (i = 0; i < num; i++) {
        if (vdev->vq[i].vring.desc) {
            uint16_t nheads;

            /*
             * VIRTIO-1 devices migrate desc, used, and avail ring addresses so
             * only the region cache needs to be set up.  Legacy devices need
             * to calculate used and avail ring addresses based on the desc
             * address.
             */
            if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
                virtio_init_region_cache(vdev, i);
            } else {
                virtio_queue_update_rings(vdev, i);
            }

            nheads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx;
            /* Check it isn't doing strange things with descriptor numbers. */
            if (nheads > vdev->vq[i].vring.num) {
                error_report("VQ %d size 0x%x Guest index 0x%x "
                             "inconsistent with Host index 0x%x: delta 0x%x",
                             i, vdev->vq[i].vring.num,
                             vring_avail_idx(&vdev->vq[i]),
                             vdev->vq[i].last_avail_idx, nheads);
                return -1;
            }
            vdev->vq[i].used_idx = vring_used_idx(&vdev->vq[i]);
            vdev->vq[i].shadow_avail_idx = vring_avail_idx(&vdev->vq[i]);

            /*
             * Some devices migrate VirtQueueElements that have been popped
             * from the avail ring but not yet returned to the used ring.
             * Since max ring size < UINT16_MAX it's safe to use modulo
             * UINT16_MAX + 1 subtraction.
             */
            vdev->vq[i].inuse = (uint16_t)(vdev->vq[i].last_avail_idx -
                                vdev->vq[i].used_idx);
            if (vdev->vq[i].inuse > vdev->vq[i].vring.num) {
                error_report("VQ %d size 0x%x < last_avail_idx 0x%x - "
                             "used_idx 0x%x",
                             i, vdev->vq[i].vring.num,
                             vdev->vq[i].last_avail_idx,
                             vdev->vq[i].used_idx);
                return -1;
            }
        }
    }
    rcu_read_unlock();

    return 0;
}
void virtio_cleanup(VirtIODevice *vdev)
{
    qemu_del_vm_change_state_handler(vdev->vmstate);
}
static void virtio_vmstate_change(void *opaque, int running, RunState state)
{
    VirtIODevice *vdev = opaque;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    bool backend_run = running && (vdev->status & VIRTIO_CONFIG_S_DRIVER_OK);
    vdev->vm_running = running;

    if (backend_run) {
        virtio_set_status(vdev, vdev->status);
    }

    if (k->vmstate_change) {
        k->vmstate_change(qbus->parent, backend_run);
    }

    if (!backend_run) {
        virtio_set_status(vdev, vdev->status);
    }
}
void virtio_instance_init_common(Object *proxy_obj, void *data,
                                 size_t vdev_size, const char *vdev_name)
{
    DeviceState *vdev = data;

    object_initialize(vdev, vdev_size, vdev_name);
    object_property_add_child(proxy_obj, "virtio-backend", OBJECT(vdev), NULL);
    object_unref(OBJECT(vdev));
    qdev_alias_all_properties(vdev, proxy_obj);
}
void virtio_init(VirtIODevice *vdev, const char *name,
                 uint16_t device_id, size_t config_size)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    int i;
    int nvectors = k->query_nvectors ? k->query_nvectors(qbus->parent) : 0;

    if (nvectors) {
        vdev->vector_queues =
            g_malloc0(sizeof(*vdev->vector_queues) * nvectors);
    }

    vdev->device_id = device_id;
    vdev->status = 0;
    atomic_set(&vdev->isr, 0);
    vdev->queue_sel = 0;
    vdev->config_vector = VIRTIO_NO_VECTOR;
    vdev->vq = g_malloc0(sizeof(VirtQueue) * VIRTIO_QUEUE_MAX);
    vdev->vm_running = runstate_is_running();
    vdev->broken = false;
    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        vdev->vq[i].vector = VIRTIO_NO_VECTOR;
        vdev->vq[i].vdev = vdev;
        vdev->vq[i].queue_index = i;
    }

    vdev->name = name;
    vdev->config_len = config_size;
    if (vdev->config_len) {
        vdev->config = g_malloc0(config_size);
    } else {
        vdev->config = NULL;
    }
    vdev->vmstate = qemu_add_vm_change_state_handler(virtio_vmstate_change,
                                                     vdev);
    vdev->device_endian = virtio_default_endian();
    vdev->use_guest_notifier_mask = true;
}
hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

hwaddr virtio_queue_get_avail_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.avail;
}

hwaddr virtio_queue_get_used_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.used;
}

hwaddr virtio_queue_get_desc_size(VirtIODevice *vdev, int n)
{
    return sizeof(VRingDesc) * vdev->vq[n].vring.num;
}

hwaddr virtio_queue_get_avail_size(VirtIODevice *vdev, int n)
{
    return offsetof(VRingAvail, ring) +
        sizeof(uint16_t) * vdev->vq[n].vring.num;
}

hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n)
{
    return offsetof(VRingUsed, ring) +
        sizeof(VRingUsedElem) * vdev->vq[n].vring.num;
}

uint16_t virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].last_avail_idx;
}

void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, uint16_t idx)
{
    vdev->vq[n].last_avail_idx = idx;
    vdev->vq[n].shadow_avail_idx = idx;
}

void virtio_queue_update_used_idx(VirtIODevice *vdev, int n)
{
    rcu_read_lock();
    if (vdev->vq[n].vring.desc) {
        vdev->vq[n].used_idx = vring_used_idx(&vdev->vq[n]);
    }
    rcu_read_unlock();
}

void virtio_queue_invalidate_signalled_used(VirtIODevice *vdev, int n)
{
    vdev->vq[n].signalled_used_valid = false;
}

VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n)
{
    return vdev->vq + n;
}

uint16_t virtio_get_queue_index(VirtQueue *vq)
{
    return vq->queue_index;
}
static void virtio_queue_guest_notifier_read(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, guest_notifier);
    if (event_notifier_test_and_clear(n)) {
        virtio_irq(vq);
    }
}

void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
                                                bool with_irqfd)
{
    if (assign && !with_irqfd) {
        event_notifier_set_handler(&vq->guest_notifier,
                                   virtio_queue_guest_notifier_read);
    } else {
        event_notifier_set_handler(&vq->guest_notifier, NULL);
    }
    if (!assign) {
        /* Test and clear notifier before closing it,
         * in case poll callback didn't have time to run. */
        virtio_queue_guest_notifier_read(&vq->guest_notifier);
    }
}

EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq)
{
    return &vq->guest_notifier;
}
static void virtio_queue_host_notifier_aio_read(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
    if (event_notifier_test_and_clear(n)) {
        virtio_queue_notify_aio_vq(vq);
    }
}

static void virtio_queue_host_notifier_aio_poll_begin(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);

    virtio_queue_set_notification(vq, 0);
}

static bool virtio_queue_host_notifier_aio_poll(void *opaque)
{
    EventNotifier *n = opaque;
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
    bool progress;

    if (!vq->vring.desc || virtio_queue_empty(vq)) {
        return false;
    }

    progress = virtio_queue_notify_aio_vq(vq);

    /* In case the handler function re-enabled notifications */
    virtio_queue_set_notification(vq, 0);
    return progress;
}

static void virtio_queue_host_notifier_aio_poll_end(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);

    /* Caller polls once more after this to catch requests that race with us */
    virtio_queue_set_notification(vq, 1);
}

void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx,
                                                VirtIOHandleAIOOutput handle_output)
{
    if (handle_output) {
        vq->handle_aio_output = handle_output;
        aio_set_event_notifier(ctx, &vq->host_notifier, true,
                               virtio_queue_host_notifier_aio_read,
                               virtio_queue_host_notifier_aio_poll);
        aio_set_event_notifier_poll(ctx, &vq->host_notifier,
                                    virtio_queue_host_notifier_aio_poll_begin,
                                    virtio_queue_host_notifier_aio_poll_end);
    } else {
        aio_set_event_notifier(ctx, &vq->host_notifier, true, NULL, NULL);
        /* Test and clear notifier after disabling event,
         * in case poll callback didn't have time to run. */
        virtio_queue_host_notifier_aio_read(&vq->host_notifier);
        vq->handle_aio_output = NULL;
    }
}
void virtio_queue_host_notifier_read(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
    if (event_notifier_test_and_clear(n)) {
        virtio_queue_notify_vq(vq);
    }
}

EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq)
{
    return &vq->host_notifier;
}

void virtio_device_set_child_bus_name(VirtIODevice *vdev, char *bus_name)
{
    g_free(vdev->bus_name);
    vdev->bus_name = g_strdup(bus_name);
}
void GCC_FMT_ATTR(2, 3) virtio_error(VirtIODevice *vdev, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    error_vreport(fmt, ap);
    va_end(ap);

    vdev->broken = true;

    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        virtio_set_status(vdev, vdev->status | VIRTIO_CONFIG_S_NEEDS_RESET);
        virtio_notify_config(vdev);
    }
}
static void virtio_memory_listener_commit(MemoryListener *listener)
{
    VirtIODevice *vdev = container_of(listener, VirtIODevice, listener);
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0) {
            break;
        }
        virtio_init_region_cache(vdev, i);
    }
}
static void virtio_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
    Error *err = NULL;

    /* Devices should either use vmsd or the load/save methods */
    assert(!vdc->vmsd || !vdc->load);

    if (vdc->realize != NULL) {
        vdc->realize(dev, &err);
        if (err != NULL) {
            error_propagate(errp, err);
            return;
        }
    }

    virtio_bus_device_plugged(vdev, &err);
    if (err != NULL) {
        error_propagate(errp, err);
        return;
    }

    vdev->listener.commit = virtio_memory_listener_commit;
    memory_listener_register(&vdev->listener, vdev->dma_as);
}
static void virtio_device_unrealize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
    Error *err = NULL;

    virtio_bus_device_unplugged(vdev);

    if (vdc->unrealize != NULL) {
        vdc->unrealize(dev, &err);
        if (err != NULL) {
            error_propagate(errp, err);
            return;
        }
    }

    g_free(vdev->bus_name);
    vdev->bus_name = NULL;
}
static void virtio_device_free_virtqueues(VirtIODevice *vdev)
{
    int i;

    if (!vdev->vq) {
        return;
    }

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0) {
            break;
        }
        virtio_virtqueue_reset_region_cache(&vdev->vq[i]);
    }
    g_free(vdev->vq);
}

static void virtio_device_instance_finalize(Object *obj)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(obj);

    memory_listener_unregister(&vdev->listener);
    virtio_device_free_virtqueues(vdev);

    g_free(vdev->config);
    g_free(vdev->vector_queues);
}

static Property virtio_properties[] = {
    DEFINE_VIRTIO_COMMON_FEATURES(VirtIODevice, host_features),
    DEFINE_PROP_END_OF_LIST(),
};
static int virtio_device_start_ioeventfd_impl(VirtIODevice *vdev)
{
    VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev)));
    int n, r, err;

    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
        VirtQueue *vq = &vdev->vq[n];
        if (!virtio_queue_get_num(vdev, n)) {
            continue;
        }
        r = virtio_bus_set_host_notifier(qbus, n, true);
        if (r < 0) {
            err = r;
            goto assign_error;
        }
        event_notifier_set_handler(&vq->host_notifier,
                                   virtio_queue_host_notifier_read);
    }

    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
        /* Kick right away to begin processing requests already in vring */
        VirtQueue *vq = &vdev->vq[n];
        if (!vq->vring.num) {
            continue;
        }
        event_notifier_set(&vq->host_notifier);
    }
    return 0;

assign_error:
    while (--n >= 0) {
        VirtQueue *vq = &vdev->vq[n];
        if (!virtio_queue_get_num(vdev, n)) {
            continue;
        }

        event_notifier_set_handler(&vq->host_notifier, NULL);
        r = virtio_bus_set_host_notifier(qbus, n, false);
        assert(r >= 0);
    }
    return err;
}
*vdev
)
2567 BusState
*qbus
= qdev_get_parent_bus(DEVICE(vdev
));
2568 VirtioBusState
*vbus
= VIRTIO_BUS(qbus
);
2570 return virtio_bus_start_ioeventfd(vbus
);
2573 static void virtio_device_stop_ioeventfd_impl(VirtIODevice
*vdev
)
2575 VirtioBusState
*qbus
= VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev
)));
2578 for (n
= 0; n
< VIRTIO_QUEUE_MAX
; n
++) {
2579 VirtQueue
*vq
= &vdev
->vq
[n
];
2581 if (!virtio_queue_get_num(vdev
, n
)) {
2584 event_notifier_set_handler(&vq
->host_notifier
, NULL
);
2585 r
= virtio_bus_set_host_notifier(qbus
, n
, false);
2590 void virtio_device_stop_ioeventfd(VirtIODevice
*vdev
)
2592 BusState
*qbus
= qdev_get_parent_bus(DEVICE(vdev
));
2593 VirtioBusState
*vbus
= VIRTIO_BUS(qbus
);
2595 virtio_bus_stop_ioeventfd(vbus
);
2598 int virtio_device_grab_ioeventfd(VirtIODevice
*vdev
)
2600 BusState
*qbus
= qdev_get_parent_bus(DEVICE(vdev
));
2601 VirtioBusState
*vbus
= VIRTIO_BUS(qbus
);
2603 return virtio_bus_grab_ioeventfd(vbus
);
2606 void virtio_device_release_ioeventfd(VirtIODevice
*vdev
)
2608 BusState
*qbus
= qdev_get_parent_bus(DEVICE(vdev
));
2609 VirtioBusState
*vbus
= VIRTIO_BUS(qbus
);
2611 virtio_bus_release_ioeventfd(vbus
);
static void virtio_device_class_init(ObjectClass *klass, void *data)
{
    /* Set the default value here. */
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = virtio_device_realize;
    dc->unrealize = virtio_device_unrealize;
    dc->bus_type = TYPE_VIRTIO_BUS;
    dc->props = virtio_properties;
    vdc->start_ioeventfd = virtio_device_start_ioeventfd_impl;
    vdc->stop_ioeventfd = virtio_device_stop_ioeventfd_impl;

    vdc->legacy_features |= VIRTIO_LEGACY_FEATURES;
}

bool virtio_device_ioeventfd_enabled(VirtIODevice *vdev)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);

    return virtio_bus_ioeventfd_enabled(vbus);
}
static const TypeInfo virtio_device_info = {
    .name = TYPE_VIRTIO_DEVICE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(VirtIODevice),
    .class_init = virtio_device_class_init,
    .instance_finalize = virtio_device_instance_finalize,
    .abstract = true,
    .class_size = sizeof(VirtioDeviceClass),
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_device_info);
}

type_init(virtio_register_types)