/*
 * Virtio Support
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-virtio.h"
#include "trace.h"
#include "qemu/defer-call.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "exec/tswap.h"
#include "qom/object_interfaces.h"
#include "hw/core/cpu.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/vhost.h"
#include "migration/qemu-file-types.h"
#include "qemu/atomic.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/qdev-properties.h"
#include "hw/virtio/virtio-access.h"
#include "sysemu/dma.h"
#include "sysemu/runstate.h"
#include "virtio-qmp.h"

#include "standard-headers/linux/virtio_ids.h"
#include "standard-headers/linux/vhost_types.h"
#include "standard-headers/linux/virtio_blk.h"
#include "standard-headers/linux/virtio_console.h"
#include "standard-headers/linux/virtio_gpu.h"
#include "standard-headers/linux/virtio_net.h"
#include "standard-headers/linux/virtio_scsi.h"
#include "standard-headers/linux/virtio_i2c.h"
#include "standard-headers/linux/virtio_balloon.h"
#include "standard-headers/linux/virtio_iommu.h"
#include "standard-headers/linux/virtio_mem.h"
#include "standard-headers/linux/virtio_vsock.h"

/*
 * Maximum size of virtio device config space
 */
#define VHOST_USER_MAX_CONFIG_SIZE 256

/*
 * The alignment to use between consumer and producer parts of vring.
 * x86 pagesize again. This is the default, used by transports like PCI
 * which don't provide a means for the guest to tell the host the alignment.
 */
#define VIRTIO_PCI_VRING_ALIGN 4096

typedef struct VRingDesc
{
    uint64_t addr;
    uint32_t len;
    uint16_t flags;
    uint16_t next;
} VRingDesc;

typedef struct VRingPackedDesc {
    uint64_t addr;
    uint32_t len;
    uint16_t id;
    uint16_t flags;
} VRingPackedDesc;

typedef struct VRingAvail
{
    uint16_t flags;
    uint16_t idx;
    uint16_t ring[];
} VRingAvail;

typedef struct VRingUsedElem
{
    uint32_t id;
    uint32_t len;
} VRingUsedElem;

typedef struct VRingUsed
{
    uint16_t flags;
    uint16_t idx;
    VRingUsedElem ring[];
} VRingUsed;

typedef struct VRingMemoryRegionCaches {
    struct rcu_head rcu;
    MemoryRegionCache desc;
    MemoryRegionCache avail;
    MemoryRegionCache used;
} VRingMemoryRegionCaches;

typedef struct VRing
{
    unsigned int num;
    unsigned int num_default;
    unsigned int align;
    hwaddr desc;
    hwaddr avail;
    hwaddr used;
    VRingMemoryRegionCaches *caches;
} VRing;

typedef struct VRingPackedDescEvent {
    uint16_t off_wrap;
    uint16_t flags;
} VRingPackedDescEvent;

struct VirtQueue
{
    VRing vring;
    VirtQueueElement *used_elems;

    /* Next head to pop */
    uint16_t last_avail_idx;
    bool last_avail_wrap_counter;

    /* Last avail_idx read from VQ. */
    uint16_t shadow_avail_idx;
    bool shadow_avail_wrap_counter;

    uint16_t used_idx;
    bool used_wrap_counter;

    /* Last used index value we have signalled on */
    uint16_t signalled_used;

    /* Whether signalled_used is valid */
    bool signalled_used_valid;

    /* Notification enabled? */
    bool notification;

    uint16_t queue_index;

    unsigned int inuse;

    uint16_t vector;
    VirtIOHandleOutput handle_output;
    VirtIODevice *vdev;
    EventNotifier guest_notifier;
    EventNotifier host_notifier;
    bool host_notifier_enabled;
    QLIST_ENTRY(VirtQueue) node;
};

const char *virtio_device_names[] = {
    [VIRTIO_ID_NET] = "virtio-net",
    [VIRTIO_ID_BLOCK] = "virtio-blk",
    [VIRTIO_ID_CONSOLE] = "virtio-serial",
    [VIRTIO_ID_RNG] = "virtio-rng",
    [VIRTIO_ID_BALLOON] = "virtio-balloon",
    [VIRTIO_ID_IOMEM] = "virtio-iomem",
    [VIRTIO_ID_RPMSG] = "virtio-rpmsg",
    [VIRTIO_ID_SCSI] = "virtio-scsi",
    [VIRTIO_ID_9P] = "virtio-9p",
    [VIRTIO_ID_MAC80211_WLAN] = "virtio-mac-wlan",
    [VIRTIO_ID_RPROC_SERIAL] = "virtio-rproc-serial",
    [VIRTIO_ID_CAIF] = "virtio-caif",
    [VIRTIO_ID_MEMORY_BALLOON] = "virtio-mem-balloon",
    [VIRTIO_ID_GPU] = "virtio-gpu",
    [VIRTIO_ID_CLOCK] = "virtio-clk",
    [VIRTIO_ID_INPUT] = "virtio-input",
    [VIRTIO_ID_VSOCK] = "vhost-vsock",
    [VIRTIO_ID_CRYPTO] = "virtio-crypto",
    [VIRTIO_ID_SIGNAL_DIST] = "virtio-signal",
    [VIRTIO_ID_PSTORE] = "virtio-pstore",
    [VIRTIO_ID_IOMMU] = "virtio-iommu",
    [VIRTIO_ID_MEM] = "virtio-mem",
    [VIRTIO_ID_SOUND] = "virtio-sound",
    [VIRTIO_ID_FS] = "virtio-user-fs",
    [VIRTIO_ID_PMEM] = "virtio-pmem",
    [VIRTIO_ID_RPMB] = "virtio-rpmb",
    [VIRTIO_ID_MAC80211_HWSIM] = "virtio-mac-hwsim",
    [VIRTIO_ID_VIDEO_ENCODER] = "virtio-vid-encoder",
    [VIRTIO_ID_VIDEO_DECODER] = "virtio-vid-decoder",
    [VIRTIO_ID_SCMI] = "virtio-scmi",
    [VIRTIO_ID_NITRO_SEC_MOD] = "virtio-nitro-sec-mod",
    [VIRTIO_ID_I2C_ADAPTER] = "vhost-user-i2c",
    [VIRTIO_ID_WATCHDOG] = "virtio-watchdog",
    [VIRTIO_ID_CAN] = "virtio-can",
    [VIRTIO_ID_DMABUF] = "virtio-dmabuf",
    [VIRTIO_ID_PARAM_SERV] = "virtio-param-serv",
    [VIRTIO_ID_AUDIO_POLICY] = "virtio-audio-pol",
    [VIRTIO_ID_BT] = "virtio-bluetooth",
    [VIRTIO_ID_GPIO] = "virtio-gpio"
};

static const char *virtio_id_to_name(uint16_t device_id)
{
    assert(device_id < G_N_ELEMENTS(virtio_device_names));
    const char *name = virtio_device_names[device_id];
    assert(name != NULL);
    return name;
}

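/*
 * Illustrative sketch (not part of the original source): virtio_id_to_name()
 * maps a VIRTIO_ID_* device type to its canonical QEMU device name and
 * asserts on IDs that have no entry in virtio_device_names[]. For example:
 *
 *     virtio_id_to_name(VIRTIO_ID_NET)  returns "virtio-net"
 *     virtio_id_to_name(VIRTIO_ID_FS)   returns "virtio-user-fs"
 */
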
/* Called within call_rcu(). */
static void virtio_free_region_cache(VRingMemoryRegionCaches *caches)
{
    assert(caches != NULL);
    address_space_cache_destroy(&caches->desc);
    address_space_cache_destroy(&caches->avail);
    address_space_cache_destroy(&caches->used);
    g_free(caches);
}

static void virtio_virtqueue_reset_region_cache(struct VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches;

    caches = qatomic_read(&vq->vring.caches);
    qatomic_rcu_set(&vq->vring.caches, NULL);
    if (caches) {
        call_rcu(caches, virtio_free_region_cache, rcu);
    }
}

void virtio_init_region_cache(VirtIODevice *vdev, int n)
{
    VirtQueue *vq = &vdev->vq[n];
    VRingMemoryRegionCaches *old = vq->vring.caches;
    VRingMemoryRegionCaches *new = NULL;
    hwaddr addr, size;
    int64_t len;
    bool packed;

    addr = vq->vring.desc;
    if (!addr) {
        goto out_no_cache;
    }
    new = g_new0(VRingMemoryRegionCaches, 1);
    size = virtio_queue_get_desc_size(vdev, n);
    packed = virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED) ?
                                     true : false;
    len = address_space_cache_init(&new->desc, vdev->dma_as,
                                   addr, size, packed);
    if (len < size) {
        virtio_error(vdev, "Cannot map desc");
        goto err_desc;
    }

    size = virtio_queue_get_used_size(vdev, n);
    len = address_space_cache_init(&new->used, vdev->dma_as,
                                   vq->vring.used, size, true);
    if (len < size) {
        virtio_error(vdev, "Cannot map used");
        goto err_used;
    }

    size = virtio_queue_get_avail_size(vdev, n);
    len = address_space_cache_init(&new->avail, vdev->dma_as,
                                   vq->vring.avail, size, false);
    if (len < size) {
        virtio_error(vdev, "Cannot map avail");
        goto err_avail;
    }

    qatomic_rcu_set(&vq->vring.caches, new);
    if (old) {
        call_rcu(old, virtio_free_region_cache, rcu);
    }
    return;

err_avail:
    address_space_cache_destroy(&new->avail);
err_used:
    address_space_cache_destroy(&new->used);
err_desc:
    address_space_cache_destroy(&new->desc);
out_no_cache:
    g_free(new);
    virtio_virtqueue_reset_region_cache(vq);
}

/* virt queue functions */
void virtio_queue_update_rings(VirtIODevice *vdev, int n)
{
    VRing *vring = &vdev->vq[n].vring;

    if (!vring->num || !vring->desc || !vring->align) {
        /* not yet setup -> nothing to do */
        return;
    }
    vring->avail = vring->desc + vring->num * sizeof(VRingDesc);
    vring->used = vring_align(vring->avail +
                              offsetof(VRingAvail, ring[vring->num]),
                              vring->align);
    virtio_init_region_cache(vdev, n);
}

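/*
 * Worked example (illustrative, not part of the original source): with the
 * legacy/split layout computed above, a queue of num = 256 descriptors,
 * align = VIRTIO_PCI_VRING_ALIGN (4096) and desc = 0x10000 gives
 *
 *     avail = desc + 256 * sizeof(VRingDesc) = 0x10000 + 4096 = 0x11000
 *     used  = ALIGN(avail + offsetof(VRingAvail, ring[256]), 4096)
 *           = ALIGN(0x11000 + 4 + 2 * 256, 4096) = 0x12000
 *
 * i.e. the used ring starts on the next 4 KiB boundary after the avail ring.
 */
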
/* Called within rcu_read_lock(). */
static void vring_split_desc_read(VirtIODevice *vdev, VRingDesc *desc,
                                  MemoryRegionCache *cache, int i)
{
    address_space_read_cached(cache, i * sizeof(VRingDesc),
                              desc, sizeof(VRingDesc));
    virtio_tswap64s(vdev, &desc->addr);
    virtio_tswap32s(vdev, &desc->len);
    virtio_tswap16s(vdev, &desc->flags);
    virtio_tswap16s(vdev, &desc->next);
}

static void vring_packed_event_read(VirtIODevice *vdev,
                                    MemoryRegionCache *cache,
                                    VRingPackedDescEvent *e)
{
    hwaddr off_off = offsetof(VRingPackedDescEvent, off_wrap);
    hwaddr off_flags = offsetof(VRingPackedDescEvent, flags);

    e->flags = virtio_lduw_phys_cached(vdev, cache, off_flags);
    /* Make sure flags is seen before off_wrap */
    smp_rmb();
    e->off_wrap = virtio_lduw_phys_cached(vdev, cache, off_off);
    virtio_tswap16s(vdev, &e->flags);
}

static void vring_packed_off_wrap_write(VirtIODevice *vdev,
                                        MemoryRegionCache *cache,
                                        uint16_t off_wrap)
{
    hwaddr off = offsetof(VRingPackedDescEvent, off_wrap);

    virtio_stw_phys_cached(vdev, cache, off, off_wrap);
    address_space_cache_invalidate(cache, off, sizeof(off_wrap));
}

static void vring_packed_flags_write(VirtIODevice *vdev,
                                     MemoryRegionCache *cache, uint16_t flags)
{
    hwaddr off = offsetof(VRingPackedDescEvent, flags);

    virtio_stw_phys_cached(vdev, cache, off, flags);
    address_space_cache_invalidate(cache, off, sizeof(flags));
}

/* Called within rcu_read_lock(). */
static VRingMemoryRegionCaches *vring_get_region_caches(struct VirtQueue *vq)
{
    return qatomic_rcu_read(&vq->vring.caches);
}

/* Called within rcu_read_lock(). */
static inline uint16_t vring_avail_flags(VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingAvail, flags);

    if (!caches) {
        return 0;
    }

    return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
}

/* Called within rcu_read_lock(). */
static inline uint16_t vring_avail_idx(VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingAvail, idx);

    if (!caches) {
        return 0;
    }

    vq->shadow_avail_idx = virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
    return vq->shadow_avail_idx;
}

/* Called within rcu_read_lock(). */
static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingAvail, ring[i]);

    if (!caches) {
        return 0;
    }

    return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
}

/* Called within rcu_read_lock(). */
static inline uint16_t vring_get_used_event(VirtQueue *vq)
{
    return vring_avail_ring(vq, vq->vring.num);
}

/* Called within rcu_read_lock(). */
static inline void vring_used_write(VirtQueue *vq, VRingUsedElem *uelem,
                                    int i)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingUsed, ring[i]);

    if (!caches) {
        return;
    }

    virtio_tswap32s(vq->vdev, &uelem->id);
    virtio_tswap32s(vq->vdev, &uelem->len);
    address_space_write_cached(&caches->used, pa, uelem, sizeof(VRingUsedElem));
    address_space_cache_invalidate(&caches->used, pa, sizeof(VRingUsedElem));
}

/* Called within rcu_read_lock(). */
static inline uint16_t vring_used_flags(VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingUsed, flags);

    if (!caches) {
        return 0;
    }

    return virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
}

/* Called within rcu_read_lock(). */
static uint16_t vring_used_idx(VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingUsed, idx);

    if (!caches) {
        return 0;
    }

    return virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
}

/* Called within rcu_read_lock(). */
static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingUsed, idx);

    if (caches) {
        virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
        address_space_cache_invalidate(&caches->used, pa, sizeof(val));
    }

    vq->used_idx = val;
}

/* Called within rcu_read_lock(). */
static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    VirtIODevice *vdev = vq->vdev;
    hwaddr pa = offsetof(VRingUsed, flags);
    uint16_t flags;

    if (!caches) {
        return;
    }

    flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
    virtio_stw_phys_cached(vdev, &caches->used, pa, flags | mask);
    address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
}

/* Called within rcu_read_lock(). */
static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    VirtIODevice *vdev = vq->vdev;
    hwaddr pa = offsetof(VRingUsed, flags);
    uint16_t flags;

    if (!caches) {
        return;
    }

    flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
    virtio_stw_phys_cached(vdev, &caches->used, pa, flags & ~mask);
    address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
}

/* Called within rcu_read_lock(). */
static inline void vring_set_avail_event(VirtQueue *vq, uint16_t val)
{
    VRingMemoryRegionCaches *caches;
    hwaddr pa;

    if (!vq->notification) {
        return;
    }

    caches = vring_get_region_caches(vq);
    if (!caches) {
        return;
    }

    pa = offsetof(VRingUsed, ring[vq->vring.num]);
    virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
    address_space_cache_invalidate(&caches->used, pa, sizeof(val));
}

static void virtio_queue_split_set_notification(VirtQueue *vq, int enable)
{
    RCU_READ_LOCK_GUARD();

    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vring_avail_idx(vq));
    } else if (enable) {
        vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
    } else {
        vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
    }
    if (enable) {
        /* Expose avail event/used flags before caller checks the avail idx. */
        smp_mb();
    }
}

static void virtio_queue_packed_set_notification(VirtQueue *vq, int enable)
{
    uint16_t off_wrap;
    VRingPackedDescEvent e;
    VRingMemoryRegionCaches *caches;

    RCU_READ_LOCK_GUARD();
    caches = vring_get_region_caches(vq);
    if (!caches) {
        return;
    }

    vring_packed_event_read(vq->vdev, &caches->used, &e);

    if (!enable) {
        e.flags = VRING_PACKED_EVENT_FLAG_DISABLE;
    } else if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
        off_wrap = vq->shadow_avail_idx | vq->shadow_avail_wrap_counter << 15;
        vring_packed_off_wrap_write(vq->vdev, &caches->used, off_wrap);
        /* Make sure off_wrap is written before flags */
        smp_wmb();
        e.flags = VRING_PACKED_EVENT_FLAG_DESC;
    } else {
        e.flags = VRING_PACKED_EVENT_FLAG_ENABLE;
    }

    vring_packed_flags_write(vq->vdev, &caches->used, e.flags);
    if (enable) {
        /* Expose avail event/used flags before caller checks the avail idx. */
        smp_mb();
    }
}

bool virtio_queue_get_notification(VirtQueue *vq)
{
    return vq->notification;
}

void virtio_queue_set_notification(VirtQueue *vq, int enable)
{
    vq->notification = enable;

    if (!vq->vring.desc) {
        return;
    }

    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        virtio_queue_packed_set_notification(vq, enable);
    } else {
        virtio_queue_split_set_notification(vq, enable);
    }
}

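/*
 * Usage sketch (illustrative, not part of the original source): device
 * handlers typically disable further guest notifications while draining the
 * queue, then re-enable them and re-check so a buffer added concurrently is
 * not missed:
 *
 *     virtio_queue_set_notification(vq, 0);
 *     for (;;) {
 *         VirtQueueElement *elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
 *         if (!elem) {
 *             virtio_queue_set_notification(vq, 1);
 *             if (virtio_queue_empty(vq)) {
 *                 break;
 *             }
 *             virtio_queue_set_notification(vq, 0);
 *             continue;
 *         }
 *         ...process, then virtqueue_push(vq, elem, len) and g_free(elem)...
 *     }
 */
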
int virtio_queue_ready(VirtQueue *vq)
{
    return vq->vring.avail != 0;
}

static void vring_packed_desc_read_flags(VirtIODevice *vdev,
                                         uint16_t *flags,
                                         MemoryRegionCache *cache,
                                         int i)
{
    hwaddr off = i * sizeof(VRingPackedDesc) + offsetof(VRingPackedDesc, flags);

    *flags = virtio_lduw_phys_cached(vdev, cache, off);
}

static void vring_packed_desc_read(VirtIODevice *vdev,
                                   VRingPackedDesc *desc,
                                   MemoryRegionCache *cache,
                                   int i, bool strict_order)
{
    hwaddr off = i * sizeof(VRingPackedDesc);

    vring_packed_desc_read_flags(vdev, &desc->flags, cache, i);

    if (strict_order) {
        /* Make sure flags is read before the rest of the fields. */
        smp_rmb();
    }

    address_space_read_cached(cache, off + offsetof(VRingPackedDesc, addr),
                              &desc->addr, sizeof(desc->addr));
    address_space_read_cached(cache, off + offsetof(VRingPackedDesc, id),
                              &desc->id, sizeof(desc->id));
    address_space_read_cached(cache, off + offsetof(VRingPackedDesc, len),
                              &desc->len, sizeof(desc->len));
    virtio_tswap64s(vdev, &desc->addr);
    virtio_tswap16s(vdev, &desc->id);
    virtio_tswap32s(vdev, &desc->len);
}

static void vring_packed_desc_write_data(VirtIODevice *vdev,
                                         VRingPackedDesc *desc,
                                         MemoryRegionCache *cache,
                                         int i)
{
    hwaddr off_id = i * sizeof(VRingPackedDesc) +
                    offsetof(VRingPackedDesc, id);
    hwaddr off_len = i * sizeof(VRingPackedDesc) +
                     offsetof(VRingPackedDesc, len);

    virtio_tswap32s(vdev, &desc->len);
    virtio_tswap16s(vdev, &desc->id);
    address_space_write_cached(cache, off_id, &desc->id, sizeof(desc->id));
    address_space_cache_invalidate(cache, off_id, sizeof(desc->id));
    address_space_write_cached(cache, off_len, &desc->len, sizeof(desc->len));
    address_space_cache_invalidate(cache, off_len, sizeof(desc->len));
}

static void vring_packed_desc_write_flags(VirtIODevice *vdev,
                                          VRingPackedDesc *desc,
                                          MemoryRegionCache *cache,
                                          int i)
{
    hwaddr off = i * sizeof(VRingPackedDesc) + offsetof(VRingPackedDesc, flags);

    virtio_stw_phys_cached(vdev, cache, off, desc->flags);
    address_space_cache_invalidate(cache, off, sizeof(desc->flags));
}

static void vring_packed_desc_write(VirtIODevice *vdev,
                                    VRingPackedDesc *desc,
                                    MemoryRegionCache *cache,
                                    int i, bool strict_order)
{
    vring_packed_desc_write_data(vdev, desc, cache, i);
    if (strict_order) {
        /* Make sure data is written before flags. */
        smp_wmb();
    }
    vring_packed_desc_write_flags(vdev, desc, cache, i);
}

static inline bool is_desc_avail(uint16_t flags, bool wrap_counter)
{
    bool avail, used;

    avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
    used = !!(flags & (1 << VRING_PACKED_DESC_F_USED));
    return (avail != used) && (avail == wrap_counter);
}

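/*
 * Illustrative example (not part of the original source): in a packed ring a
 * descriptor is available when its AVAIL bit differs from its USED bit and
 * AVAIL matches the driver's current wrap counter. On the first pass
 * (wrap_counter == true) the driver advertises AVAIL=1/USED=0, so
 * is_desc_avail(flags, true) is true; once the device marks the descriptor
 * used (AVAIL=1/USED=1) the same call returns false. After the driver wraps
 * around the ring it advertises with AVAIL=0/USED=1, which is checked against
 * wrap_counter == false.
 */
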
/* Fetch avail_idx from VQ memory only when we really need to know if
 * the guest has added some buffers.
 * Called within rcu_read_lock(). */
static int virtio_queue_empty_rcu(VirtQueue *vq)
{
    if (virtio_device_disabled(vq->vdev)) {
        return 1;
    }

    if (unlikely(!vq->vring.avail)) {
        return 1;
    }

    if (vq->shadow_avail_idx != vq->last_avail_idx) {
        return 0;
    }

    return vring_avail_idx(vq) == vq->last_avail_idx;
}

static int virtio_queue_split_empty(VirtQueue *vq)
{
    bool empty;

    if (virtio_device_disabled(vq->vdev)) {
        return 1;
    }

    if (unlikely(!vq->vring.avail)) {
        return 1;
    }

    if (vq->shadow_avail_idx != vq->last_avail_idx) {
        return 0;
    }

    RCU_READ_LOCK_GUARD();
    empty = vring_avail_idx(vq) == vq->last_avail_idx;
    return empty;
}

/* Called within rcu_read_lock(). */
static int virtio_queue_packed_empty_rcu(VirtQueue *vq)
{
    struct VRingPackedDesc desc;
    VRingMemoryRegionCaches *cache;

    if (unlikely(!vq->vring.desc)) {
        return 1;
    }

    cache = vring_get_region_caches(vq);
    if (!cache) {
        return 1;
    }

    vring_packed_desc_read_flags(vq->vdev, &desc.flags, &cache->desc,
                                 vq->last_avail_idx);

    return !is_desc_avail(desc.flags, vq->last_avail_wrap_counter);
}

static int virtio_queue_packed_empty(VirtQueue *vq)
{
    RCU_READ_LOCK_GUARD();
    return virtio_queue_packed_empty_rcu(vq);
}

int virtio_queue_empty(VirtQueue *vq)
{
    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        return virtio_queue_packed_empty(vq);
    } else {
        return virtio_queue_split_empty(vq);
    }
}

static void virtqueue_unmap_sg(VirtQueue *vq, const VirtQueueElement *elem,
                               unsigned int len)
{
    AddressSpace *dma_as = vq->vdev->dma_as;
    unsigned int offset;
    int i;

    offset = 0;
    for (i = 0; i < elem->in_num; i++) {
        size_t size = MIN(len - offset, elem->in_sg[i].iov_len);

        dma_memory_unmap(dma_as, elem->in_sg[i].iov_base,
                         elem->in_sg[i].iov_len,
                         DMA_DIRECTION_FROM_DEVICE, size);

        offset += size;
    }

    for (i = 0; i < elem->out_num; i++) {
        dma_memory_unmap(dma_as, elem->out_sg[i].iov_base,
                         elem->out_sg[i].iov_len,
                         DMA_DIRECTION_TO_DEVICE,
                         elem->out_sg[i].iov_len);
    }
}

/* virtqueue_detach_element:
 * @vq: The #VirtQueue
 * @elem: The #VirtQueueElement
 * @len: number of bytes written
 *
 * Detach the element from the virtqueue. This function is suitable for device
 * reset or other situations where a #VirtQueueElement is simply freed and will
 * not be pushed or discarded.
 */
void virtqueue_detach_element(VirtQueue *vq, const VirtQueueElement *elem,
                              unsigned int len)
{
    vq->inuse -= elem->ndescs;
    virtqueue_unmap_sg(vq, elem, len);
}

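/*
 * Usage sketch (illustrative, not part of the original source): a device that
 * holds popped elements across requests can release them on reset without
 * writing anything back to the used ring:
 *
 *     virtqueue_detach_element(vq, elem, 0);
 *     g_free(elem);
 *
 * Passing len = 0 indicates that nothing was written into the in_sg buffers.
 */
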
static void virtqueue_split_rewind(VirtQueue *vq, unsigned int num)
{
    vq->last_avail_idx -= num;
}

static void virtqueue_packed_rewind(VirtQueue *vq, unsigned int num)
{
    if (vq->last_avail_idx < num) {
        vq->last_avail_idx = vq->vring.num + vq->last_avail_idx - num;
        vq->last_avail_wrap_counter ^= 1;
    } else {
        vq->last_avail_idx -= num;
    }
}

/* virtqueue_unpop:
 * @vq: The #VirtQueue
 * @elem: The #VirtQueueElement
 * @len: number of bytes written
 *
 * Pretend the most recent element wasn't popped from the virtqueue. The next
 * call to virtqueue_pop() will refetch the element.
 */
void virtqueue_unpop(VirtQueue *vq, const VirtQueueElement *elem,
                     unsigned int len)
{
    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        virtqueue_packed_rewind(vq, 1);
    } else {
        virtqueue_split_rewind(vq, 1);
    }

    virtqueue_detach_element(vq, elem, len);
}

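/*
 * Usage sketch (illustrative, not part of the original source): when a backend
 * temporarily cannot accept a request, the element can be returned so a later
 * call re-pops the same descriptor chain:
 *
 *     VirtQueueElement *elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
 *     if (elem && backend_is_busy) {
 *         virtqueue_unpop(vq, elem, 0);
 *         g_free(elem);
 *     }
 *
 * backend_is_busy is a hypothetical condition; only the virtqueue calls are
 * real API.
 */
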
/* virtqueue_rewind:
 * @vq: The #VirtQueue
 * @num: Number of elements to push back
 *
 * Pretend that elements weren't popped from the virtqueue. The next
 * virtqueue_pop() will refetch the oldest element.
 *
 * Use virtqueue_unpop() instead if you have a VirtQueueElement.
 *
 * Returns: true on success, false if @num is greater than the number of in use
 * elements.
 */
bool virtqueue_rewind(VirtQueue *vq, unsigned int num)
{
    if (num > vq->inuse) {
        return false;
    }

    vq->inuse -= num;
    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        virtqueue_packed_rewind(vq, num);
    } else {
        virtqueue_split_rewind(vq, num);
    }
    return true;
}

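/*
 * Usage sketch (illustrative, not part of the original source): a device that
 * popped a batch of elements and then cannot process them (e.g. its backend
 * went away) can free the elements and hand the descriptors back in one call:
 *
 *     for (i = 0; i < n_popped; i++) {
 *         g_free(batch[i]);
 *     }
 *     virtqueue_rewind(vq, n_popped);
 *
 * batch and n_popped are hypothetical names used only for illustration.
 */
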
static void virtqueue_split_fill(VirtQueue *vq, const VirtQueueElement *elem,
                                 unsigned int len, unsigned int idx)
{
    VRingUsedElem uelem;

    if (unlikely(!vq->vring.used)) {
        return;
    }

    idx = (idx + vq->used_idx) % vq->vring.num;

    uelem.id = elem->index;
    uelem.len = len;
    vring_used_write(vq, &uelem, idx);
}

static void virtqueue_packed_fill(VirtQueue *vq, const VirtQueueElement *elem,
                                  unsigned int len, unsigned int idx)
{
    vq->used_elems[idx].index = elem->index;
    vq->used_elems[idx].len = len;
    vq->used_elems[idx].ndescs = elem->ndescs;
}

static void virtqueue_packed_fill_desc(VirtQueue *vq,
                                       const VirtQueueElement *elem,
                                       unsigned int idx,
                                       bool strict_order)
{
    uint16_t head;
    VRingMemoryRegionCaches *caches;
    VRingPackedDesc desc = {
        .id = elem->index,
        .len = elem->len,
    };
    bool wrap_counter = vq->used_wrap_counter;

    if (unlikely(!vq->vring.desc)) {
        return;
    }

    head = vq->used_idx + idx;
    if (head >= vq->vring.num) {
        head -= vq->vring.num;
        wrap_counter ^= 1;
    }
    if (wrap_counter) {
        desc.flags |= (1 << VRING_PACKED_DESC_F_AVAIL);
        desc.flags |= (1 << VRING_PACKED_DESC_F_USED);
    } else {
        desc.flags &= ~(1 << VRING_PACKED_DESC_F_AVAIL);
        desc.flags &= ~(1 << VRING_PACKED_DESC_F_USED);
    }

    caches = vring_get_region_caches(vq);
    if (!caches) {
        return;
    }

    vring_packed_desc_write(vq->vdev, &desc, &caches->desc, head, strict_order);
}

/* Called within rcu_read_lock(). */
void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len, unsigned int idx)
{
    trace_virtqueue_fill(vq, elem, len, idx);

    virtqueue_unmap_sg(vq, elem, len);

    if (virtio_device_disabled(vq->vdev)) {
        return;
    }

    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        virtqueue_packed_fill(vq, elem, len, idx);
    } else {
        virtqueue_split_fill(vq, elem, len, idx);
    }
}

/* Called within rcu_read_lock(). */
static void virtqueue_split_flush(VirtQueue *vq, unsigned int count)
{
    uint16_t old, new;

    if (unlikely(!vq->vring.used)) {
        return;
    }

    /* Make sure buffer is written before we update index. */
    smp_wmb();
    trace_virtqueue_flush(vq, count);
    old = vq->used_idx;
    new = old + count;
    vring_used_idx_set(vq, new);
    vq->inuse -= count;
    if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old))) {
        vq->signalled_used_valid = false;
    }
}

static void virtqueue_packed_flush(VirtQueue *vq, unsigned int count)
{
    unsigned int i, ndescs = 0;

    if (unlikely(!vq->vring.desc)) {
        return;
    }

    /*
     * For indirect elements, 'ndescs' is 1.
     * For all other elements, 'ndescs' is the number of descriptors
     * chained by NEXT (as set in virtqueue_packed_pop).
     * So when the 'elem' is filled into the descriptor ring,
     * the 'idx' of this 'elem' shall be
     * the value of 'vq->used_idx' plus the 'ndescs'.
     */
    ndescs += vq->used_elems[0].ndescs;
    for (i = 1; i < count; i++) {
        virtqueue_packed_fill_desc(vq, &vq->used_elems[i], ndescs, false);
        ndescs += vq->used_elems[i].ndescs;
    }
    virtqueue_packed_fill_desc(vq, &vq->used_elems[0], 0, true);

    vq->inuse -= ndescs;
    vq->used_idx += ndescs;
    if (vq->used_idx >= vq->vring.num) {
        vq->used_idx -= vq->vring.num;
        vq->used_wrap_counter ^= 1;
        vq->signalled_used_valid = false;
    }
}

void virtqueue_flush(VirtQueue *vq, unsigned int count)
{
    if (virtio_device_disabled(vq->vdev)) {
        vq->inuse -= count;
        return;
    }

    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        virtqueue_packed_flush(vq, count);
    } else {
        virtqueue_split_flush(vq, count);
    }
}

void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len)
{
    RCU_READ_LOCK_GUARD();
    virtqueue_fill(vq, elem, len, 0);
    virtqueue_flush(vq, 1);
}

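/*
 * Usage sketch (illustrative, not part of the original source):
 * virtqueue_push() is the single-element shorthand for fill + flush. A device
 * completing several requests at once can batch them and update the used
 * index only once:
 *
 *     RCU_READ_LOCK_GUARD();
 *     for (i = 0; i < n; i++) {
 *         virtqueue_fill(vq, elems[i], lens[i], i);
 *     }
 *     virtqueue_flush(vq, n);
 *     virtio_notify(vdev, vq);
 *
 * followed by freeing each element with g_free(). elems, lens and n are
 * hypothetical names used only for illustration.
 */
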
/* Called within rcu_read_lock(). */
static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
{
    uint16_t avail_idx, num_heads;

    /* Use shadow index whenever possible. */
    avail_idx = (vq->shadow_avail_idx != idx) ? vq->shadow_avail_idx
                                              : vring_avail_idx(vq);
    num_heads = avail_idx - idx;

    /* Check it isn't doing very strange things with descriptor numbers. */
    if (num_heads > vq->vring.num) {
        virtio_error(vq->vdev, "Guest moved used index from %u to %u",
                     idx, vq->shadow_avail_idx);
        return -EINVAL;
    }
    /*
     * On success, callers read a descriptor at vq->last_avail_idx.
     * Make sure descriptor read does not bypass avail index read.
     *
     * This is necessary even if we are using a shadow index, since
     * the shadow index could have been initialized by calling
     * vring_avail_idx() outside of this function, i.e., by a guest
     * memory read not accompanied by a barrier.
     */
    if (num_heads) {
        smp_rmb();
    }

    return num_heads;
}

/* Called within rcu_read_lock(). */
static bool virtqueue_get_head(VirtQueue *vq, unsigned int idx,
                               unsigned int *head)
{
    /* Grab the next descriptor number they're advertising, and increment
     * the index we've seen. */
    *head = vring_avail_ring(vq, idx % vq->vring.num);

    /* If their number is silly, that's a fatal mistake. */
    if (*head >= vq->vring.num) {
        virtio_error(vq->vdev, "Guest says index %u is available", *head);
        return false;
    }

    return true;
}

enum {
    VIRTQUEUE_READ_DESC_ERROR = -1,
    VIRTQUEUE_READ_DESC_DONE = 0,   /* end of chain */
    VIRTQUEUE_READ_DESC_MORE = 1,   /* more buffers in chain */
};

/* Reads the 'desc->next' descriptor into '*desc'. */
static int virtqueue_split_read_next_desc(VirtIODevice *vdev, VRingDesc *desc,
                                          MemoryRegionCache *desc_cache,
                                          unsigned int max)
{
    /* If this descriptor says it doesn't chain, we're done. */
    if (!(desc->flags & VRING_DESC_F_NEXT)) {
        return VIRTQUEUE_READ_DESC_DONE;
    }

    /* Check they're not leading us off the end of the descriptor table. */
    if (desc->next >= max) {
        virtio_error(vdev, "Desc next is %u", desc->next);
        return VIRTQUEUE_READ_DESC_ERROR;
    }

    vring_split_desc_read(vdev, desc, desc_cache, desc->next);
    return VIRTQUEUE_READ_DESC_MORE;
}

/* Called within rcu_read_lock(). */
static void virtqueue_split_get_avail_bytes(VirtQueue *vq,
                            unsigned int *in_bytes, unsigned int *out_bytes,
                            unsigned max_in_bytes, unsigned max_out_bytes,
                            VRingMemoryRegionCaches *caches)
{
    VirtIODevice *vdev = vq->vdev;
    unsigned int idx;
    unsigned int total_bufs, in_total, out_total;
    MemoryRegionCache indirect_desc_cache;
    int64_t len = 0;
    int rc;

    address_space_cache_init_empty(&indirect_desc_cache);

    idx = vq->last_avail_idx;
    total_bufs = in_total = out_total = 0;

    while ((rc = virtqueue_num_heads(vq, idx)) > 0) {
        MemoryRegionCache *desc_cache = &caches->desc;
        unsigned int num_bufs;
        VRingDesc desc;
        unsigned int i;
        unsigned int max = vq->vring.num;

        num_bufs = total_bufs;

        if (!virtqueue_get_head(vq, idx++, &i)) {
            goto err;
        }

        vring_split_desc_read(vdev, &desc, desc_cache, i);

        if (desc.flags & VRING_DESC_F_INDIRECT) {
            if (!desc.len || (desc.len % sizeof(VRingDesc))) {
                virtio_error(vdev, "Invalid size for indirect buffer table");
                goto err;
            }

            /* If we've got too many, that implies a descriptor loop. */
            if (num_bufs >= max) {
                virtio_error(vdev, "Looped descriptor");
                goto err;
            }

            /* loop over the indirect descriptor table */
            len = address_space_cache_init(&indirect_desc_cache,
                                           vdev->dma_as,
                                           desc.addr, desc.len, false);
            desc_cache = &indirect_desc_cache;
            if (len < desc.len) {
                virtio_error(vdev, "Cannot map indirect buffer");
                goto err;
            }

            max = desc.len / sizeof(VRingDesc);
            num_bufs = i = 0;
            vring_split_desc_read(vdev, &desc, desc_cache, i);
        }

        do {
            /* If we've got too many, that implies a descriptor loop. */
            if (++num_bufs > max) {
                virtio_error(vdev, "Looped descriptor");
                goto err;
            }

            if (desc.flags & VRING_DESC_F_WRITE) {
                in_total += desc.len;
            } else {
                out_total += desc.len;
            }
            if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
                goto done;
            }

            rc = virtqueue_split_read_next_desc(vdev, &desc, desc_cache, max);
        } while (rc == VIRTQUEUE_READ_DESC_MORE);

        if (rc == VIRTQUEUE_READ_DESC_ERROR) {
            goto err;
        }

        if (desc_cache == &indirect_desc_cache) {
            address_space_cache_destroy(&indirect_desc_cache);
            total_bufs++;
        } else {
            total_bufs = num_bufs;
        }
    }

    if (rc < 0) {
        goto err;
    }

done:
    address_space_cache_destroy(&indirect_desc_cache);
    if (in_bytes) {
        *in_bytes = in_total;
    }
    if (out_bytes) {
        *out_bytes = out_total;
    }
    return;

err:
    in_total = out_total = 0;
    goto done;
}

static int virtqueue_packed_read_next_desc(VirtQueue *vq,
                                           VRingPackedDesc *desc,
                                           MemoryRegionCache *desc_cache,
                                           unsigned int max,
                                           unsigned int *next,
                                           bool indirect)
{
    /* If this descriptor says it doesn't chain, we're done. */
    if (!indirect && !(desc->flags & VRING_DESC_F_NEXT)) {
        return VIRTQUEUE_READ_DESC_DONE;
    }

    ++*next;
    if (*next == max) {
        if (indirect) {
            return VIRTQUEUE_READ_DESC_DONE;
        } else {
            (*next) -= vq->vring.num;
        }
    }

    vring_packed_desc_read(vq->vdev, desc, desc_cache, *next, false);
    return VIRTQUEUE_READ_DESC_MORE;
}

/* Called within rcu_read_lock(). */
static void virtqueue_packed_get_avail_bytes(VirtQueue *vq,
                                             unsigned int *in_bytes,
                                             unsigned int *out_bytes,
                                             unsigned max_in_bytes,
                                             unsigned max_out_bytes,
                                             VRingMemoryRegionCaches *caches)
{
    VirtIODevice *vdev = vq->vdev;
    unsigned int idx;
    unsigned int total_bufs, in_total, out_total;
    MemoryRegionCache indirect_desc_cache;
    MemoryRegionCache *desc_cache;
    int64_t len = 0;
    VRingPackedDesc desc;
    bool wrap_counter;

    address_space_cache_init_empty(&indirect_desc_cache);

    idx = vq->last_avail_idx;
    wrap_counter = vq->last_avail_wrap_counter;
    total_bufs = in_total = out_total = 0;

    for (;;) {
        unsigned int num_bufs = total_bufs;
        unsigned int i = idx;
        int rc;
        unsigned int max = vq->vring.num;

        desc_cache = &caches->desc;

        vring_packed_desc_read(vdev, &desc, desc_cache, idx, true);
        if (!is_desc_avail(desc.flags, wrap_counter)) {
            break;
        }

        if (desc.flags & VRING_DESC_F_INDIRECT) {
            if (desc.len % sizeof(VRingPackedDesc)) {
                virtio_error(vdev, "Invalid size for indirect buffer table");
                goto err;
            }

            /* If we've got too many, that implies a descriptor loop. */
            if (num_bufs >= max) {
                virtio_error(vdev, "Looped descriptor");
                goto err;
            }

            /* loop over the indirect descriptor table */
            len = address_space_cache_init(&indirect_desc_cache,
                                           vdev->dma_as,
                                           desc.addr, desc.len, false);
            desc_cache = &indirect_desc_cache;
            if (len < desc.len) {
                virtio_error(vdev, "Cannot map indirect buffer");
                goto err;
            }

            max = desc.len / sizeof(VRingPackedDesc);
            num_bufs = i = 0;
            vring_packed_desc_read(vdev, &desc, desc_cache, i, false);
        }

        do {
            /* If we've got too many, that implies a descriptor loop. */
            if (++num_bufs > max) {
                virtio_error(vdev, "Looped descriptor");
                goto err;
            }

            if (desc.flags & VRING_DESC_F_WRITE) {
                in_total += desc.len;
            } else {
                out_total += desc.len;
            }
            if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
                goto done;
            }

            rc = virtqueue_packed_read_next_desc(vq, &desc, desc_cache, max,
                                                 &i, desc_cache ==
                                                 &indirect_desc_cache);
        } while (rc == VIRTQUEUE_READ_DESC_MORE);

        if (desc_cache == &indirect_desc_cache) {
            address_space_cache_destroy(&indirect_desc_cache);
            total_bufs++;
            idx++;
        } else {
            idx += num_bufs - total_bufs;
            total_bufs = num_bufs;
        }

        if (idx >= vq->vring.num) {
            idx -= vq->vring.num;
            wrap_counter ^= 1;
        }
    }

    /* Record the index and wrap counter for a kick we want */
    vq->shadow_avail_idx = idx;
    vq->shadow_avail_wrap_counter = wrap_counter;
done:
    address_space_cache_destroy(&indirect_desc_cache);
    if (in_bytes) {
        *in_bytes = in_total;
    }
    if (out_bytes) {
        *out_bytes = out_total;
    }
    return;

err:
    in_total = out_total = 0;
    goto done;
}

void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
                               unsigned int *out_bytes,
                               unsigned max_in_bytes, unsigned max_out_bytes)
{
    uint16_t desc_size;
    VRingMemoryRegionCaches *caches;

    RCU_READ_LOCK_GUARD();

    if (unlikely(!vq->vring.desc)) {
        goto err;
    }

    caches = vring_get_region_caches(vq);
    if (!caches) {
        goto err;
    }

    desc_size = virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED) ?
                                sizeof(VRingPackedDesc) : sizeof(VRingDesc);
    if (caches->desc.len < vq->vring.num * desc_size) {
        virtio_error(vq->vdev, "Cannot map descriptor ring");
        goto err;
    }

    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        virtqueue_packed_get_avail_bytes(vq, in_bytes, out_bytes,
                                         max_in_bytes, max_out_bytes,
                                         caches);
    } else {
        virtqueue_split_get_avail_bytes(vq, in_bytes, out_bytes,
                                        max_in_bytes, max_out_bytes,
                                        caches);
    }

    return;
err:
    if (in_bytes) {
        *in_bytes = 0;
    }
    if (out_bytes) {
        *out_bytes = 0;
    }
}

int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes,
                          unsigned int out_bytes)
{
    unsigned int in_total, out_total;

    virtqueue_get_avail_bytes(vq, &in_total, &out_total, in_bytes, out_bytes);
    return in_bytes <= in_total && out_bytes <= out_total;
}

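/*
 * Usage sketch (illustrative, not part of the original source):
 * virtqueue_avail_bytes() lets a device check, without popping anything,
 * whether the guest has queued enough buffer space. The first size is the
 * device-writable ("in") space required and the second the device-readable
 * ("out") data required, e.g.:
 *
 *     if (virtqueue_avail_bytes(vq, reply_size, 0)) {
 *         ...enough in space is available for a reply of reply_size bytes...
 *     }
 *
 * reply_size is a hypothetical variable used only for illustration.
 */
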
static bool virtqueue_map_desc(VirtIODevice *vdev, unsigned int *p_num_sg,
                               hwaddr *addr, struct iovec *iov,
                               unsigned int max_num_sg, bool is_write,
                               hwaddr pa, size_t sz)
{
    bool ok = false;
    unsigned num_sg = *p_num_sg;
    assert(num_sg <= max_num_sg);

    if (!sz) {
        virtio_error(vdev, "virtio: zero sized buffers are not allowed");
        goto out;
    }

    while (sz) {
        hwaddr len = sz;

        if (num_sg == max_num_sg) {
            virtio_error(vdev, "virtio: too many write descriptors in "
                               "indirect table");
            goto out;
        }

        iov[num_sg].iov_base = dma_memory_map(vdev->dma_as, pa, &len,
                                              is_write ?
                                              DMA_DIRECTION_FROM_DEVICE :
                                              DMA_DIRECTION_TO_DEVICE,
                                              MEMTXATTRS_UNSPECIFIED);
        if (!iov[num_sg].iov_base) {
            virtio_error(vdev, "virtio: bogus descriptor or out of resources");
            goto out;
        }

        iov[num_sg].iov_len = len;
        addr[num_sg] = pa;

        sz -= len;
        pa += len;
        num_sg++;
    }
    ok = true;

out:
    *p_num_sg = num_sg;
    return ok;
}

/* Only used by error code paths before we have a VirtQueueElement (therefore
 * virtqueue_unmap_sg() can't be used). Assumes buffers weren't written to
 * yet.
 */
static void virtqueue_undo_map_desc(unsigned int out_num, unsigned int in_num,
                                    struct iovec *iov)
{
    unsigned int i;

    for (i = 0; i < out_num + in_num; i++) {
        int is_write = i >= out_num;

        cpu_physical_memory_unmap(iov->iov_base, iov->iov_len, is_write, 0);
        iov++;
    }
}

static void virtqueue_map_iovec(VirtIODevice *vdev, struct iovec *sg,
                                hwaddr *addr, unsigned int num_sg,
                                bool is_write)
{
    unsigned int i;
    hwaddr len;

    for (i = 0; i < num_sg; i++) {
        len = sg[i].iov_len;
        sg[i].iov_base = dma_memory_map(vdev->dma_as,
                                        addr[i], &len, is_write ?
                                        DMA_DIRECTION_FROM_DEVICE :
                                        DMA_DIRECTION_TO_DEVICE,
                                        MEMTXATTRS_UNSPECIFIED);
        if (!sg[i].iov_base) {
            error_report("virtio: error trying to map MMIO memory");
            exit(1);
        }
        if (len != sg[i].iov_len) {
            error_report("virtio: unexpected memory split");
            exit(1);
        }
    }
}

void virtqueue_map(VirtIODevice *vdev, VirtQueueElement *elem)
{
    virtqueue_map_iovec(vdev, elem->in_sg, elem->in_addr, elem->in_num, true);
    virtqueue_map_iovec(vdev, elem->out_sg, elem->out_addr, elem->out_num,
                        false);
}

static void *virtqueue_alloc_element(size_t sz, unsigned out_num, unsigned in_num)
{
    VirtQueueElement *elem;
    size_t in_addr_ofs = QEMU_ALIGN_UP(sz, __alignof__(elem->in_addr[0]));
    size_t out_addr_ofs = in_addr_ofs + in_num * sizeof(elem->in_addr[0]);
    size_t out_addr_end = out_addr_ofs + out_num * sizeof(elem->out_addr[0]);
    size_t in_sg_ofs = QEMU_ALIGN_UP(out_addr_end, __alignof__(elem->in_sg[0]));
    size_t out_sg_ofs = in_sg_ofs + in_num * sizeof(elem->in_sg[0]);
    size_t out_sg_end = out_sg_ofs + out_num * sizeof(elem->out_sg[0]);

    assert(sz >= sizeof(VirtQueueElement));
    elem = g_malloc(out_sg_end);
    trace_virtqueue_alloc_element(elem, sz, in_num, out_num);
    elem->out_num = out_num;
    elem->in_num = in_num;
    elem->in_addr = (void *)elem + in_addr_ofs;
    elem->out_addr = (void *)elem + out_addr_ofs;
    elem->in_sg = (void *)elem + in_sg_ofs;
    elem->out_sg = (void *)elem + out_sg_ofs;
    return elem;
}

86044b24 1507static void *virtqueue_split_pop(VirtQueue *vq, size_t sz)
967f97fa 1508{
5774cf98 1509 unsigned int i, head, max;
991976f7 1510 VRingMemoryRegionCaches *caches;
43d63769 1511 MemoryRegionCache indirect_desc_cache;
5eba0404
PB
1512 MemoryRegionCache *desc_cache;
1513 int64_t len;
cee3ca00 1514 VirtIODevice *vdev = vq->vdev;
9796d0ac 1515 VirtQueueElement *elem = NULL;
37ef70be 1516 unsigned out_num, in_num, elem_entries;
3b3b0628
PB
1517 hwaddr addr[VIRTQUEUE_MAX_SIZE];
1518 struct iovec iov[VIRTQUEUE_MAX_SIZE];
aa570d6f 1519 VRingDesc desc;
412e0e81 1520 int rc;
967f97fa 1521
43d63769
IM
1522 address_space_cache_init_empty(&indirect_desc_cache);
1523
b5f53d04 1524 RCU_READ_LOCK_GUARD();
97cd965c
PB
1525 if (virtio_queue_empty_rcu(vq)) {
1526 goto done;
51b19ebe 1527 }
be1fea9b
VM
1528 /* Needed after virtio_queue_empty(), see comment in
1529 * virtqueue_num_heads(). */
1530 smp_rmb();
967f97fa
AL
1531
1532 /* When we start there are none of either input nor output. */
37ef70be 1533 out_num = in_num = elem_entries = 0;
967f97fa 1534
5774cf98
MM
1535 max = vq->vring.num;
1536
afd9096e 1537 if (vq->inuse >= vq->vring.num) {
ec55da19 1538 virtio_error(vdev, "Virtqueue size exceeded");
97cd965c 1539 goto done;
afd9096e
SH
1540 }
1541
fb1131b6 1542 if (!virtqueue_get_head(vq, vq->last_avail_idx++, &head)) {
97cd965c 1543 goto done;
fb1131b6
SH
1544 }
1545
95129d6f 1546 if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
e9600c6c 1547 vring_set_avail_event(vq, vq->last_avail_idx);
bcbabae8 1548 }
efeea6d0 1549
fb1131b6 1550 i = head;
9796d0ac 1551
e0e2d644 1552 caches = vring_get_region_caches(vq);
abdd16f4
SH
1553 if (!caches) {
1554 virtio_error(vdev, "Region caches not initialized");
1555 goto done;
1556 }
1557
991976f7 1558 if (caches->desc.len < max * sizeof(VRingDesc)) {
9796d0ac
PB
1559 virtio_error(vdev, "Cannot map descriptor ring");
1560 goto done;
1561 }
1562
991976f7 1563 desc_cache = &caches->desc;
86044b24 1564 vring_split_desc_read(vdev, &desc, desc_cache, i);
aa570d6f 1565 if (desc.flags & VRING_DESC_F_INDIRECT) {
74231929 1566 if (!desc.len || (desc.len % sizeof(VRingDesc))) {
ec55da19 1567 virtio_error(vdev, "Invalid size for indirect buffer table");
9796d0ac 1568 goto done;
efeea6d0
MM
1569 }
1570
1571 /* loop over the indirect descriptor table */
5eba0404
PB
1572 len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as,
1573 desc.addr, desc.len, false);
1574 desc_cache = &indirect_desc_cache;
9796d0ac
PB
1575 if (len < desc.len) {
1576 virtio_error(vdev, "Cannot map indirect buffer");
1577 goto done;
1578 }
1579
aa570d6f 1580 max = desc.len / sizeof(VRingDesc);
efeea6d0 1581 i = 0;
86044b24 1582 vring_split_desc_read(vdev, &desc, desc_cache, i);
efeea6d0
MM
1583 }
1584
42fb2e07 1585 /* Collect all the descriptors */
967f97fa 1586 do {
ec55da19
SH
1587 bool map_ok;
1588
aa570d6f 1589 if (desc.flags & VRING_DESC_F_WRITE) {
ec55da19
SH
1590 map_ok = virtqueue_map_desc(vdev, &in_num, addr + out_num,
1591 iov + out_num,
1592 VIRTQUEUE_MAX_SIZE - out_num, true,
1593 desc.addr, desc.len);
42fb2e07 1594 } else {
3b3b0628 1595 if (in_num) {
ec55da19
SH
1596 virtio_error(vdev, "Incorrect order for descriptors");
1597 goto err_undo_map;
c8eac1cf 1598 }
ec55da19
SH
1599 map_ok = virtqueue_map_desc(vdev, &out_num, addr, iov,
1600 VIRTQUEUE_MAX_SIZE, false,
1601 desc.addr, desc.len);
1602 }
1603 if (!map_ok) {
1604 goto err_undo_map;
42fb2e07 1605 }
967f97fa 1606
967f97fa 1607 /* If we've got too many, that implies a descriptor loop. */
37ef70be 1608 if (++elem_entries > max) {
ec55da19
SH
1609 virtio_error(vdev, "Looped descriptor");
1610 goto err_undo_map;
bb6834cf 1611 }
412e0e81 1612
70f88436 1613 rc = virtqueue_split_read_next_desc(vdev, &desc, desc_cache, max);
412e0e81
SH
1614 } while (rc == VIRTQUEUE_READ_DESC_MORE);
1615
1616 if (rc == VIRTQUEUE_READ_DESC_ERROR) {
1617 goto err_undo_map;
1618 }
967f97fa 1619
3b3b0628
PB
1620 /* Now copy what we have collected and mapped */
1621 elem = virtqueue_alloc_element(sz, out_num, in_num);
967f97fa 1622 elem->index = head;
86044b24 1623 elem->ndescs = 1;
3b3b0628
PB
1624 for (i = 0; i < out_num; i++) {
1625 elem->out_addr[i] = addr[i];
1626 elem->out_sg[i] = iov[i];
1627 }
1628 for (i = 0; i < in_num; i++) {
1629 elem->in_addr[i] = addr[out_num + i];
1630 elem->in_sg[i] = iov[out_num + i];
1631 }
967f97fa
AL
1632
1633 vq->inuse++;
1634
64979a4d 1635 trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
9796d0ac 1636done:
5eba0404 1637 address_space_cache_destroy(&indirect_desc_cache);
9796d0ac 1638
51b19ebe 1639 return elem;
ec55da19
SH
1640
1641err_undo_map:
1642 virtqueue_undo_map_desc(out_num, in_num, iov);
9796d0ac 1643 goto done;
967f97fa
AL
1644}
1645
86044b24
JW
1646static void *virtqueue_packed_pop(VirtQueue *vq, size_t sz)
1647{
1648 unsigned int i, max;
1649 VRingMemoryRegionCaches *caches;
43d63769 1650 MemoryRegionCache indirect_desc_cache;
86044b24
JW
1651 MemoryRegionCache *desc_cache;
1652 int64_t len;
1653 VirtIODevice *vdev = vq->vdev;
1654 VirtQueueElement *elem = NULL;
1655 unsigned out_num, in_num, elem_entries;
1656 hwaddr addr[VIRTQUEUE_MAX_SIZE];
1657 struct iovec iov[VIRTQUEUE_MAX_SIZE];
1658 VRingPackedDesc desc;
1659 uint16_t id;
1660 int rc;
1661
43d63769
IM
1662 address_space_cache_init_empty(&indirect_desc_cache);
1663
b5f53d04 1664 RCU_READ_LOCK_GUARD();
86044b24
JW
1665 if (virtio_queue_packed_empty_rcu(vq)) {
1666 goto done;
1667 }
1668
1669 /* When we start there are none of either input nor output. */
1670 out_num = in_num = elem_entries = 0;
1671
1672 max = vq->vring.num;
1673
1674 if (vq->inuse >= vq->vring.num) {
1675 virtio_error(vdev, "Virtqueue size exceeded");
1676 goto done;
1677 }
1678
1679 i = vq->last_avail_idx;
1680
1681 caches = vring_get_region_caches(vq);
abdd16f4
SH
1682 if (!caches) {
1683 virtio_error(vdev, "Region caches not initialized");
1684 goto done;
1685 }
1686
86044b24
JW
1687 if (caches->desc.len < max * sizeof(VRingDesc)) {
1688 virtio_error(vdev, "Cannot map descriptor ring");
1689 goto done;
1690 }
1691
1692 desc_cache = &caches->desc;
1693 vring_packed_desc_read(vdev, &desc, desc_cache, i, true);
1694 id = desc.id;
1695 if (desc.flags & VRING_DESC_F_INDIRECT) {
1696 if (desc.len % sizeof(VRingPackedDesc)) {
1697 virtio_error(vdev, "Invalid size for indirect buffer table");
1698 goto done;
1699 }
1700
1701 /* loop over the indirect descriptor table */
1702 len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as,
1703 desc.addr, desc.len, false);
1704 desc_cache = &indirect_desc_cache;
1705 if (len < desc.len) {
1706 virtio_error(vdev, "Cannot map indirect buffer");
1707 goto done;
1708 }
1709
1710 max = desc.len / sizeof(VRingPackedDesc);
1711 i = 0;
1712 vring_packed_desc_read(vdev, &desc, desc_cache, i, false);
1713 }
1714
1715 /* Collect all the descriptors */
1716 do {
1717 bool map_ok;
1718
1719 if (desc.flags & VRING_DESC_F_WRITE) {
1720 map_ok = virtqueue_map_desc(vdev, &in_num, addr + out_num,
1721 iov + out_num,
1722 VIRTQUEUE_MAX_SIZE - out_num, true,
1723 desc.addr, desc.len);
1724 } else {
1725 if (in_num) {
1726 virtio_error(vdev, "Incorrect order for descriptors");
1727 goto err_undo_map;
1728 }
1729 map_ok = virtqueue_map_desc(vdev, &out_num, addr, iov,
1730 VIRTQUEUE_MAX_SIZE, false,
1731 desc.addr, desc.len);
1732 }
1733 if (!map_ok) {
1734 goto err_undo_map;
1735 }
1736
1737 /* If we've got too many, that implies a descriptor loop. */
1738 if (++elem_entries > max) {
1739 virtio_error(vdev, "Looped descriptor");
1740 goto err_undo_map;
1741 }
1742
1743 rc = virtqueue_packed_read_next_desc(vq, &desc, desc_cache, max, &i,
1744 desc_cache ==
1745 &indirect_desc_cache);
1746 } while (rc == VIRTQUEUE_READ_DESC_MORE);
1747
1748 /* Now copy what we have collected and mapped */
1749 elem = virtqueue_alloc_element(sz, out_num, in_num);
1750 for (i = 0; i < out_num; i++) {
1751 elem->out_addr[i] = addr[i];
1752 elem->out_sg[i] = iov[i];
1753 }
1754 for (i = 0; i < in_num; i++) {
1755 elem->in_addr[i] = addr[out_num + i];
1756 elem->in_sg[i] = iov[out_num + i];
1757 }
1758
1759 elem->index = id;
1760 elem->ndescs = (desc_cache == &indirect_desc_cache) ? 1 : elem_entries;
1761 vq->last_avail_idx += elem->ndescs;
1762 vq->inuse += elem->ndescs;
1763
1764 if (vq->last_avail_idx >= vq->vring.num) {
1765 vq->last_avail_idx -= vq->vring.num;
1766 vq->last_avail_wrap_counter ^= 1;
1767 }
1768
1769 vq->shadow_avail_idx = vq->last_avail_idx;
1770 vq->shadow_avail_wrap_counter = vq->last_avail_wrap_counter;
1771
1772 trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
1773done:
1774 address_space_cache_destroy(&indirect_desc_cache);
86044b24
JW
1775
1776 return elem;
1777
1778err_undo_map:
1779 virtqueue_undo_map_desc(out_num, in_num, iov);
1780 goto done;
1781}
1782
1783void *virtqueue_pop(VirtQueue *vq, size_t sz)
1784{
9d7bd082 1785 if (virtio_device_disabled(vq->vdev)) {
86044b24
JW
1786 return NULL;
1787 }
1788
1789 if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
1790 return virtqueue_packed_pop(vq, sz);
1791 } else {
1792 return virtqueue_split_pop(vq, sz);
1793 }
1794}
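
/*
 * Illustrative sketch (not part of virtio.c): the usual way a device model
 * consumes requests with virtqueue_pop()/virtqueue_push().  The handler name
 * and process_request() are hypothetical; only the virtqueue API calls are
 * real.
 */
static void example_handle_output(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtQueueElement *elem;

    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;                           /* ring is empty */
        }
        /* out_sg[] carries driver->device data, in_sg[] device->driver data */
        size_t written = process_request(elem->out_sg, elem->out_num,
                                         elem->in_sg, elem->in_num);
        virtqueue_push(vq, elem, written);   /* hand the buffers back */
        g_free(elem);                        /* elem was allocated by pop */
    }
    virtio_notify(vdev, vq);                 /* raise the guest interrupt */
}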
1795
1796static unsigned int virtqueue_packed_drop_all(VirtQueue *vq)
54e17709 1797{
86044b24
JW
1798 VRingMemoryRegionCaches *caches;
1799 MemoryRegionCache *desc_cache;
54e17709
YB
1800 unsigned int dropped = 0;
1801 VirtQueueElement elem = {};
1802 VirtIODevice *vdev = vq->vdev;
86044b24 1803 VRingPackedDesc desc;
54e17709 1804
ab4dd274
PMD
1805 RCU_READ_LOCK_GUARD();
1806
86044b24 1807 caches = vring_get_region_caches(vq);
abdd16f4
SH
1808 if (!caches) {
1809 return 0;
1810 }
1811
86044b24
JW
1812 desc_cache = &caches->desc;
1813
1814 virtio_queue_set_notification(vq, 0);
1815
1816 while (vq->inuse < vq->vring.num) {
1817 unsigned int idx = vq->last_avail_idx;
1818 /*
1819 * Works similarly to virtqueue_pop() but does not map buffers
1820 * and does not allocate any memory.
1821 */
1822 vring_packed_desc_read(vdev, &desc, desc_cache,
1823 vq->last_avail_idx, true);
1824 if (!is_desc_avail(desc.flags, vq->last_avail_wrap_counter)) {
1825 break;
1826 }
1827 elem.index = desc.id;
1828 elem.ndescs = 1;
1829 while (virtqueue_packed_read_next_desc(vq, &desc, desc_cache,
1830 vq->vring.num, &idx, false)) {
1831 ++elem.ndescs;
1832 }
1833 /*
1834 * Immediately push the element; there is nothing to unmap
1835 * as both in_num and out_num are set to 0.
1836 */
1837 virtqueue_push(vq, &elem, 0);
1838 dropped++;
1839 vq->last_avail_idx += elem.ndescs;
1840 if (vq->last_avail_idx >= vq->vring.num) {
1841 vq->last_avail_idx -= vq->vring.num;
1842 vq->last_avail_wrap_counter ^= 1;
1843 }
54e17709
YB
1844 }
1845
86044b24
JW
1846 return dropped;
1847}
1848
1849static unsigned int virtqueue_split_drop_all(VirtQueue *vq)
1850{
1851 unsigned int dropped = 0;
1852 VirtQueueElement elem = {};
1853 VirtIODevice *vdev = vq->vdev;
1854 bool fEventIdx = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
1855
54e17709
YB
1856 while (!virtio_queue_empty(vq) && vq->inuse < vq->vring.num) {
1857 /* Works similarly to virtqueue_pop() but does not map buffers
1858 * and does not allocate any memory */
1859 smp_rmb();
1860 if (!virtqueue_get_head(vq, vq->last_avail_idx, &elem.index)) {
1861 break;
1862 }
1863 vq->inuse++;
1864 vq->last_avail_idx++;
1865 if (fEventIdx) {
1866 vring_set_avail_event(vq, vq->last_avail_idx);
1867 }
1868 /* Immediately push the element; there is nothing to unmap
1869 * as both in_num and out_num are set to 0 */
1870 virtqueue_push(vq, &elem, 0);
1871 dropped++;
1872 }
1873
1874 return dropped;
1875}
1876
86044b24
JW
1877/* virtqueue_drop_all:
1878 * @vq: The #VirtQueue
1879 * Drops all queued buffers and indicates them to the guest
1880 * as if they were completed. Useful when buffers cannot be
1881 * processed but must be returned to the guest.
1882 */
1883unsigned int virtqueue_drop_all(VirtQueue *vq)
1884{
1885 struct VirtIODevice *vdev = vq->vdev;
1886
9d7bd082 1887 if (virtio_device_disabled(vq->vdev)) {
86044b24
JW
1888 return 0;
1889 }
1890
1891 if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
1892 return virtqueue_packed_drop_all(vq);
1893 } else {
1894 return virtqueue_split_drop_all(vq);
1895 }
1896}
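
/*
 * Illustrative sketch (not part of virtio.c): when a backend cannot process
 * buffers (for example a peer went away) but the guest must get them back,
 * a device can complete everything in one call.  The function name and the
 * surrounding condition are hypothetical.
 */
static void example_flush_unusable_queue(VirtIODevice *vdev, VirtQueue *vq)
{
    unsigned int dropped;

    if (!virtio_queue_get_num(vdev, virtio_get_queue_index(vq))) {
        return;                              /* queue does not exist */
    }
    dropped = virtqueue_drop_all(vq);
    if (dropped) {
        virtio_notify(vdev, vq);             /* guest sees its buffers as used */
    }
}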
1897
3724650d
PB
1898/* Reading and writing a structure directly to QEMUFile is *awful*, but
1899 * it is what QEMU has always done by mistake. We can change it sooner
1900 * or later by bumping the version number of the affected vm states.
1901 * In the meantime, since the in-memory layout of VirtQueueElement
1902 * has changed, we need to marshal to and from the layout that was
1903 * used before the change.
1904 */
1905typedef struct VirtQueueElementOld {
1906 unsigned int index;
1907 unsigned int out_num;
1908 unsigned int in_num;
1909 hwaddr in_addr[VIRTQUEUE_MAX_SIZE];
1910 hwaddr out_addr[VIRTQUEUE_MAX_SIZE];
1911 struct iovec in_sg[VIRTQUEUE_MAX_SIZE];
1912 struct iovec out_sg[VIRTQUEUE_MAX_SIZE];
1913} VirtQueueElementOld;
1914
8607f5c3 1915void *qemu_get_virtqueue_element(VirtIODevice *vdev, QEMUFile *f, size_t sz)
ab281c17 1916{
3724650d
PB
1917 VirtQueueElement *elem;
1918 VirtQueueElementOld data;
1919 int i;
1920
1921 qemu_get_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld));
1922
6bdc21c0
MT
1923 /* TODO: teach all callers that this can fail, and return failure instead
1924 * of asserting here.
262a69f4
EB
1925 * This is just one thing (there are probably more) that must be
1926 * fixed before we can allow NDEBUG compilation.
6bdc21c0 1927 */
6bdc21c0
MT
1928 assert(ARRAY_SIZE(data.in_addr) >= data.in_num);
1929 assert(ARRAY_SIZE(data.out_addr) >= data.out_num);
1930
3724650d
PB
1931 elem = virtqueue_alloc_element(sz, data.out_num, data.in_num);
1932 elem->index = data.index;
1933
1934 for (i = 0; i < elem->in_num; i++) {
1935 elem->in_addr[i] = data.in_addr[i];
1936 }
1937
1938 for (i = 0; i < elem->out_num; i++) {
1939 elem->out_addr[i] = data.out_addr[i];
1940 }
1941
1942 for (i = 0; i < elem->in_num; i++) {
1943 /* Base is overwritten by virtqueue_map. */
1944 elem->in_sg[i].iov_base = 0;
1945 elem->in_sg[i].iov_len = data.in_sg[i].iov_len;
1946 }
1947
1948 for (i = 0; i < elem->out_num; i++) {
1949 /* Base is overwritten by virtqueue_map. */
1950 elem->out_sg[i].iov_base = 0;
1951 elem->out_sg[i].iov_len = data.out_sg[i].iov_len;
1952 }
1953
86044b24
JW
1954 if (virtio_host_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
1955 qemu_get_be32s(f, &elem->ndescs);
1956 }
1957
8607f5c3 1958 virtqueue_map(vdev, elem);
ab281c17
PB
1959 return elem;
1960}
1961
86044b24
JW
1962void qemu_put_virtqueue_element(VirtIODevice *vdev, QEMUFile *f,
1963 VirtQueueElement *elem)
ab281c17 1964{
3724650d
PB
1965 VirtQueueElementOld data;
1966 int i;
1967
1968 memset(&data, 0, sizeof(data));
1969 data.index = elem->index;
1970 data.in_num = elem->in_num;
1971 data.out_num = elem->out_num;
1972
1973 for (i = 0; i < elem->in_num; i++) {
1974 data.in_addr[i] = elem->in_addr[i];
1975 }
1976
1977 for (i = 0; i < elem->out_num; i++) {
1978 data.out_addr[i] = elem->out_addr[i];
1979 }
1980
1981 for (i = 0; i < elem->in_num; i++) {
1982 /* Base is overwritten by virtqueue_map when loading. Do not
1983 * save it, as it would leak the QEMU address space layout. */
1984 data.in_sg[i].iov_len = elem->in_sg[i].iov_len;
1985 }
1986
1987 for (i = 0; i < elem->out_num; i++) {
1988 /* Do not save iov_base as above. */
1989 data.out_sg[i].iov_len = elem->out_sg[i].iov_len;
1990 }
86044b24
JW
1991
1992 if (virtio_host_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
1993 qemu_put_be32s(f, &elem->ndescs);
1994 }
1995
3724650d 1996 qemu_put_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld));
ab281c17
PB
1997}
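
/*
 * Illustrative sketch (not part of virtio.c): how a device typically saves
 * and restores in-flight requests across migration, loosely following the
 * virtio-blk pattern.  ExampleReq, the request list and example_requeue()
 * are hypothetical; the QEMUFile and virtqueue element calls are real.
 */
static void example_save_inflight(VirtIODevice *vdev, QEMUFile *f,
                                  ExampleReq *reqs)
{
    ExampleReq *req;

    for (req = reqs; req; req = req->next) {
        qemu_put_sbyte(f, 1);                /* marker: one more element follows */
        qemu_put_virtqueue_element(vdev, f, req->elem);
    }
    qemu_put_sbyte(f, 0);                    /* end of the in-flight list */
}

static int example_load_inflight(VirtIODevice *vdev, QEMUFile *f, VirtQueue *vq)
{
    while (qemu_get_sbyte(f)) {
        VirtQueueElement *elem =
            qemu_get_virtqueue_element(vdev, f, sizeof(VirtQueueElement));

        example_requeue(vq, elem);           /* hypothetical: track for replay */
    }
    return 0;
}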
1998
967f97fa 1999/* virtio device */
7055e687
MT
2000static void virtio_notify_vector(VirtIODevice *vdev, uint16_t vector)
2001{
1c819449
FK
2002 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2003 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
2004
9d7bd082 2005 if (virtio_device_disabled(vdev)) {
f5ed3663
SH
2006 return;
2007 }
2008
1c819449
FK
2009 if (k->notify) {
2010 k->notify(qbus->parent, vector);
7055e687
MT
2011 }
2012}
967f97fa 2013
53c25cea 2014void virtio_update_irq(VirtIODevice *vdev)
967f97fa 2015{
7055e687 2016 virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
967f97fa
AL
2017}
2018
0b352fd6
CH
2019static int virtio_validate_features(VirtIODevice *vdev)
2020{
2021 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2022
8607f5c3
JW
2023 if (virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM) &&
2024 !virtio_vdev_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM)) {
2025 return -EFAULT;
2026 }
2027
0b352fd6
CH
2028 if (k->validate_features) {
2029 return k->validate_features(vdev);
2030 } else {
2031 return 0;
2032 }
2033}
2034
2035int virtio_set_status(VirtIODevice *vdev, uint8_t val)
4e1837f8 2036{
181103cd 2037 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
4e1837f8
SH
2038 trace_virtio_set_status(vdev, val);
2039
95129d6f 2040 if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
0b352fd6
CH
2041 if (!(vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) &&
2042 val & VIRTIO_CONFIG_S_FEATURES_OK) {
2043 int ret = virtio_validate_features(vdev);
2044
2045 if (ret) {
2046 return ret;
2047 }
2048 }
2049 }
e57f2c31 2050
4c5cf37b
XY
2051 if ((vdev->status & VIRTIO_CONFIG_S_DRIVER_OK) !=
2052 (val & VIRTIO_CONFIG_S_DRIVER_OK)) {
2053 virtio_set_started(vdev, val & VIRTIO_CONFIG_S_DRIVER_OK);
2054 }
badaf79c 2055
181103cd
FK
2056 if (k->set_status) {
2057 k->set_status(vdev, val);
4e1837f8
SH
2058 }
2059 vdev->status = val;
badaf79c 2060
0b352fd6 2061 return 0;
4e1837f8
SH
2062}
2063
616a6552
GK
2064static enum virtio_device_endian virtio_default_endian(void)
2065{
2066 if (target_words_bigendian()) {
2067 return VIRTIO_DEVICE_ENDIAN_BIG;
2068 } else {
2069 return VIRTIO_DEVICE_ENDIAN_LITTLE;
2070 }
2071}
2072
2073static enum virtio_device_endian virtio_current_cpu_endian(void)
2074{
cdba7e2f 2075 if (cpu_virtio_is_big_endian(current_cpu)) {
616a6552
GK
2076 return VIRTIO_DEVICE_ENDIAN_BIG;
2077 } else {
2078 return VIRTIO_DEVICE_ENDIAN_LITTLE;
2079 }
2080}
2081
3b43302c
XZ
2082static void __virtio_queue_reset(VirtIODevice *vdev, uint32_t i)
2083{
2084 vdev->vq[i].vring.desc = 0;
2085 vdev->vq[i].vring.avail = 0;
2086 vdev->vq[i].vring.used = 0;
2087 vdev->vq[i].last_avail_idx = 0;
2088 vdev->vq[i].shadow_avail_idx = 0;
2089 vdev->vq[i].used_idx = 0;
2090 vdev->vq[i].last_avail_wrap_counter = true;
2091 vdev->vq[i].shadow_avail_wrap_counter = true;
2092 vdev->vq[i].used_wrap_counter = true;
2093 virtio_queue_set_vector(vdev, i, VIRTIO_NO_VECTOR);
2094 vdev->vq[i].signalled_used = 0;
2095 vdev->vq[i].signalled_used_valid = false;
2096 vdev->vq[i].notification = true;
2097 vdev->vq[i].vring.num = vdev->vq[i].vring.num_default;
2098 vdev->vq[i].inuse = 0;
2099 virtio_virtqueue_reset_region_cache(&vdev->vq[i]);
2100}
2101
b3a8d6f4
XZ
2102void virtio_queue_reset(VirtIODevice *vdev, uint32_t queue_index)
2103{
2104 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2105
2106 if (k->queue_reset) {
2107 k->queue_reset(vdev, queue_index);
2108 }
2109
2110 __virtio_queue_reset(vdev, queue_index);
2111}
2112
3c37f8b8
KX
2113void virtio_queue_enable(VirtIODevice *vdev, uint32_t queue_index)
2114{
2115 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2116
b7c61789
MT
2117 /*
2118 * TODO: Seabios is currently out of spec and triggering this error.
2119 * So this needs to be fixed in Seabios, then this can
2120 * be re-enabled for new machine types only, and also after
2121 * being converted to LOG_GUEST_ERROR.
2122 *
3c37f8b8 2123 if (!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
9b4b4e51 2124 error_report("queue_enable is only supported in devices of virtio "
3c37f8b8
KX
2125 "1.0 or later.");
2126 }
b7c61789 2127 */
3c37f8b8
KX
2128
2129 if (k->queue_enable) {
2130 k->queue_enable(vdev, queue_index);
2131 }
2132}
2133
53c25cea 2134void virtio_reset(void *opaque)
967f97fa
AL
2135{
2136 VirtIODevice *vdev = opaque;
181103cd 2137 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
967f97fa
AL
2138 int i;
2139
e0c472d8 2140 virtio_set_status(vdev, 0);
616a6552
GK
2141 if (current_cpu) {
2142 /* Guest initiated reset */
2143 vdev->device_endian = virtio_current_cpu_endian();
2144 } else {
2145 /* System reset */
2146 vdev->device_endian = virtio_default_endian();
2147 }
e0c472d8 2148
95e1019a 2149 if (vdev->vhost_started && k->get_vhost) {
c0c4f147
SH
2150 vhost_reset_device(k->get_vhost(vdev));
2151 }
2152
181103cd
FK
2153 if (k->reset) {
2154 k->reset(vdev);
2155 }
967f97fa 2156
868a8f44 2157 vdev->start_on_kick = false;
badaf79c 2158 vdev->started = false;
f5ed3663 2159 vdev->broken = false;
704a76fc 2160 vdev->guest_features = 0;
967f97fa
AL
2161 vdev->queue_sel = 0;
2162 vdev->status = 0;
9d7bd082 2163 vdev->disabled = false;
d73415a3 2164 qatomic_set(&vdev->isr, 0);
7055e687
MT
2165 vdev->config_vector = VIRTIO_NO_VECTOR;
2166 virtio_notify_vector(vdev, vdev->config_vector);
967f97fa 2167
87b3bd1c 2168 for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
3b43302c 2169 __virtio_queue_reset(vdev, i);
967f97fa
AL
2170 }
2171}
2172
a8170e5e 2173void virtio_queue_set_addr(VirtIODevice *vdev, int n, hwaddr addr)
967f97fa 2174{
758ead31
PP
2175 if (!vdev->vq[n].vring.num) {
2176 return;
2177 }
ab223c95
CH
2178 vdev->vq[n].vring.desc = addr;
2179 virtio_queue_update_rings(vdev, n);
53c25cea
PB
2180}
2181
a8170e5e 2182hwaddr virtio_queue_get_addr(VirtIODevice *vdev, int n)
53c25cea 2183{
ab223c95
CH
2184 return vdev->vq[n].vring.desc;
2185}
2186
2187void virtio_queue_set_rings(VirtIODevice *vdev, int n, hwaddr desc,
2188 hwaddr avail, hwaddr used)
2189{
758ead31
PP
2190 if (!vdev->vq[n].vring.num) {
2191 return;
2192 }
ab223c95
CH
2193 vdev->vq[n].vring.desc = desc;
2194 vdev->vq[n].vring.avail = avail;
2195 vdev->vq[n].vring.used = used;
c611c764 2196 virtio_init_region_cache(vdev, n);
53c25cea
PB
2197}
2198
e63c0ba1
PM
2199void virtio_queue_set_num(VirtIODevice *vdev, int n, int num)
2200{
f6049f44
PM
2201 /* Don't allow guest to flip queue between existent and
2202 * nonexistent states, or to set it to an invalid size.
2203 */
2204 if (!!num != !!vdev->vq[n].vring.num ||
2205 num > VIRTQUEUE_MAX_SIZE ||
2206 num < 0) {
2207 return;
e63c0ba1 2208 }
f6049f44 2209 vdev->vq[n].vring.num = num;
e63c0ba1
PM
2210}
2211
e0d686bf
JW
2212VirtQueue *virtio_vector_first_queue(VirtIODevice *vdev, uint16_t vector)
2213{
2214 return QLIST_FIRST(&vdev->vector_queues[vector]);
2215}
2216
2217VirtQueue *virtio_vector_next_queue(VirtQueue *vq)
2218{
2219 return QLIST_NEXT(vq, node);
2220}
2221
53c25cea
PB
2222int virtio_queue_get_num(VirtIODevice *vdev, int n)
2223{
2224 return vdev->vq[n].vring.num;
2225}
967f97fa 2226
8c797e75
MT
2227int virtio_queue_get_max_num(VirtIODevice *vdev, int n)
2228{
2229 return vdev->vq[n].vring.num_default;
2230}
2231
8ad176aa
JW
2232int virtio_get_num_queues(VirtIODevice *vdev)
2233{
2234 int i;
2235
87b3bd1c 2236 for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
8ad176aa
JW
2237 if (!virtio_queue_get_num(vdev, i)) {
2238 break;
2239 }
2240 }
2241
2242 return i;
2243}
2244
6ce69d1c
PM
2245void virtio_queue_set_align(VirtIODevice *vdev, int n, int align)
2246{
2247 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2248 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
2249
ab223c95 2250 /* virtio-1 compliant devices cannot change the alignment */
95129d6f 2251 if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
ab223c95
CH
2252 error_report("tried to modify queue alignment for virtio-1 device");
2253 return;
2254 }
6ce69d1c
PM
2255 /* Check that the transport told us it was going to do this
2256 * (so a buggy transport will immediately assert rather than
2257 * silently failing to migrate this state)
2258 */
2259 assert(k->has_variable_vring_alignment);
2260
758ead31
PP
2261 if (align) {
2262 vdev->vq[n].vring.align = align;
2263 virtio_queue_update_rings(vdev, n);
2264 }
6ce69d1c
PM
2265}
2266
2b2cbcad 2267static void virtio_queue_notify_vq(VirtQueue *vq)
25db9ebe 2268{
9e0f5b81 2269 if (vq->vring.desc && vq->handle_output) {
25db9ebe 2270 VirtIODevice *vdev = vq->vdev;
9e0f5b81 2271
f5ed3663
SH
2272 if (unlikely(vdev->broken)) {
2273 return;
2274 }
2275
25db9ebe
SH
2276 trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
2277 vq->handle_output(vdev, vq);
badaf79c
XY
2278
2279 if (unlikely(vdev->start_on_kick)) {
e57f2c31 2280 virtio_set_started(vdev, true);
badaf79c 2281 }
25db9ebe
SH
2282 }
2283}
2284
53c25cea
PB
2285void virtio_queue_notify(VirtIODevice *vdev, int n)
2286{
e49a6618
PB
2287 VirtQueue *vq = &vdev->vq[n];
2288
2289 if (unlikely(!vq->vring.desc || vdev->broken)) {
2290 return;
2291 }
2292
2293 trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
fcccb271 2294 if (vq->host_notifier_enabled) {
e49a6618
PB
2295 event_notifier_set(&vq->host_notifier);
2296 } else if (vq->handle_output) {
2297 vq->handle_output(vdev, vq);
badaf79c 2298
8b04e2c7
XY
2299 if (unlikely(vdev->start_on_kick)) {
2300 virtio_set_started(vdev, true);
2301 }
badaf79c 2302 }
967f97fa
AL
2303}
2304
7055e687
MT
2305uint16_t virtio_queue_vector(VirtIODevice *vdev, int n)
2306{
87b3bd1c 2307 return n < VIRTIO_QUEUE_MAX ? vdev->vq[n].vector :
7055e687
MT
2308 VIRTIO_NO_VECTOR;
2309}
2310
2311void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector)
2312{
e0d686bf
JW
2313 VirtQueue *vq = &vdev->vq[n];
2314
87b3bd1c 2315 if (n < VIRTIO_QUEUE_MAX) {
e0d686bf
JW
2316 if (vdev->vector_queues &&
2317 vdev->vq[n].vector != VIRTIO_NO_VECTOR) {
2318 QLIST_REMOVE(vq, node);
2319 }
7055e687 2320 vdev->vq[n].vector = vector;
e0d686bf
JW
2321 if (vdev->vector_queues &&
2322 vector != VIRTIO_NO_VECTOR) {
2323 QLIST_INSERT_HEAD(&vdev->vector_queues[vector], vq, node);
2324 }
2325 }
7055e687
MT
2326}
2327
f1ac6a55
PB
2328VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
2329 VirtIOHandleOutput handle_output)
967f97fa
AL
2330{
2331 int i;
2332
87b3bd1c 2333 for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
967f97fa
AL
2334 if (vdev->vq[i].vring.num == 0)
2335 break;
2336 }
2337
87b3bd1c 2338 if (i == VIRTIO_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE)
967f97fa
AL
2339 abort();
2340
2341 vdev->vq[i].vring.num = queue_size;
46c5d082 2342 vdev->vq[i].vring.num_default = queue_size;
6ce69d1c 2343 vdev->vq[i].vring.align = VIRTIO_PCI_VRING_ALIGN;
967f97fa 2344 vdev->vq[i].handle_output = handle_output;
b21e2380 2345 vdev->vq[i].used_elems = g_new0(VirtQueueElement, queue_size);
967f97fa
AL
2346
2347 return &vdev->vq[i];
2348}
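
/*
 * Illustrative sketch (not part of virtio.c): queue setup and teardown in a
 * device's realize/unrealize, as done by most virtio device models.  The
 * device type, config size, queue size and handler chosen here are
 * illustrative; the virtio_* calls are the real API.
 */
static void example_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);

    virtio_init(vdev, VIRTIO_ID_BLOCK, sizeof(struct virtio_blk_config));
    /* one request queue with 128 descriptors, kicked via example_handle_output */
    virtio_add_queue(vdev, 128, example_handle_output);
}

static void example_device_unrealize(DeviceState *dev)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);

    virtio_del_queue(vdev, 0);
    virtio_cleanup(vdev);
}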
2349
722f8c51
MT
2350void virtio_delete_queue(VirtQueue *vq)
2351{
2352 vq->vring.num = 0;
2353 vq->vring.num_default = 0;
2354 vq->handle_output = NULL;
722f8c51 2355 g_free(vq->used_elems);
8cd353ea 2356 vq->used_elems = NULL;
421afd2f 2357 virtio_virtqueue_reset_region_cache(vq);
722f8c51
MT
2358}
2359
f23fd811
JW
2360void virtio_del_queue(VirtIODevice *vdev, int n)
2361{
87b3bd1c 2362 if (n < 0 || n >= VIRTIO_QUEUE_MAX) {
f23fd811
JW
2363 abort();
2364 }
2365
722f8c51 2366 virtio_delete_queue(&vdev->vq[n]);
f23fd811
JW
2367}
2368
0687c37c
PB
2369static void virtio_set_isr(VirtIODevice *vdev, int value)
2370{
d73415a3 2371 uint8_t old = qatomic_read(&vdev->isr);
0687c37c
PB
2372
2373 /* Do not write ISR if it does not change, so that its cacheline remains
2374 * shared in the common case where the guest does not read it.
2375 */
2376 if ((old & value) != value) {
d73415a3 2377 qatomic_or(&vdev->isr, value);
0687c37c
PB
2378 }
2379}
2380
4c6dd9a0 2381/* Called within rcu_read_lock(). */
683f7665 2382static bool virtio_split_should_notify(VirtIODevice *vdev, VirtQueue *vq)
bcbabae8
MT
2383{
2384 uint16_t old, new;
2385 bool v;
a281ebc1
MT
2386 /* We need to expose used array entries before checking used event. */
2387 smp_mb();
97b83deb 2388 /* Always notify when the queue is empty (if that feature was acknowledged) */
95129d6f 2389 if (virtio_vdev_has_feature(vdev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
be1fea9b 2390 !vq->inuse && virtio_queue_empty(vq)) {
bcbabae8
MT
2391 return true;
2392 }
2393
95129d6f 2394 if (!virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
bcbabae8
MT
2395 return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
2396 }
2397
2398 v = vq->signalled_used_valid;
2399 vq->signalled_used_valid = true;
2400 old = vq->signalled_used;
b796fcd1 2401 new = vq->signalled_used = vq->used_idx;
e9600c6c 2402 return !v || vring_need_event(vring_get_used_event(vq), new, old);
bcbabae8
MT
2403}
2404
683f7665
JW
2405static bool vring_packed_need_event(VirtQueue *vq, bool wrap,
2406 uint16_t off_wrap, uint16_t new,
2407 uint16_t old)
2408{
2409 int off = off_wrap & ~(1 << 15);
2410
2411 if (wrap != off_wrap >> 15) {
2412 off -= vq->vring.num;
2413 }
2414
2415 return vring_need_event(off, new, old);
2416}
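
/*
 * For reference: the vring_need_event() test used above comes from the
 * standard Linux vring header (standard-headers/linux/virtio_ring.h),
 * reproduced here with plain types.  It checks, with unsigned 16-bit
 * wraparound, whether event_idx falls inside the window of used entries
 * published since the last notification, i.e. inside (old, new]:
 *
 *     static inline int vring_need_event(uint16_t event_idx,
 *                                        uint16_t new, uint16_t old)
 *     {
 *         return (uint16_t)(new - event_idx - 1) < (uint16_t)(new - old);
 *     }
 *
 * The packed helper above first folds the wrap bit (bit 15 of off_wrap)
 * into an offset relative to vq->vring.num before applying the same test.
 */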
2417
4c6dd9a0 2418/* Called within rcu_read_lock(). */
683f7665
JW
2419static bool virtio_packed_should_notify(VirtIODevice *vdev, VirtQueue *vq)
2420{
2421 VRingPackedDescEvent e;
2422 uint16_t old, new;
2423 bool v;
2424 VRingMemoryRegionCaches *caches;
2425
2426 caches = vring_get_region_caches(vq);
abdd16f4
SH
2427 if (!caches) {
2428 return false;
2429 }
2430
683f7665
JW
2431 vring_packed_event_read(vdev, &caches->avail, &e);
2432
2433 old = vq->signalled_used;
2434 new = vq->signalled_used = vq->used_idx;
2435 v = vq->signalled_used_valid;
2436 vq->signalled_used_valid = true;
2437
2438 if (e.flags == VRING_PACKED_EVENT_FLAG_DISABLE) {
2439 return false;
2440 } else if (e.flags == VRING_PACKED_EVENT_FLAG_ENABLE) {
2441 return true;
2442 }
2443
2444 return !v || vring_packed_need_event(vq, vq->used_wrap_counter,
2445 e.off_wrap, new, old);
2446}
2447
2448/* Called within rcu_read_lock(). */
2449static bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq)
2450{
2451 if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
2452 return virtio_packed_should_notify(vdev, vq);
2453 } else {
2454 return virtio_split_should_notify(vdev, vq);
2455 }
2456}
2457
84d61e5f
SH
2458/* Batch irqs while inside a defer_call_begin()/defer_call_end() section */
2459static void virtio_notify_irqfd_deferred_fn(void *opaque)
2460{
2461 EventNotifier *notifier = opaque;
2462 VirtQueue *vq = container_of(notifier, VirtQueue, guest_notifier);
2463
2464 trace_virtio_notify_irqfd_deferred_fn(vq->vdev, vq);
2465 event_notifier_set(notifier);
2466}
2467
83d768b5
PB
2468void virtio_notify_irqfd(VirtIODevice *vdev, VirtQueue *vq)
2469{
b5f53d04
DDAG
2470 WITH_RCU_READ_LOCK_GUARD() {
2471 if (!virtio_should_notify(vdev, vq)) {
2472 return;
2473 }
83d768b5
PB
2474 }
2475
2476 trace_virtio_notify_irqfd(vdev, vq);
2477
2478 /*
2479 * virtio spec 1.0 says ISR bit 0 should be ignored with MSI, but
2480 * windows drivers included in virtio-win 1.8.0 (circa 2015) are
2481 * incorrectly polling this bit during crashdump and hibernation
2482 * in MSI mode, causing a hang if this bit is never updated.
2483 * Recent releases of Windows do not really shut down, but rather
2484 * log out and hibernate to make the next startup faster. Hence,
2485 * this manifested as a more serious hang during shutdown.
2486 *
2487 * Next driver release from 2016 fixed this problem, so working around it
2488 * is not a must, but it's easy to do so let's do it here.
2489 *
2490 * Note: it's safe to update ISR from any thread as it was switched
2491 * to an atomic operation.
2492 */
2493 virtio_set_isr(vq->vdev, 0x1);
84d61e5f 2494 defer_call(virtio_notify_irqfd_deferred_fn, &vq->guest_notifier);
83d768b5
PB
2495}
2496
b4b9862b
MT
2497static void virtio_irq(VirtQueue *vq)
2498{
2499 virtio_set_isr(vq->vdev, 0x1);
2500 virtio_notify_vector(vq->vdev, vq->vector);
2501}
2502
bcbabae8
MT
2503void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
2504{
b5f53d04
DDAG
2505 WITH_RCU_READ_LOCK_GUARD() {
2506 if (!virtio_should_notify(vdev, vq)) {
2507 return;
2508 }
bcbabae8 2509 }
967f97fa 2510
64979a4d 2511 trace_virtio_notify(vdev, vq);
b4b9862b 2512 virtio_irq(vq);
967f97fa
AL
2513}
2514
2515void virtio_notify_config(VirtIODevice *vdev)
2516{
7625162c
AL
2517 if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
2518 return;
2519
0687c37c 2520 virtio_set_isr(vdev, 0x3);
b8f05908 2521 vdev->generation++;
7055e687 2522 virtio_notify_vector(vdev, vdev->config_vector);
967f97fa
AL
2523}
2524
616a6552
GK
2525static bool virtio_device_endian_needed(void *opaque)
2526{
2527 VirtIODevice *vdev = opaque;
2528
2529 assert(vdev->device_endian != VIRTIO_DEVICE_ENDIAN_UNKNOWN);
95129d6f 2530 if (!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
3c185597
CH
2531 return vdev->device_endian != virtio_default_endian();
2532 }
2533 /* Devices conforming to VIRTIO 1.0 or later are always LE. */
2534 return vdev->device_endian != VIRTIO_DEVICE_ENDIAN_LITTLE;
616a6552
GK
2535}
2536
019a3edb
GH
2537static bool virtio_64bit_features_needed(void *opaque)
2538{
2539 VirtIODevice *vdev = opaque;
2540
2541 return (vdev->host_features >> 32) != 0;
2542}
2543
74aae7b2
JW
2544static bool virtio_virtqueue_needed(void *opaque)
2545{
2546 VirtIODevice *vdev = opaque;
2547
2548 return virtio_host_has_feature(vdev, VIRTIO_F_VERSION_1);
2549}
2550
86044b24
JW
2551static bool virtio_packed_virtqueue_needed(void *opaque)
2552{
2553 VirtIODevice *vdev = opaque;
2554
2555 return virtio_host_has_feature(vdev, VIRTIO_F_RING_PACKED);
2556}
2557
46c5d082
CH
2558static bool virtio_ringsize_needed(void *opaque)
2559{
2560 VirtIODevice *vdev = opaque;
2561 int i;
2562
2563 for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
2564 if (vdev->vq[i].vring.num != vdev->vq[i].vring.num_default) {
2565 return true;
2566 }
2567 }
2568 return false;
2569}
2570
a6df8adf
JW
2571static bool virtio_extra_state_needed(void *opaque)
2572{
2573 VirtIODevice *vdev = opaque;
2574 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2575 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
2576
2577 return k->has_extra_state &&
2578 k->has_extra_state(qbus->parent);
2579}
2580
791b1daf
SH
2581static bool virtio_broken_needed(void *opaque)
2582{
2583 VirtIODevice *vdev = opaque;
2584
2585 return vdev->broken;
2586}
2587
badaf79c
XY
2588static bool virtio_started_needed(void *opaque)
2589{
2590 VirtIODevice *vdev = opaque;
2591
2592 return vdev->started;
2593}
2594
9d7bd082
MR
2595static bool virtio_disabled_needed(void *opaque)
2596{
2597 VirtIODevice *vdev = opaque;
2598
2599 return vdev->disabled;
2600}
2601
50e5ae4d 2602static const VMStateDescription vmstate_virtqueue = {
74aae7b2 2603 .name = "virtqueue_state",
50e5ae4d
DDAG
2604 .version_id = 1,
2605 .minimum_version_id = 1,
ca02a170 2606 .fields = (const VMStateField[]) {
50e5ae4d
DDAG
2607 VMSTATE_UINT64(vring.avail, struct VirtQueue),
2608 VMSTATE_UINT64(vring.used, struct VirtQueue),
2609 VMSTATE_END_OF_LIST()
2610 }
74aae7b2
JW
2611};
2612
86044b24
JW
2613static const VMStateDescription vmstate_packed_virtqueue = {
2614 .name = "packed_virtqueue_state",
2615 .version_id = 1,
2616 .minimum_version_id = 1,
ca02a170 2617 .fields = (const VMStateField[]) {
86044b24
JW
2618 VMSTATE_UINT16(last_avail_idx, struct VirtQueue),
2619 VMSTATE_BOOL(last_avail_wrap_counter, struct VirtQueue),
2620 VMSTATE_UINT16(used_idx, struct VirtQueue),
2621 VMSTATE_BOOL(used_wrap_counter, struct VirtQueue),
2622 VMSTATE_UINT32(inuse, struct VirtQueue),
2623 VMSTATE_END_OF_LIST()
2624 }
2625};
2626
74aae7b2
JW
2627static const VMStateDescription vmstate_virtio_virtqueues = {
2628 .name = "virtio/virtqueues",
2629 .version_id = 1,
2630 .minimum_version_id = 1,
2631 .needed = &virtio_virtqueue_needed,
ca02a170 2632 .fields = (const VMStateField[]) {
3e996cc5
DDAG
2633 VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
2634 VIRTIO_QUEUE_MAX, 0, vmstate_virtqueue, VirtQueue),
74aae7b2
JW
2635 VMSTATE_END_OF_LIST()
2636 }
2637};
2638
86044b24
JW
2639static const VMStateDescription vmstate_virtio_packed_virtqueues = {
2640 .name = "virtio/packed_virtqueues",
2641 .version_id = 1,
2642 .minimum_version_id = 1,
2643 .needed = &virtio_packed_virtqueue_needed,
ca02a170 2644 .fields = (const VMStateField[]) {
86044b24
JW
2645 VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
2646 VIRTIO_QUEUE_MAX, 0, vmstate_packed_virtqueue, VirtQueue),
2647 VMSTATE_END_OF_LIST()
2648 }
2649};
2650
50e5ae4d 2651static const VMStateDescription vmstate_ringsize = {
46c5d082 2652 .name = "ringsize_state",
50e5ae4d
DDAG
2653 .version_id = 1,
2654 .minimum_version_id = 1,
ca02a170 2655 .fields = (const VMStateField[]) {
50e5ae4d
DDAG
2656 VMSTATE_UINT32(vring.num_default, struct VirtQueue),
2657 VMSTATE_END_OF_LIST()
2658 }
46c5d082
CH
2659};
2660
2661static const VMStateDescription vmstate_virtio_ringsize = {
2662 .name = "virtio/ringsize",
2663 .version_id = 1,
2664 .minimum_version_id = 1,
2665 .needed = &virtio_ringsize_needed,
ca02a170 2666 .fields = (const VMStateField[]) {
3e996cc5
DDAG
2667 VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
2668 VIRTIO_QUEUE_MAX, 0, vmstate_ringsize, VirtQueue),
46c5d082
CH
2669 VMSTATE_END_OF_LIST()
2670 }
2671};
2672
2c21ee76 2673static int get_extra_state(QEMUFile *f, void *pv, size_t size,
03fee66f 2674 const VMStateField *field)
a6df8adf
JW
2675{
2676 VirtIODevice *vdev = pv;
2677 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2678 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
2679
2680 if (!k->load_extra_state) {
2681 return -1;
2682 } else {
2683 return k->load_extra_state(qbus->parent, f);
2684 }
2685}
2686
2c21ee76 2687static int put_extra_state(QEMUFile *f, void *pv, size_t size,
3ddba9a9 2688 const VMStateField *field, JSONWriter *vmdesc)
a6df8adf
JW
2689{
2690 VirtIODevice *vdev = pv;
2691 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2692 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
2693
2694 k->save_extra_state(qbus->parent, f);
2c21ee76 2695 return 0;
a6df8adf
JW
2696}
2697
2698static const VMStateInfo vmstate_info_extra_state = {
2699 .name = "virtqueue_extra_state",
2700 .get = get_extra_state,
2701 .put = put_extra_state,
2702};
2703
2704static const VMStateDescription vmstate_virtio_extra_state = {
2705 .name = "virtio/extra_state",
2706 .version_id = 1,
2707 .minimum_version_id = 1,
2708 .needed = &virtio_extra_state_needed,
ca02a170 2709 .fields = (const VMStateField[]) {
a6df8adf
JW
2710 {
2711 .name = "extra_state",
2712 .version_id = 0,
2713 .field_exists = NULL,
2714 .size = 0,
2715 .info = &vmstate_info_extra_state,
2716 .flags = VMS_SINGLE,
2717 .offset = 0,
2718 },
2719 VMSTATE_END_OF_LIST()
2720 }
2721};
2722
616a6552
GK
2723static const VMStateDescription vmstate_virtio_device_endian = {
2724 .name = "virtio/device_endian",
2725 .version_id = 1,
2726 .minimum_version_id = 1,
5cd8cada 2727 .needed = &virtio_device_endian_needed,
ca02a170 2728 .fields = (const VMStateField[]) {
616a6552
GK
2729 VMSTATE_UINT8(device_endian, VirtIODevice),
2730 VMSTATE_END_OF_LIST()
2731 }
2732};
2733
019a3edb
GH
2734static const VMStateDescription vmstate_virtio_64bit_features = {
2735 .name = "virtio/64bit_features",
2736 .version_id = 1,
2737 .minimum_version_id = 1,
5cd8cada 2738 .needed = &virtio_64bit_features_needed,
ca02a170 2739 .fields = (const VMStateField[]) {
019a3edb
GH
2740 VMSTATE_UINT64(guest_features, VirtIODevice),
2741 VMSTATE_END_OF_LIST()
2742 }
2743};
2744
791b1daf
SH
2745static const VMStateDescription vmstate_virtio_broken = {
2746 .name = "virtio/broken",
2747 .version_id = 1,
2748 .minimum_version_id = 1,
2749 .needed = &virtio_broken_needed,
ca02a170 2750 .fields = (const VMStateField[]) {
791b1daf
SH
2751 VMSTATE_BOOL(broken, VirtIODevice),
2752 VMSTATE_END_OF_LIST()
2753 }
2754};
2755
badaf79c
XY
2756static const VMStateDescription vmstate_virtio_started = {
2757 .name = "virtio/started",
2758 .version_id = 1,
2759 .minimum_version_id = 1,
2760 .needed = &virtio_started_needed,
ca02a170 2761 .fields = (const VMStateField[]) {
badaf79c
XY
2762 VMSTATE_BOOL(started, VirtIODevice),
2763 VMSTATE_END_OF_LIST()
2764 }
2765};
2766
9d7bd082
MR
2767static const VMStateDescription vmstate_virtio_disabled = {
2768 .name = "virtio/disabled",
2769 .version_id = 1,
2770 .minimum_version_id = 1,
2771 .needed = &virtio_disabled_needed,
ca02a170 2772 .fields = (const VMStateField[]) {
9d7bd082
MR
2773 VMSTATE_BOOL(disabled, VirtIODevice),
2774 VMSTATE_END_OF_LIST()
2775 }
2776};
2777
6b321a3d
GK
2778static const VMStateDescription vmstate_virtio = {
2779 .name = "virtio",
2780 .version_id = 1,
2781 .minimum_version_id = 1,
ca02a170 2782 .fields = (const VMStateField[]) {
6b321a3d 2783 VMSTATE_END_OF_LIST()
616a6552 2784 },
ca02a170 2785 .subsections = (const VMStateDescription * const []) {
5cd8cada
JQ
2786 &vmstate_virtio_device_endian,
2787 &vmstate_virtio_64bit_features,
74aae7b2 2788 &vmstate_virtio_virtqueues,
46c5d082 2789 &vmstate_virtio_ringsize,
791b1daf 2790 &vmstate_virtio_broken,
a6df8adf 2791 &vmstate_virtio_extra_state,
badaf79c 2792 &vmstate_virtio_started,
86044b24 2793 &vmstate_virtio_packed_virtqueues,
9d7bd082 2794 &vmstate_virtio_disabled,
5cd8cada 2795 NULL
6b321a3d
GK
2796 }
2797};
2798
2f168d07 2799int virtio_save(VirtIODevice *vdev, QEMUFile *f)
967f97fa 2800{
1c819449
FK
2801 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2802 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
1b5fc0de 2803 VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
019a3edb 2804 uint32_t guest_features_lo = (vdev->guest_features & 0xffffffff);
967f97fa
AL
2805 int i;
2806
1c819449
FK
2807 if (k->save_config) {
2808 k->save_config(qbus->parent, f);
2809 }
967f97fa 2810
967f97fa
AL
2811 qemu_put_8s(f, &vdev->status);
2812 qemu_put_8s(f, &vdev->isr);
2813 qemu_put_be16s(f, &vdev->queue_sel);
019a3edb 2814 qemu_put_be32s(f, &guest_features_lo);
967f97fa
AL
2815 qemu_put_be32(f, vdev->config_len);
2816 qemu_put_buffer(f, vdev->config, vdev->config_len);
2817
87b3bd1c 2818 for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
967f97fa
AL
2819 if (vdev->vq[i].vring.num == 0)
2820 break;
2821 }
2822
2823 qemu_put_be32(f, i);
2824
87b3bd1c 2825 for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
967f97fa
AL
2826 if (vdev->vq[i].vring.num == 0)
2827 break;
2828
2829 qemu_put_be32(f, vdev->vq[i].vring.num);
6ce69d1c
PM
2830 if (k->has_variable_vring_alignment) {
2831 qemu_put_be32(f, vdev->vq[i].vring.align);
2832 }
874adf45
SH
2833 /*
2834 * Save desc now; the rest of the ring addresses are saved in
2835 * subsections for VIRTIO-1 devices.
2836 */
ab223c95 2837 qemu_put_be64(f, vdev->vq[i].vring.desc);
967f97fa 2838 qemu_put_be16s(f, &vdev->vq[i].last_avail_idx);
1c819449
FK
2839 if (k->save_queue) {
2840 k->save_queue(qbus->parent, i, f);
2841 }
967f97fa 2842 }
1b5fc0de
GK
2843
2844 if (vdc->save != NULL) {
2845 vdc->save(vdev, f);
2846 }
6b321a3d 2847
ea43e259 2848 if (vdc->vmsd) {
2f168d07
DDAG
2849 int ret = vmstate_save_state(f, vdc->vmsd, vdev, NULL);
2850 if (ret) {
2851 return ret;
2852 }
ea43e259
DDAG
2853 }
2854
6b321a3d 2855 /* Subsections */
2f168d07 2856 return vmstate_save_state(f, &vmstate_virtio, vdev, NULL);
967f97fa
AL
2857}
2858
1a665855 2859/* A wrapper for use as a VMState .put function */
2c21ee76 2860static int virtio_device_put(QEMUFile *f, void *opaque, size_t size,
3ddba9a9 2861 const VMStateField *field, JSONWriter *vmdesc)
1a665855 2862{
2f168d07 2863 return virtio_save(VIRTIO_DEVICE(opaque), f);
1a665855
HP
2864}
2865
2866/* A wrapper for use as a VMState .get function */
92e2e6a8
KW
2867static int coroutine_mixed_fn
2868virtio_device_get(QEMUFile *f, void *opaque, size_t size,
2869 const VMStateField *field)
1a665855
HP
2870{
2871 VirtIODevice *vdev = VIRTIO_DEVICE(opaque);
2872 DeviceClass *dc = DEVICE_CLASS(VIRTIO_DEVICE_GET_CLASS(vdev));
2873
2874 return virtio_load(vdev, f, dc->vmsd->version_id);
2875}
2876
2877const VMStateInfo virtio_vmstate_info = {
2878 .name = "virtio",
2879 .get = virtio_device_get,
2880 .put = virtio_device_put,
2881};
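
/*
 * Illustrative sketch (not part of virtio.c): devices hook the wrappers
 * above into their own VMStateDescription through the VMSTATE_VIRTIO_DEVICE
 * macro declared in virtio.h.  The vmstate name and version used here are
 * hypothetical.
 */
static const VMStateDescription example_vmstate_virtio_device = {
    .name = "example-virtio-device",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE,               /* calls virtio_save()/virtio_load() */
        VMSTATE_END_OF_LIST()
    },
};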
2882
6c0196d7 2883static int virtio_set_features_nocheck(VirtIODevice *vdev, uint64_t val)
ad0c9332 2884{
181103cd 2885 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
6b8f1020 2886 bool bad = (val & ~(vdev->host_features)) != 0;
ad0c9332 2887
6b8f1020 2888 val &= vdev->host_features;
181103cd
FK
2889 if (k->set_features) {
2890 k->set_features(vdev, val);
ad0c9332
PB
2891 }
2892 vdev->guest_features = val;
2893 return bad ? -1 : 0;
2894}
2895
92e2e6a8
KW
2896typedef struct VirtioSetFeaturesNocheckData {
2897 Coroutine *co;
2898 VirtIODevice *vdev;
2899 uint64_t val;
2900 int ret;
2901} VirtioSetFeaturesNocheckData;
2902
2903static void virtio_set_features_nocheck_bh(void *opaque)
2904{
2905 VirtioSetFeaturesNocheckData *data = opaque;
2906
2907 data->ret = virtio_set_features_nocheck(data->vdev, data->val);
2908 aio_co_wake(data->co);
2909}
2910
2911static int coroutine_mixed_fn
2912virtio_set_features_nocheck_maybe_co(VirtIODevice *vdev, uint64_t val)
2913{
2914 if (qemu_in_coroutine()) {
2915 VirtioSetFeaturesNocheckData data = {
2916 .co = qemu_coroutine_self(),
2917 .vdev = vdev,
2918 .val = val,
2919 };
2920 aio_bh_schedule_oneshot(qemu_get_current_aio_context(),
2921 virtio_set_features_nocheck_bh, &data);
2922 qemu_coroutine_yield();
2923 return data.ret;
2924 } else {
2925 return virtio_set_features_nocheck(vdev, val);
2926 }
2927}
2928
6c0196d7
CH
2929int virtio_set_features(VirtIODevice *vdev, uint64_t val)
2930{
db812c40
PB
2931 int ret;
2932 /*
6c0196d7
CH
2933 * The driver must not attempt to set features after feature negotiation
2934 * has finished.
2935 */
2936 if (vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) {
2937 return -EINVAL;
2938 }
8d11c10d
AB
2939
2940 if (val & (1ull << VIRTIO_F_BAD_FEATURE)) {
2941 qemu_log_mask(LOG_GUEST_ERROR,
2942 "%s: guest driver for %s has enabled UNUSED(30) feature bit!\n",
2943 __func__, vdev->name);
2944 }
2945
db812c40 2946 ret = virtio_set_features_nocheck(vdev, val);
2d69eba5
LQ
2947 if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
2948 /* VIRTIO_RING_F_EVENT_IDX changes the size of the caches. */
2949 int i;
2950 for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
2951 if (vdev->vq[i].vring.num != 0) {
2952 virtio_init_region_cache(vdev, i);
db812c40
PB
2953 }
2954 }
2d69eba5
LQ
2955 }
2956 if (!ret) {
868a8f44
XY
2957 if (!virtio_device_started(vdev, vdev->status) &&
2958 !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
2959 vdev->start_on_kick = true;
2960 }
db812c40
PB
2961 }
2962 return ret;
6c0196d7
CH
2963}
2964
d74c30c8
DT
2965size_t virtio_get_config_size(const VirtIOConfigSizeParams *params,
2966 uint64_t host_features)
ba550851 2967{
d74c30c8
DT
2968 size_t config_size = params->min_size;
2969 const VirtIOFeature *feature_sizes = params->feature_sizes;
2970 size_t i;
ba550851
SG
2971
2972 for (i = 0; feature_sizes[i].flags != 0; i++) {
2973 if (host_features & feature_sizes[i].flags) {
2974 config_size = MAX(feature_sizes[i].end, config_size);
2975 }
2976 }
2977
d74c30c8 2978 assert(config_size <= params->max_size);
ba550851
SG
2979 return config_size;
2980}
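
/*
 * Illustrative sketch (not part of virtio.c): how a device describes its
 * feature-dependent config space size.  The entries follow the virtio-net
 * pattern but the particular table is illustrative; a device passes such a
 * table plus its host features to virtio_get_config_size().
 */
static const VirtIOFeature example_feature_sizes[] = {
    {.flags = 1ULL << VIRTIO_NET_F_STATUS,
     .end = endof(struct virtio_net_config, status)},
    {.flags = 1ULL << VIRTIO_NET_F_MQ,
     .end = endof(struct virtio_net_config, max_virtqueue_pairs)},
    {}
};

static const VirtIOConfigSizeParams example_cfg_size_params = {
    .min_size = endof(struct virtio_net_config, mac),
    .max_size = sizeof(struct virtio_net_config),
    .feature_sizes = example_feature_sizes,
};

/* ... later, e.g. in realize:
 *     config_size = virtio_get_config_size(&example_cfg_size_params,
 *                                          host_features);
 */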
2981
92e2e6a8
KW
2982int coroutine_mixed_fn
2983virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
967f97fa 2984{
cc459952 2985 int i, ret;
a890a2f9 2986 int32_t config_len;
cc459952 2987 uint32_t num;
6d74ca5a 2988 uint32_t features;
1c819449
FK
2989 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2990 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
1b5fc0de 2991 VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
967f97fa 2992
616a6552
GK
2993 /*
2994 * We poison the endianness to ensure it does not get used before
2995 * subsections have been loaded.
2996 */
2997 vdev->device_endian = VIRTIO_DEVICE_ENDIAN_UNKNOWN;
2998
1c819449
FK
2999 if (k->load_config) {
3000 ret = k->load_config(qbus->parent, f);
ff24bd58
MT
3001 if (ret)
3002 return ret;
3003 }
967f97fa 3004
967f97fa
AL
3005 qemu_get_8s(f, &vdev->status);
3006 qemu_get_8s(f, &vdev->isr);
3007 qemu_get_be16s(f, &vdev->queue_sel);
87b3bd1c 3008 if (vdev->queue_sel >= VIRTIO_QUEUE_MAX) {
4b53c2c7
MR
3009 return -1;
3010 }
6d74ca5a 3011 qemu_get_be32s(f, &features);
ad0c9332 3012
62cee1a2
MT
3013 /*
3014 * Temporarily set guest_features low bits - needed by
3015 * virtio net load code testing for VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,
3016 * VIRTIO_NET_F_GUEST_ANNOUNCE and VIRTIO_NET_F_CTRL_VQ.
3017 *
3018 * Note: devices should always test host features in future - don't create
3019 * new dependencies like this.
3020 */
3021 vdev->guest_features = features;
3022
a890a2f9 3023 config_len = qemu_get_be32(f);
2f5732e9
DDAG
3024
3025 /*
3026 * There are cases where the incoming config can be bigger or smaller
3027 * than what we have; so load what we have space for, and skip
3028 * any excess that's in the stream.
3029 */
3030 qemu_get_buffer(f, vdev->config, MIN(config_len, vdev->config_len));
3031
3032 while (config_len > vdev->config_len) {
3033 qemu_get_byte(f);
3034 config_len--;
a890a2f9 3035 }
967f97fa
AL
3036
3037 num = qemu_get_be32(f);
3038
87b3bd1c 3039 if (num > VIRTIO_QUEUE_MAX) {
8a1be662 3040 error_report("Invalid number of virtqueues: 0x%x", num);
cc459952
MT
3041 return -1;
3042 }
3043
967f97fa
AL
3044 for (i = 0; i < num; i++) {
3045 vdev->vq[i].vring.num = qemu_get_be32(f);
6ce69d1c
PM
3046 if (k->has_variable_vring_alignment) {
3047 vdev->vq[i].vring.align = qemu_get_be32(f);
3048 }
ab223c95 3049 vdev->vq[i].vring.desc = qemu_get_be64(f);
967f97fa 3050 qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);
bcbabae8 3051 vdev->vq[i].signalled_used_valid = false;
332fa82d 3052 vdev->vq[i].notification = true;
967f97fa 3053
874adf45 3054 if (!vdev->vq[i].vring.desc && vdev->vq[i].last_avail_idx) {
1abeb5a6 3055 error_report("VQ %d address 0x0 "
6daf194d 3056 "inconsistent with Host index 0x%x",
1abeb5a6 3057 i, vdev->vq[i].last_avail_idx);
874adf45 3058 return -1;
8275e2f6 3059 }
1c819449
FK
3060 if (k->load_queue) {
3061 ret = k->load_queue(qbus->parent, i, f);
ff24bd58
MT
3062 if (ret)
3063 return ret;
7055e687 3064 }
967f97fa
AL
3065 }
3066
7055e687 3067 virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
1b5fc0de
GK
3068
3069 if (vdc->load != NULL) {
6b321a3d
GK
3070 ret = vdc->load(vdev, f, version_id);
3071 if (ret) {
3072 return ret;
3073 }
1b5fc0de
GK
3074 }
3075
ea43e259
DDAG
3076 if (vdc->vmsd) {
3077 ret = vmstate_load_state(f, vdc->vmsd, vdev, version_id);
3078 if (ret) {
3079 return ret;
3080 }
3081 }
3082
616a6552
GK
3083 /* Subsections */
3084 ret = vmstate_load_state(f, &vmstate_virtio, vdev, 1);
3085 if (ret) {
3086 return ret;
3087 }
3088
3089 if (vdev->device_endian == VIRTIO_DEVICE_ENDIAN_UNKNOWN) {
3090 vdev->device_endian = virtio_default_endian();
3091 }
3092
019a3edb
GH
3093 if (virtio_64bit_features_needed(vdev)) {
3094 /*
3095 * Subsection load filled vdev->guest_features. Run them
3096 * through virtio_set_features to sanity-check them against
3097 * host_features.
3098 */
3099 uint64_t features64 = vdev->guest_features;
92e2e6a8 3100 if (virtio_set_features_nocheck_maybe_co(vdev, features64) < 0) {
019a3edb
GH
3101 error_report("Features 0x%" PRIx64 " unsupported. "
3102 "Allowed features: 0x%" PRIx64,
3103 features64, vdev->host_features);
3104 return -1;
3105 }
3106 } else {
92e2e6a8 3107 if (virtio_set_features_nocheck_maybe_co(vdev, features) < 0) {
019a3edb
GH
3108 error_report("Features 0x%x unsupported. "
3109 "Allowed features: 0x%" PRIx64,
3110 features, vdev->host_features);
3111 return -1;
3112 }
3113 }
3114
868a8f44
XY
3115 if (!virtio_device_started(vdev, vdev->status) &&
3116 !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
3117 vdev->start_on_kick = true;
3118 }
3119
b5f53d04 3120 RCU_READ_LOCK_GUARD();
616a6552 3121 for (i = 0; i < num; i++) {
ab223c95 3122 if (vdev->vq[i].vring.desc) {
616a6552 3123 uint16_t nheads;
874adf45
SH
3124
3125 /*
3126 * VIRTIO-1 devices migrate desc, used, and avail ring addresses so
3127 * only the region cache needs to be set up. Legacy devices need
3128 * to calculate used and avail ring addresses based on the desc
3129 * address.
3130 */
3131 if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
3132 virtio_init_region_cache(vdev, i);
3133 } else {
3134 virtio_queue_update_rings(vdev, i);
3135 }
3136
86044b24
JW
3137 if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
3138 vdev->vq[i].shadow_avail_idx = vdev->vq[i].last_avail_idx;
3139 vdev->vq[i].shadow_avail_wrap_counter =
3140 vdev->vq[i].last_avail_wrap_counter;
3141 continue;
3142 }
3143
616a6552
GK
3144 nheads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx;
3145 /* Check it isn't doing strange things with descriptor numbers. */
3146 if (nheads > vdev->vq[i].vring.num) {
4aedda25
JL
3147 virtio_error(vdev, "VQ %d size 0x%x Guest index 0x%x "
3148 "inconsistent with Host index 0x%x: delta 0x%x",
3149 i, vdev->vq[i].vring.num,
3150 vring_avail_idx(&vdev->vq[i]),
3151 vdev->vq[i].last_avail_idx, nheads);
3152 vdev->vq[i].used_idx = 0;
3153 vdev->vq[i].shadow_avail_idx = 0;
3154 vdev->vq[i].inuse = 0;
3155 continue;
616a6552 3156 }
b796fcd1 3157 vdev->vq[i].used_idx = vring_used_idx(&vdev->vq[i]);
be1fea9b 3158 vdev->vq[i].shadow_avail_idx = vring_avail_idx(&vdev->vq[i]);
bccdef6b
SH
3159
3160 /*
3161 * Some devices migrate VirtQueueElements that have been popped
3162 * from the avail ring but not yet returned to the used ring.
e66bcc40
HP
3163 * Since max ring size < UINT16_MAX it's safe to use modulo
3164 * UINT16_MAX + 1 subtraction.
bccdef6b 3165 */
e66bcc40
HP
3166 vdev->vq[i].inuse = (uint16_t)(vdev->vq[i].last_avail_idx -
3167 vdev->vq[i].used_idx);
bccdef6b
SH
3168 if (vdev->vq[i].inuse > vdev->vq[i].vring.num) {
3169 error_report("VQ %d size 0x%x < last_avail_idx 0x%x - "
3170 "used_idx 0x%x",
3171 i, vdev->vq[i].vring.num,
3172 vdev->vq[i].last_avail_idx,
3173 vdev->vq[i].used_idx);
3174 return -1;
3175 }
616a6552
GK
3176 }
3177 }
3178
1dd71383
MT
3179 if (vdc->post_load) {
3180 ret = vdc->post_load(vdev);
3181 if (ret) {
3182 return ret;
3183 }
3184 }
3185
616a6552 3186 return 0;
967f97fa
AL
3187}
3188
6a1a8cc7 3189void virtio_cleanup(VirtIODevice *vdev)
b946a153 3190{
85cf2a8d 3191 qemu_del_vm_change_state_handler(vdev->vmstate);
8e05db92
FK
3192}
3193
538f0497 3194static void virtio_vmstate_change(void *opaque, bool running, RunState state)
85cf2a8d
MT
3195{
3196 VirtIODevice *vdev = opaque;
1c819449
FK
3197 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3198 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
e57f2c31 3199 bool backend_run = running && virtio_device_started(vdev, vdev->status);
9e8e8c48 3200 vdev->vm_running = running;
85cf2a8d
MT
3201
3202 if (backend_run) {
3203 virtio_set_status(vdev, vdev->status);
3204 }
3205
1c819449
FK
3206 if (k->vmstate_change) {
3207 k->vmstate_change(qbus->parent, backend_run);
85cf2a8d
MT
3208 }
3209
3210 if (!backend_run) {
3211 virtio_set_status(vdev, vdev->status);
3212 }
3213}
3214
c8075caf
GA
3215void virtio_instance_init_common(Object *proxy_obj, void *data,
3216 size_t vdev_size, const char *vdev_name)
3217{
3218 DeviceState *vdev = data;
3219
9fc7fc4d
MA
3220 object_initialize_child_with_props(proxy_obj, "virtio-backend", vdev,
3221 vdev_size, vdev_name, &error_abort,
3222 NULL);
c8075caf
GA
3223 qdev_alias_all_properties(vdev, proxy_obj);
3224}
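
/*
 * Illustrative sketch (not part of virtio.c): a transport proxy (for
 * example a virtio-pci device) embeds the backend VirtIODevice and
 * initializes it with the helper above from its instance_init.
 * ExamplePCIProxy, EXAMPLE_PCI() and TYPE_EXAMPLE_DEVICE are hypothetical.
 */
static void example_pci_instance_init(Object *obj)
{
    ExamplePCIProxy *dev = EXAMPLE_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_EXAMPLE_DEVICE);
}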
3225
3857cd5c 3226void virtio_init(VirtIODevice *vdev, uint16_t device_id, size_t config_size)
967f97fa 3227{
e0d686bf
JW
3228 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3229 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
b8193adb 3230 int i;
e0d686bf
JW
3231 int nvectors = k->query_nvectors ? k->query_nvectors(qbus->parent) : 0;
3232
3233 if (nvectors) {
3234 vdev->vector_queues =
3235 g_malloc0(sizeof(*vdev->vector_queues) * nvectors);
3236 }
3237
868a8f44 3238 vdev->start_on_kick = false;
badaf79c 3239 vdev->started = false;
c255488d 3240 vdev->vhost_started = false;
53c25cea 3241 vdev->device_id = device_id;
967f97fa 3242 vdev->status = 0;
d73415a3 3243 qatomic_set(&vdev->isr, 0);
967f97fa 3244 vdev->queue_sel = 0;
7055e687 3245 vdev->config_vector = VIRTIO_NO_VECTOR;
b21e2380 3246 vdev->vq = g_new0(VirtQueue, VIRTIO_QUEUE_MAX);
1354869c 3247 vdev->vm_running = runstate_is_running();
f5ed3663 3248 vdev->broken = false;
87b3bd1c 3249 for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
b8193adb 3250 vdev->vq[i].vector = VIRTIO_NO_VECTOR;
1cbdabe2 3251 vdev->vq[i].vdev = vdev;
e78a2b42 3252 vdev->vq[i].queue_index = i;
fcccb271 3253 vdev->vq[i].host_notifier_enabled = false;
1cbdabe2 3254 }
967f97fa 3255
3857cd5c 3256 vdev->name = virtio_id_to_name(device_id);
967f97fa 3257 vdev->config_len = config_size;
8e05db92 3258 if (vdev->config_len) {
7267c094 3259 vdev->config = g_malloc0(config_size);
8e05db92 3260 } else {
967f97fa 3261 vdev->config = NULL;
8e05db92 3262 }
1a8c091c
SH
3263 vdev->vmstate = qdev_add_vm_change_state_handler(DEVICE(vdev),
3264 virtio_vmstate_change, vdev);
616a6552 3265 vdev->device_endian = virtio_default_endian();
5669655a 3266 vdev->use_guest_notifier_mask = true;
8e05db92 3267}
967f97fa 3268
7c78bdd7
CH
3269/*
3270 * Only devices that existed before the virtio standard was defined
3271 * support legacy mode; this includes devices not specified in the
3272 * standard. All newer devices conform only to the virtio standard.
3273 */
3274bool virtio_legacy_allowed(VirtIODevice *vdev)
3275{
3276 switch (vdev->device_id) {
3277 case VIRTIO_ID_NET:
3278 case VIRTIO_ID_BLOCK:
3279 case VIRTIO_ID_CONSOLE:
3280 case VIRTIO_ID_RNG:
3281 case VIRTIO_ID_BALLOON:
3282 case VIRTIO_ID_RPMSG:
3283 case VIRTIO_ID_SCSI:
3284 case VIRTIO_ID_9P:
3285 case VIRTIO_ID_RPROC_SERIAL:
3286 case VIRTIO_ID_CAIF:
3287 return true;
3288 default:
3289 return false;
3290 }
3291}
3292
d55f5182
SG
3293bool virtio_legacy_check_disabled(VirtIODevice *vdev)
3294{
3295 return vdev->disable_legacy_check;
3296}
3297
a8170e5e 3298hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n)
1cbdabe2
MT
3299{
3300 return vdev->vq[n].vring.desc;
3301}
3302
0c9753eb
LV
3303bool virtio_queue_enabled_legacy(VirtIODevice *vdev, int n)
3304{
3305 return virtio_queue_get_desc_addr(vdev, n) != 0;
3306}
3307
23bfaf77
JW
3308bool virtio_queue_enabled(VirtIODevice *vdev, int n)
3309{
b2a5f62a
JW
3310 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3311 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
3312
3313 if (k->queue_enabled) {
3314 return k->queue_enabled(qbus->parent, n);
3315 }
0c9753eb 3316 return virtio_queue_enabled_legacy(vdev, n);
23bfaf77
JW
3317}
3318
a8170e5e 3319hwaddr virtio_queue_get_avail_addr(VirtIODevice *vdev, int n)
1cbdabe2
MT
3320{
3321 return vdev->vq[n].vring.avail;
3322}
3323
a8170e5e 3324hwaddr virtio_queue_get_used_addr(VirtIODevice *vdev, int n)
1cbdabe2
MT
3325{
3326 return vdev->vq[n].vring.used;
3327}
3328
a8170e5e 3329hwaddr virtio_queue_get_desc_size(VirtIODevice *vdev, int n)
1cbdabe2
MT
3330{
3331 return sizeof(VRingDesc) * vdev->vq[n].vring.num;
3332}
3333
a8170e5e 3334hwaddr virtio_queue_get_avail_size(VirtIODevice *vdev, int n)
1cbdabe2 3335{
f90cda63
WX
3336 int s;
3337
86044b24
JW
3338 if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
3339 return sizeof(struct VRingPackedDescEvent);
3340 }
3341
f90cda63 3342 s = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
1cbdabe2 3343 return offsetof(VRingAvail, ring) +
f90cda63 3344 sizeof(uint16_t) * vdev->vq[n].vring.num + s;
1cbdabe2
MT
3345}
3346
a8170e5e 3347hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n)
1cbdabe2 3348{
f90cda63
WX
3349 int s;
3350
86044b24
JW
3351 if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
3352 return sizeof(struct VRingPackedDescEvent);
3353 }
3354
f90cda63 3355 s = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
1cbdabe2 3356 return offsetof(VRingUsed, ring) +
f90cda63 3357 sizeof(VRingUsedElem) * vdev->vq[n].vring.num + s;
1cbdabe2
MT
3358}
3359
86044b24
JW
3360static unsigned int virtio_queue_packed_get_last_avail_idx(VirtIODevice *vdev,
3361 int n)
3362{
3363 unsigned int avail, used;
3364
3365 avail = vdev->vq[n].last_avail_idx;
3366 avail |= ((uint16_t)vdev->vq[n].last_avail_wrap_counter) << 15;
3367
3368 used = vdev->vq[n].used_idx;
3369 used |= ((uint16_t)vdev->vq[n].used_wrap_counter) << 15;
3370
3371 return avail | used << 16;
3372}
3373
3374static uint16_t virtio_queue_split_get_last_avail_idx(VirtIODevice *vdev,
3375 int n)
1cbdabe2
MT
3376{
3377 return vdev->vq[n].last_avail_idx;
3378}
3379
86044b24 3380unsigned int virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
1cbdabe2 3381{
86044b24
JW
3382 if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
3383 return virtio_queue_packed_get_last_avail_idx(vdev, n);
3384 } else {
3385 return virtio_queue_split_get_last_avail_idx(vdev, n);
3386 }
1cbdabe2
MT
3387}
3388
86044b24
JW
3389static void virtio_queue_packed_set_last_avail_idx(VirtIODevice *vdev,
3390 int n, unsigned int idx)
3391{
3392 struct VirtQueue *vq = &vdev->vq[n];
3393
3394 vq->last_avail_idx = vq->shadow_avail_idx = idx & 0x7fff;
3395 vq->last_avail_wrap_counter =
3396 vq->shadow_avail_wrap_counter = !!(idx & 0x8000);
3397 idx >>= 16;
c92f4fca 3398 vq->used_idx = idx & 0x7fff;
86044b24
JW
3399 vq->used_wrap_counter = !!(idx & 0x8000);
3400}
3401
3402static void virtio_queue_split_set_last_avail_idx(VirtIODevice *vdev,
3403 int n, unsigned int idx)
3404{
3405 vdev->vq[n].last_avail_idx = idx;
3406 vdev->vq[n].shadow_avail_idx = idx;
3407}
3408
3409void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n,
3410 unsigned int idx)
3411{
3412 if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
3413 virtio_queue_packed_set_last_avail_idx(vdev, n, idx);
3414 } else {
3415 virtio_queue_split_set_last_avail_idx(vdev, n, idx);
3416 }
3417}
3418
3419static void virtio_queue_packed_restore_last_avail_idx(VirtIODevice *vdev,
3420 int n)
3421{
3422 /* We don't have a reference like avail idx in shared memory */
3423 return;
3424}
3425
3426static void virtio_queue_split_restore_last_avail_idx(VirtIODevice *vdev,
3427 int n)
2d4ba6cc 3428{
b5f53d04 3429 RCU_READ_LOCK_GUARD();
2d4ba6cc
MC
3430 if (vdev->vq[n].vring.desc) {
3431 vdev->vq[n].last_avail_idx = vring_used_idx(&vdev->vq[n]);
3432 vdev->vq[n].shadow_avail_idx = vdev->vq[n].last_avail_idx;
3433 }
2d4ba6cc
MC
3434}
3435
86044b24
JW
3436void virtio_queue_restore_last_avail_idx(VirtIODevice *vdev, int n)
3437{
3438 if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
3439 virtio_queue_packed_restore_last_avail_idx(vdev, n);
3440 } else {
3441 virtio_queue_split_restore_last_avail_idx(vdev, n);
3442 }
3443}
3444
3445static void virtio_queue_packed_update_used_idx(VirtIODevice *vdev, int n)
3446{
3447 /* used idx was updated through set_last_avail_idx() */
3448 return;
3449}
3450
3451static void virtio_split_packed_update_used_idx(VirtIODevice *vdev, int n)
312d3b35 3452{
b5f53d04 3453 RCU_READ_LOCK_GUARD();
ca0176ad
PB
3454 if (vdev->vq[n].vring.desc) {
3455 vdev->vq[n].used_idx = vring_used_idx(&vdev->vq[n]);
3456 }
312d3b35
YB
3457}
3458
86044b24
JW
3459void virtio_queue_update_used_idx(VirtIODevice *vdev, int n)
3460{
3461 if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
3462 return virtio_queue_packed_update_used_idx(vdev, n);
3463 } else {
3464 return virtio_split_packed_update_used_idx(vdev, n);
3465 }
3466}
3467
6793dfd1
SH
3468void virtio_queue_invalidate_signalled_used(VirtIODevice *vdev, int n)
3469{
3470 vdev->vq[n].signalled_used_valid = false;
3471}
3472
1cbdabe2
MT
3473VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n)
3474{
3475 return vdev->vq + n;
3476}
3477
e78a2b42
JW
3478uint16_t virtio_get_queue_index(VirtQueue *vq)
3479{
3480 return vq->queue_index;
3481}
3482
15b2bd18
PB
3483static void virtio_queue_guest_notifier_read(EventNotifier *n)
3484{
3485 VirtQueue *vq = container_of(n, VirtQueue, guest_notifier);
3486 if (event_notifier_test_and_clear(n)) {
b4b9862b 3487 virtio_irq(vq);
15b2bd18
PB
3488 }
3489}
7d847d0c
CL
3490static void virtio_config_guest_notifier_read(EventNotifier *n)
3491{
3492 VirtIODevice *vdev = container_of(n, VirtIODevice, config_notifier);
15b2bd18 3493
7d847d0c
CL
3494 if (event_notifier_test_and_clear(n)) {
3495 virtio_notify_config(vdev);
3496 }
3497}
15b2bd18
PB
3498void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
3499 bool with_irqfd)
3500{
3501 if (assign && !with_irqfd) {
d6da1e9e 3502 event_notifier_set_handler(&vq->guest_notifier,
15b2bd18
PB
3503 virtio_queue_guest_notifier_read);
3504 } else {
d6da1e9e 3505 event_notifier_set_handler(&vq->guest_notifier, NULL);
15b2bd18
PB
3506 }
3507 if (!assign) {
3508 /* Test and clear notifier before closing it,
3509 * in case poll callback didn't have time to run. */
3510 virtio_queue_guest_notifier_read(&vq->guest_notifier);
3511 }
3512}
3513
7d847d0c
CL
3514void virtio_config_set_guest_notifier_fd_handler(VirtIODevice *vdev,
3515 bool assign, bool with_irqfd)
3516{
3517 EventNotifier *n;
3518 n = &vdev->config_notifier;
3519 if (assign && !with_irqfd) {
3520 event_notifier_set_handler(n, virtio_config_guest_notifier_read);
3521 } else {
3522 event_notifier_set_handler(n, NULL);
3523 }
3524 if (!assign) {
3525 /* Test and clear notifier before closing it, */
3526 /* in case poll callback didn't have time to run. */
3527 virtio_config_guest_notifier_read(n);
3528 }
3529}
3530
1cbdabe2
MT
3531EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq)
3532{
3533 return &vq->guest_notifier;
3534}
b1f416aa 3535
a7c8215e
SH
3536static void virtio_queue_host_notifier_aio_poll_begin(EventNotifier *n)
3537{
3538 VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
3539
3540 virtio_queue_set_notification(vq, 0);
3541}
3542
0062ea0f
SH
3543static bool virtio_queue_host_notifier_aio_poll(void *opaque)
3544{
3545 EventNotifier *n = opaque;
3546 VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
3547
826cc324
SH
3548 return vq->vring.desc && !virtio_queue_empty(vq);
3549}
3550
3551static void virtio_queue_host_notifier_aio_poll_ready(EventNotifier *n)
3552{
3553 VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
0062ea0f 3554
d6fbfe2b 3555 virtio_queue_notify_vq(vq);
3556}
3557
3558static void virtio_queue_host_notifier_aio_poll_end(EventNotifier *n)
3559{
3560 VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
3561
3562 /* Caller polls once more after this to catch requests that race with us */
3563 virtio_queue_set_notification(vq, 1);
3564}
3565
3566void virtio_queue_aio_attach_host_notifier(VirtQueue *vq, AioContext *ctx)
3567{
3568 /*
3569 * virtio_queue_aio_detach_host_notifier() can leave notifications disabled.
3570 * Re-enable them. (And if detach has not been used before, notifications
3571 * being enabled is still the default state while a notifier is attached;
3572 * see virtio_queue_host_notifier_aio_poll_end(), which will always leave
3573 * notifications enabled once the polling section is left.)
3574 */
3575 if (!virtio_queue_get_notification(vq)) {
3576 virtio_queue_set_notification(vq, 1);
3577 }
3578
60f782b6 3579 aio_set_event_notifier(ctx, &vq->host_notifier,
3580 virtio_queue_host_notifier_read,
3581 virtio_queue_host_notifier_aio_poll,
3582 virtio_queue_host_notifier_aio_poll_ready);
3583 aio_set_event_notifier_poll(ctx, &vq->host_notifier,
3584 virtio_queue_host_notifier_aio_poll_begin,
3585 virtio_queue_host_notifier_aio_poll_end);
3586
3587 /*
3588 * We will have ignored notifications about new requests from the guest
3589 * while no notifiers were attached, so "kick" the virt queue to process
3590 * those requests now.
3591 */
3592 event_notifier_set(&vq->host_notifier);
3593}
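/*
 * Illustrative sketch (not upstream code): handing a queue whose ioeventfd
 * host notifier is already set up to an IOThread's AioContext.  Assumes
 * "sysemu/iothread.h" for the IOThread type and iothread_get_aio_context().
 */
static void example_attach_queue_to_iothread(VirtQueue *vq, IOThread *iothread)
{
    AioContext *ctx = iothread_get_aio_context(iothread);

    /* Handlers now run in the IOThread; the notifier is also kicked once so
     * requests that arrived while nothing was attached are not left behind. */
    virtio_queue_aio_attach_host_notifier(vq, ctx);
}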
3594
3595/*
3596 * Same as virtio_queue_aio_attach_host_notifier() but without polling. Use
3597 * this for rx virtqueues and similar cases where the virtqueue handler
3598 * function does not pop all elements. When the virtqueue is left non-empty,
3599 * polling consumes CPU cycles and should not be used.
3600 */
3601void virtio_queue_aio_attach_host_notifier_no_poll(VirtQueue *vq, AioContext *ctx)
3602{
3603 /* See virtio_queue_aio_attach_host_notifier() */
3604 if (!virtio_queue_get_notification(vq)) {
3605 virtio_queue_set_notification(vq, 1);
3606 }
3607
60f782b6 3608 aio_set_event_notifier(ctx, &vq->host_notifier,
3609 virtio_queue_host_notifier_read,
3610 NULL, NULL);
3611
3612 /*
3613 * See virtio_queue_aio_attach_host_notifier().
3614 * Note that this may be unnecessary for the type of virtqueues this
3615 * function is used for. Still, it will not hurt to have a quick look into
3616 * whether we can/should process any of the virtqueue elements.
3617 */
3618 event_notifier_set(&vq->host_notifier);
3619}
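/*
 * Illustrative sketch (not upstream code): a network-style device would
 * attach its rx queue without polling, since its handler rarely drains the
 * ring, and its tx queue with polling.  "rx" and "tx" are hypothetical
 * VirtQueue pointers owned by the device.
 */
static void example_attach_net_queues(VirtQueue *rx, VirtQueue *tx,
                                      AioContext *ctx)
{
    virtio_queue_aio_attach_host_notifier_no_poll(rx, ctx);
    virtio_queue_aio_attach_host_notifier(tx, ctx);
}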
3620
3621void virtio_queue_aio_detach_host_notifier(VirtQueue *vq, AioContext *ctx)
3622{
60f782b6 3623 aio_set_event_notifier(ctx, &vq->host_notifier, NULL, NULL, NULL);
3624
3625 /*
3626 * aio_set_event_notifier_poll() does not guarantee whether io_poll_end()
3627 * will run after io_poll_begin(), so by removing the notifier, we do not
3628 * know whether virtio_queue_host_notifier_aio_poll_end() has run after a
3629 * previous virtio_queue_host_notifier_aio_poll_begin(), i.e. whether
3630 * notifications are enabled or disabled. It does not really matter anyway;
3631 * we just removed the notifier, so we do not care about notifications until
3632 * we potentially re-attach it. The attach_host_notifier functions will
3633 * ensure that notifications are enabled again when they are needed.
3634 */
3635}
3636
fa283a4a 3637void virtio_queue_host_notifier_read(EventNotifier *n)
3638{
3639 VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
3640 if (event_notifier_test_and_clear(n)) {
3641 virtio_queue_notify_vq(vq);
3642 }
3643}
3644
3645EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq)
3646{
3647 return &vq->host_notifier;
3648}
8e05db92 3649
3650EventNotifier *virtio_config_get_guest_notifier(VirtIODevice *vdev)
3651{
3652 return &vdev->config_notifier;
3653}
3654
3655void virtio_queue_set_host_notifier_enabled(VirtQueue *vq, bool enabled)
3656{
3657 vq->host_notifier_enabled = enabled;
3658}
3659
3660int virtio_queue_set_host_notifier_mr(VirtIODevice *vdev, int n,
3661 MemoryRegion *mr, bool assign)
3662{
3663 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3664 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
3665
3666 if (k->set_host_notifier_mr) {
3667 return k->set_host_notifier_mr(qbus->parent, n, mr, assign);
3668 }
3669
3670 return -1;
3671}
3672
3673void virtio_device_set_child_bus_name(VirtIODevice *vdev, char *bus_name)
3674{
9e288406 3675 g_free(vdev->bus_name);
80e0090a 3676 vdev->bus_name = g_strdup(bus_name);
3677}
3678
9edc6313 3679void G_GNUC_PRINTF(2, 3) virtio_error(VirtIODevice *vdev, const char *fmt, ...)
3680{
3681 va_list ap;
3682
3683 va_start(ap, fmt);
3684 error_vreport(fmt, ap);
3685 va_end(ap);
3686
f5ed3663 3687 if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
8fc47c87 3688 vdev->status = vdev->status | VIRTIO_CONFIG_S_NEEDS_RESET;
3689 virtio_notify_config(vdev);
3690 }
3691
3692 vdev->broken = true;
3693}
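/*
 * Illustrative sketch (not upstream code): how a device model typically
 * reports a malformed guest request.  The specific check is an arbitrary
 * example; virtio_error() marks the device broken and, for VIRTIO 1.0
 * devices, sets NEEDS_RESET and raises a config interrupt.
 */
static void example_validate_element(VirtIODevice *vdev,
                                     const VirtQueueElement *elem)
{
    if (elem->out_num == 0) {
        virtio_error(vdev, "virtqueue element is missing a request header");
    }
}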
3694
3695static void virtio_memory_listener_commit(MemoryListener *listener)
3696{
3697 VirtIODevice *vdev = container_of(listener, VirtIODevice, listener);
3698 int i;
3699
3700 for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
3701 if (vdev->vq[i].vring.num == 0) {
3702 break;
3703 }
3704 virtio_init_region_cache(vdev, i);
3705 }
3706}
3707
3708static void virtio_device_realize(DeviceState *dev, Error **errp)
3709{
3710 VirtIODevice *vdev = VIRTIO_DEVICE(dev);
3711 VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
3712 Error *err = NULL;
3713
3714 /* Devices should either use vmsd or the load/save methods */
3715 assert(!vdc->vmsd || !vdc->load);
3716
3717 if (vdc->realize != NULL) {
3718 vdc->realize(dev, &err);
3719 if (err != NULL) {
3720 error_propagate(errp, err);
3721 return;
3722 }
8e05db92 3723 }
3724
3725 virtio_bus_device_plugged(vdev, &err);
3726 if (err != NULL) {
3727 error_propagate(errp, err);
b69c3c21 3728 vdc->unrealize(dev);
3729 return;
3730 }
3731
3732 vdev->listener.commit = virtio_memory_listener_commit;
142518bd 3733 vdev->listener.name = "virtio";
c611c764 3734 memory_listener_register(&vdev->listener, vdev->dma_as);
3735}
3736
b69c3c21 3737static void virtio_device_unrealize(DeviceState *dev)
1034e9cf 3738{
1d244b42 3739 VirtIODevice *vdev = VIRTIO_DEVICE(dev);
306ec6c3 3740 VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
1d244b42 3741
f6ab64c0 3742 memory_listener_unregister(&vdev->listener);
3743 virtio_bus_device_unplugged(vdev);
3744
306ec6c3 3745 if (vdc->unrealize != NULL) {
b69c3c21 3746 vdc->unrealize(dev);
5e96f5d2 3747 }
1d244b42 3748
3749 g_free(vdev->bus_name);
3750 vdev->bus_name = NULL;
3751}
3752
3753static void virtio_device_free_virtqueues(VirtIODevice *vdev)
3754{
3755 int i;
3756 if (!vdev->vq) {
3757 return;
3758 }
3759
3760 for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
3761 if (vdev->vq[i].vring.num == 0) {
3762 break;
3763 }
e0e2d644 3764 virtio_virtqueue_reset_region_cache(&vdev->vq[i]);
3765 }
3766 g_free(vdev->vq);
3767}
3768
3769static void virtio_device_instance_finalize(Object *obj)
3770{
3771 VirtIODevice *vdev = VIRTIO_DEVICE(obj);
3772
3773 virtio_device_free_virtqueues(vdev);
3774
3775 g_free(vdev->config);
3776 g_free(vdev->vector_queues);
3777}
3778
3779static Property virtio_properties[] = {
3780 DEFINE_VIRTIO_COMMON_FEATURES(VirtIODevice, host_features),
e57f2c31 3781 DEFINE_PROP_BOOL("use-started", VirtIODevice, use_started, true),
9d7bd082 3782 DEFINE_PROP_BOOL("use-disabled-flag", VirtIODevice, use_disabled_flag, true),
3783 DEFINE_PROP_BOOL("x-disable-legacy-check", VirtIODevice,
3784 disable_legacy_check, false),
3785 DEFINE_PROP_END_OF_LIST(),
3786};
3787
3788static int virtio_device_start_ioeventfd_impl(VirtIODevice *vdev)
3789{
3790 VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev)));
710fccf8 3791 int i, n, r, err;
ff4c07df 3792
3793 /*
3794 * Batch all the host notifiers in a single transaction to avoid
3795 * quadratic time complexity in address_space_update_ioeventfds().
3796 */
710fccf8 3797 memory_region_transaction_begin();
ff4c07df 3798 for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
fa283a4a 3799 VirtQueue *vq = &vdev->vq[n];
3800 if (!virtio_queue_get_num(vdev, n)) {
3801 continue;
3802 }
ed08a2a0 3803 r = virtio_bus_set_host_notifier(qbus, n, true);
3804 if (r < 0) {
3805 err = r;
3806 goto assign_error;
3807 }
d6da1e9e 3808 event_notifier_set_handler(&vq->host_notifier,
fa283a4a 3809 virtio_queue_host_notifier_read);
3810 }
3811
3812 for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
3813 /* Kick right away to begin processing requests already in vring */
3814 VirtQueue *vq = &vdev->vq[n];
3815 if (!vq->vring.num) {
3816 continue;
3817 }
3818 event_notifier_set(&vq->host_notifier);
ff4c07df 3819 }
710fccf8 3820 memory_region_transaction_commit();
3821 return 0;
3822
3823assign_error:
710fccf8 3824 i = n; /* save n for a second iteration after transaction is committed. */
ff4c07df 3825 while (--n >= 0) {
fa283a4a 3826 VirtQueue *vq = &vdev->vq[n];
3827 if (!virtio_queue_get_num(vdev, n)) {
3828 continue;
3829 }
3830
d6da1e9e 3831 event_notifier_set_handler(&vq->host_notifier, NULL);
ed08a2a0 3832 r = virtio_bus_set_host_notifier(qbus, n, false);
ff4c07df 3833 assert(r >= 0);
710fccf8 3834 }
3835 /*
3836 * The transaction expects the ioeventfds to be open when it
3837 * commits. Do it now, before the cleanup loop.
3838 */
3839 memory_region_transaction_commit();
3840
3841 while (--i >= 0) {
3842 if (!virtio_queue_get_num(vdev, i)) {
3843 continue;
3844 }
3845 virtio_bus_cleanup_host_notifier(qbus, i);
3846 }
3847 return err;
3848}
3849
3850int virtio_device_start_ioeventfd(VirtIODevice *vdev)
3851{
3852 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3853 VirtioBusState *vbus = VIRTIO_BUS(qbus);
3854
3855 return virtio_bus_start_ioeventfd(vbus);
3856}
3857
3858static void virtio_device_stop_ioeventfd_impl(VirtIODevice *vdev)
3859{
3860 VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev)));
3861 int n, r;
3862
3863 /*
3864 * Batch all the host notifiers in a single transaction to avoid
3865 * quadratic time complexity in address_space_update_ioeventfds().
3866 */
710fccf8 3867 memory_region_transaction_begin();
ff4c07df 3868 for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
3869 VirtQueue *vq = &vdev->vq[n];
3870
3871 if (!virtio_queue_get_num(vdev, n)) {
3872 continue;
3873 }
d6da1e9e 3874 event_notifier_set_handler(&vq->host_notifier, NULL);
ed08a2a0 3875 r = virtio_bus_set_host_notifier(qbus, n, false);
ff4c07df 3876 assert(r >= 0);
710fccf8 3877 }
3878 /*
3879 * The transaction expects the ioeventfds to be open when it
3880 * commits. Do it now, before the cleanup loop.
3881 */
3882 memory_region_transaction_commit();
3883
3884 for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
3885 if (!virtio_queue_get_num(vdev, n)) {
3886 continue;
3887 }
76143618 3888 virtio_bus_cleanup_host_notifier(qbus, n);
3889 }
3890}
3891
3892int virtio_device_grab_ioeventfd(VirtIODevice *vdev)
3893{
3894 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3895 VirtioBusState *vbus = VIRTIO_BUS(qbus);
3896
3897 return virtio_bus_grab_ioeventfd(vbus);
3898}
3899
3900void virtio_device_release_ioeventfd(VirtIODevice *vdev)
3901{
3902 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3903 VirtioBusState *vbus = VIRTIO_BUS(qbus);
3904
3905 virtio_bus_release_ioeventfd(vbus);
3906}
3907
3908static void virtio_device_class_init(ObjectClass *klass, void *data)
3909{
3910 /* Set the default value here. */
ff4c07df 3911 VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
8e05db92 3912 DeviceClass *dc = DEVICE_CLASS(klass);
3913
3914 dc->realize = virtio_device_realize;
3915 dc->unrealize = virtio_device_unrealize;
8e05db92 3916 dc->bus_type = TYPE_VIRTIO_BUS;
4f67d30b 3917 device_class_set_props(dc, virtio_properties);
3918 vdc->start_ioeventfd = virtio_device_start_ioeventfd_impl;
3919 vdc->stop_ioeventfd = virtio_device_stop_ioeventfd_impl;
3920
3921 vdc->legacy_features |= VIRTIO_LEGACY_FEATURES;
3922}
3923
3924bool virtio_device_ioeventfd_enabled(VirtIODevice *vdev)
3925{
3926 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3927 VirtioBusState *vbus = VIRTIO_BUS(qbus);
3928
3929 return virtio_bus_ioeventfd_enabled(vbus);
3930}
3931
3932VirtQueueStatus *qmp_x_query_virtio_queue_status(const char *path,
3933 uint16_t queue,
3934 Error **errp)
3935{
3936 VirtIODevice *vdev;
3937 VirtQueueStatus *status;
3938
dd92cbb3 3939 vdev = qmp_find_virtio_device(path);
3940 if (vdev == NULL) {
3941 error_setg(errp, "Path %s is not a VirtIODevice", path);
3942 return NULL;
3943 }
3944
3945 if (queue >= VIRTIO_QUEUE_MAX || !virtio_queue_get_num(vdev, queue)) {
3946 error_setg(errp, "Invalid virtqueue number %d", queue);
3947 return NULL;
3948 }
3949
3950 status = g_new0(VirtQueueStatus, 1);
3951 status->name = g_strdup(vdev->name);
3952 status->queue_index = vdev->vq[queue].queue_index;
3953 status->inuse = vdev->vq[queue].inuse;
3954 status->vring_num = vdev->vq[queue].vring.num;
3955 status->vring_num_default = vdev->vq[queue].vring.num_default;
3956 status->vring_align = vdev->vq[queue].vring.align;
3957 status->vring_desc = vdev->vq[queue].vring.desc;
3958 status->vring_avail = vdev->vq[queue].vring.avail;
3959 status->vring_used = vdev->vq[queue].vring.used;
3960 status->used_idx = vdev->vq[queue].used_idx;
3961 status->signalled_used = vdev->vq[queue].signalled_used;
3962 status->signalled_used_valid = vdev->vq[queue].signalled_used_valid;
3963
3964 if (vdev->vhost_started) {
3965 VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
3966 struct vhost_dev *hdev = vdc->get_vhost(vdev);
3967
3968 /* check if vq index exists for vhost as well */
3969 if (queue >= hdev->vq_index && queue < hdev->vq_index + hdev->nvqs) {
3970 status->has_last_avail_idx = true;
3971
3972 int vhost_vq_index =
3973 hdev->vhost_ops->vhost_get_vq_index(hdev, queue);
3974 struct vhost_vring_state state = {
3975 .index = vhost_vq_index,
3976 };
3977
3978 status->last_avail_idx =
3979 hdev->vhost_ops->vhost_get_vring_base(hdev, &state);
3980 }
3981 } else {
3982 status->has_shadow_avail_idx = true;
3983 status->has_last_avail_idx = true;
3984 status->last_avail_idx = vdev->vq[queue].last_avail_idx;
3985 status->shadow_avail_idx = vdev->vq[queue].shadow_avail_idx;
3986 }
3987
3988 return status;
3989}
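/*
 * Example QMP exchange (sketch) for the command backed by this handler.
 * The object path is hypothetical and depends on the machine configuration;
 * the reply is abbreviated.
 *
 * -> { "execute": "x-query-virtio-queue-status",
 *      "arguments": { "path": "/machine/peripheral/vblk0/virtio-backend",
 *                     "queue": 0 } }
 * <- { "return": { "name": "virtio-blk", "queue-index": 0, "inuse": 0,
 *                  "vring-num": 256, ... } }
 */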
3990
3991static strList *qmp_decode_vring_desc_flags(uint16_t flags)
3992{
3993 strList *list = NULL;
3994 strList *node;
3995 int i;
3996
3997 struct {
3998 uint16_t flag;
3999 const char *value;
4000 } map[] = {
4001 { VRING_DESC_F_NEXT, "next" },
4002 { VRING_DESC_F_WRITE, "write" },
4003 { VRING_DESC_F_INDIRECT, "indirect" },
4004 { 1 << VRING_PACKED_DESC_F_AVAIL, "avail" },
4005 { 1 << VRING_PACKED_DESC_F_USED, "used" },
4006 { 0, "" }
4007 };
4008
4009 for (i = 0; map[i].flag; i++) {
4010 if ((map[i].flag & flags) == 0) {
4011 continue;
4012 }
4013 node = g_malloc0(sizeof(strList));
4014 node->value = g_strdup(map[i].value);
4015 node->next = list;
4016 list = node;
4017 }
4018
4019 return list;
4020}
4021
4022VirtioQueueElement *qmp_x_query_virtio_queue_element(const char *path,
4023 uint16_t queue,
4024 bool has_index,
4025 uint16_t index,
4026 Error **errp)
4027{
4028 VirtIODevice *vdev;
4029 VirtQueue *vq;
4030 VirtioQueueElement *element = NULL;
4031
dd92cbb3 4032 vdev = qmp_find_virtio_device(path);
4033 if (vdev == NULL) {
4034 error_setg(errp, "Path %s is not a VirtIO device", path);
4035 return NULL;
4036 }
4037
4038 if (queue >= VIRTIO_QUEUE_MAX || !virtio_queue_get_num(vdev, queue)) {
4039 error_setg(errp, "Invalid virtqueue number %d", queue);
4040 return NULL;
4041 }
4042 vq = &vdev->vq[queue];
4043
4044 if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
4045 error_setg(errp, "Packed ring not supported");
4046 return NULL;
4047 } else {
4048 unsigned int head, i, max;
4049 VRingMemoryRegionCaches *caches;
43d63769 4050 MemoryRegionCache indirect_desc_cache;
4051 MemoryRegionCache *desc_cache;
4052 VRingDesc desc;
4053 VirtioRingDescList *list = NULL;
4054 VirtioRingDescList *node;
4055 int rc; int ndescs;
4056
4057 address_space_cache_init_empty(&indirect_desc_cache);
4058
4059 RCU_READ_LOCK_GUARD();
4060
4061 max = vq->vring.num;
4062
4063 if (!has_index) {
4064 head = vring_avail_ring(vq, vq->last_avail_idx % vq->vring.num);
4065 } else {
4066 head = vring_avail_ring(vq, index % vq->vring.num);
4067 }
4068 i = head;
4069
4070 caches = vring_get_region_caches(vq);
4071 if (!caches) {
4072 error_setg(errp, "Region caches not initialized");
4073 return NULL;
4074 }
4075 if (caches->desc.len < max * sizeof(VRingDesc)) {
4076 error_setg(errp, "Cannot map descriptor ring");
4077 return NULL;
4078 }
4079
4080 desc_cache = &caches->desc;
4081 vring_split_desc_read(vdev, &desc, desc_cache, i);
4082 if (desc.flags & VRING_DESC_F_INDIRECT) {
4083 int64_t len;
4084 len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as,
4085 desc.addr, desc.len, false);
4086 desc_cache = &indirect_desc_cache;
4087 if (len < desc.len) {
4088 error_setg(errp, "Cannot map indirect buffer");
4089 goto done;
4090 }
4091
4092 max = desc.len / sizeof(VRingDesc);
4093 i = 0;
4094 vring_split_desc_read(vdev, &desc, desc_cache, i);
4095 }
4096
4097 element = g_new0(VirtioQueueElement, 1);
4098 element->avail = g_new0(VirtioRingAvail, 1);
4099 element->used = g_new0(VirtioRingUsed, 1);
4100 element->name = g_strdup(vdev->name);
4101 element->index = head;
4102 element->avail->flags = vring_avail_flags(vq);
4103 element->avail->idx = vring_avail_idx(vq);
4104 element->avail->ring = head;
4105 element->used->flags = vring_used_flags(vq);
4106 element->used->idx = vring_used_idx(vq);
4107 ndescs = 0;
4108
4109 do {
4110 /* A buggy driver may produce an infinite loop */
4111 if (ndescs >= max) {
4112 break;
4113 }
4114 node = g_new0(VirtioRingDescList, 1);
4115 node->value = g_new0(VirtioRingDesc, 1);
4116 node->value->addr = desc.addr;
4117 node->value->len = desc.len;
4118 node->value->flags = qmp_decode_vring_desc_flags(desc.flags);
4119 node->next = list;
4120 list = node;
4121
4122 ndescs++;
70f88436 4123 rc = virtqueue_split_read_next_desc(vdev, &desc, desc_cache, max);
4124 } while (rc == VIRTQUEUE_READ_DESC_MORE);
4125 element->descs = list;
4126done:
4127 address_space_cache_destroy(&indirect_desc_cache);
4128 }
4129
4130 return element;
4131}
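/*
 * Example QMP exchange (sketch) for the corresponding element query; the
 * path is hypothetical, "index" may be omitted to use last_avail_idx, and
 * the reply is abbreviated.
 *
 * -> { "execute": "x-query-virtio-queue-element",
 *      "arguments": { "path": "/machine/peripheral/vblk0/virtio-backend",
 *                     "queue": 0 } }
 * <- { "return": { "name": "virtio-blk", "index": 5,
 *                  "descs": [ ... ], "avail": { ... }, "used": { ... } } }
 */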
4132
4133static const TypeInfo virtio_device_info = {
4134 .name = TYPE_VIRTIO_DEVICE,
4135 .parent = TYPE_DEVICE,
4136 .instance_size = sizeof(VirtIODevice),
4137 .class_init = virtio_device_class_init,
c611c764 4138 .instance_finalize = virtio_device_instance_finalize,
4139 .abstract = true,
4140 .class_size = sizeof(VirtioDeviceClass),
4141};
4142
4143static void virtio_register_types(void)
4144{
4145 type_register_static(&virtio_device_info);
4146}
4147
4148type_init(virtio_register_types)
4149
4150QEMUBH *virtio_bh_new_guarded_full(DeviceState *dev,
4151 QEMUBHFunc *cb, void *opaque,
4152 const char *name)
4153{
4154 DeviceState *transport = qdev_get_parent_bus(dev)->parent;
4155
4156 return qemu_bh_new_full(cb, opaque, name,
4157 &transport->mem_reentrancy_guard);
4158}