/*
 * Virtio Support
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <inttypes.h>

#include "trace.h"
#include "exec/address-spaces.h"
#include "qemu/error-report.h"
#include "hw/virtio/virtio.h"
#include "qemu/atomic.h"
#include "hw/virtio/virtio-bus.h"
#include "migration/migration.h"

/*
 * The alignment to use between consumer and producer parts of vring.
 * x86 pagesize again. This is the default, used by transports like PCI
 * which don't provide a means for the guest to tell the host the alignment.
 */
#define VIRTIO_PCI_VRING_ALIGN         4096

typedef struct VRingDesc
{
    uint64_t addr;
    uint32_t len;
    uint16_t flags;
    uint16_t next;
} VRingDesc;

typedef struct VRingAvail
{
    uint16_t flags;
    uint16_t idx;
    uint16_t ring[0];
} VRingAvail;

typedef struct VRingUsedElem
{
    uint32_t id;
    uint32_t len;
} VRingUsedElem;

typedef struct VRingUsed
{
    uint16_t flags;
    uint16_t idx;
    VRingUsedElem ring[0];
} VRingUsed;

typedef struct VRing
{
    unsigned int num;
    unsigned int align;
    hwaddr desc;
    hwaddr avail;
    hwaddr used;
} VRing;

struct VirtQueue
{
    VRing vring;
    hwaddr pa;
    uint16_t last_avail_idx;
    /* Last used index value we have signalled on */
    uint16_t signalled_used;

    /* Whether signalled_used is valid */
    bool signalled_used_valid;

    /* Notification enabled? */
    bool notification;

    uint16_t queue_index;

    int inuse;

    uint16_t vector;
    void (*handle_output)(VirtIODevice *vdev, VirtQueue *vq);
    VirtIODevice *vdev;
    EventNotifier guest_notifier;
    EventNotifier host_notifier;
};

/* virt queue functions */
static void virtqueue_init(VirtQueue *vq)
{
    hwaddr pa = vq->pa;

    vq->vring.desc = pa;
    vq->vring.avail = pa + vq->vring.num * sizeof(VRingDesc);
    vq->vring.used = vring_align(vq->vring.avail +
                                 offsetof(VRingAvail, ring[vq->vring.num]),
                                 vq->vring.align);
}
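
/*
 * Editor's note, a worked example of the layout virtqueue_init() computes
 * (a sketch, assuming the default 4096-byte alignment and an illustrative
 * 256-entry queue):
 *
 *     desc  = pa                        256 * sizeof(VRingDesc) = 4096 bytes
 *     avail = pa + 4096                 4 + 2 * 256             =  516 bytes
 *     used  = vring_align(pa + 4612, 4096) = pa + 8192
 *
 * so the descriptor table, avail ring and used ring live in one contiguous
 * guest-physical region, with only the used ring aligned up.
 */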

static inline uint64_t vring_desc_addr(hwaddr desc_pa, int i)
{
    hwaddr pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, addr);
    return ldq_phys(&address_space_memory, pa);
}

static inline uint32_t vring_desc_len(hwaddr desc_pa, int i)
{
    hwaddr pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, len);
    return ldl_phys(&address_space_memory, pa);
}

static inline uint16_t vring_desc_flags(hwaddr desc_pa, int i)
{
    hwaddr pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, flags);
    return lduw_phys(&address_space_memory, pa);
}

static inline uint16_t vring_desc_next(hwaddr desc_pa, int i)
{
    hwaddr pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, next);
    return lduw_phys(&address_space_memory, pa);
}

static inline uint16_t vring_avail_flags(VirtQueue *vq)
{
    hwaddr pa;
    pa = vq->vring.avail + offsetof(VRingAvail, flags);
    return lduw_phys(&address_space_memory, pa);
}

static inline uint16_t vring_avail_idx(VirtQueue *vq)
{
    hwaddr pa;
    pa = vq->vring.avail + offsetof(VRingAvail, idx);
    return lduw_phys(&address_space_memory, pa);
}

static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
{
    hwaddr pa;
    pa = vq->vring.avail + offsetof(VRingAvail, ring[i]);
    return lduw_phys(&address_space_memory, pa);
}

static inline uint16_t vring_used_event(VirtQueue *vq)
{
    return vring_avail_ring(vq, vq->vring.num);
}

static inline void vring_used_ring_id(VirtQueue *vq, int i, uint32_t val)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, ring[i].id);
    stl_phys(&address_space_memory, pa, val);
}

static inline void vring_used_ring_len(VirtQueue *vq, int i, uint32_t val)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, ring[i].len);
    stl_phys(&address_space_memory, pa, val);
}

static uint16_t vring_used_idx(VirtQueue *vq)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, idx);
    return lduw_phys(&address_space_memory, pa);
}

static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, idx);
    stw_phys(&address_space_memory, pa, val);
}

static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, flags);
    stw_phys(&address_space_memory,
             pa, lduw_phys(&address_space_memory, pa) | mask);
}

static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, flags);
    stw_phys(&address_space_memory,
             pa, lduw_phys(&address_space_memory, pa) & ~mask);
}

static inline void vring_avail_event(VirtQueue *vq, uint16_t val)
{
    hwaddr pa;
    if (!vq->notification) {
        return;
    }
    pa = vq->vring.used + offsetof(VRingUsed, ring[vq->vring.num]);
    stw_phys(&address_space_memory, pa, val);
}
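
/*
 * Editor's note (sketch): with VIRTIO_RING_F_EVENT_IDX negotiated, the two
 * event indexes above live in otherwise unused trailing slots of the
 * opposite ring:
 *
 *     used_event  = avail->ring[num]   (read by vring_used_event())
 *     avail_event = used->ring[num]    (written by vring_avail_event())
 *
 * Each side tells its peer "don't notify me until the index passes this
 * value", replacing the plain NO_NOTIFY/NO_INTERRUPT flags.
 */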

void virtio_queue_set_notification(VirtQueue *vq, int enable)
{
    vq->notification = enable;
    if (vq->vdev->guest_features & (1 << VIRTIO_RING_F_EVENT_IDX)) {
        vring_avail_event(vq, vring_avail_idx(vq));
    } else if (enable) {
        vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
    } else {
        vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
    }
    if (enable) {
        /* Expose avail event/used flags before caller checks the avail idx. */
        smp_mb();
    }
}

int virtio_queue_ready(VirtQueue *vq)
{
    return vq->vring.avail != 0;
}

int virtio_queue_empty(VirtQueue *vq)
{
    return vring_avail_idx(vq) == vq->last_avail_idx;
}

void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len, unsigned int idx)
{
    unsigned int offset;
    int i;

    trace_virtqueue_fill(vq, elem, len, idx);

    offset = 0;
    for (i = 0; i < elem->in_num; i++) {
        size_t size = MIN(len - offset, elem->in_sg[i].iov_len);

        cpu_physical_memory_unmap(elem->in_sg[i].iov_base,
                                  elem->in_sg[i].iov_len,
                                  1, size);

        offset += size;
    }

    for (i = 0; i < elem->out_num; i++)
        cpu_physical_memory_unmap(elem->out_sg[i].iov_base,
                                  elem->out_sg[i].iov_len,
                                  0, elem->out_sg[i].iov_len);

    idx = (idx + vring_used_idx(vq)) % vq->vring.num;

    /* Fill in the next entry of the used ring. */
    vring_used_ring_id(vq, idx, elem->index);
    vring_used_ring_len(vq, idx, len);
}

void virtqueue_flush(VirtQueue *vq, unsigned int count)
{
    uint16_t old, new;
    /* Make sure buffer is written before we update index. */
    smp_wmb();
    trace_virtqueue_flush(vq, count);
    old = vring_used_idx(vq);
    new = old + count;
    vring_used_idx_set(vq, new);
    vq->inuse -= count;
    if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old)))
        vq->signalled_used_valid = false;
}

void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len)
{
    virtqueue_fill(vq, elem, len, 0);
    virtqueue_flush(vq, 1);
}

static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
{
    uint16_t num_heads = vring_avail_idx(vq) - idx;

    /* Check it isn't doing very strange things with descriptor numbers. */
    if (num_heads > vq->vring.num) {
        error_report("Guest moved used index from %u to %u",
                     idx, vring_avail_idx(vq));
        exit(1);
    }
    /* On success, callers read a descriptor at vq->last_avail_idx.
     * Make sure descriptor read does not bypass avail index read. */
    if (num_heads) {
        smp_rmb();
    }

    return num_heads;
}

static unsigned int virtqueue_get_head(VirtQueue *vq, unsigned int idx)
{
    unsigned int head;

    /* Grab the next descriptor number they're advertising, and increment
     * the index we've seen. */
    head = vring_avail_ring(vq, idx % vq->vring.num);

    /* If their number is silly, that's a fatal mistake. */
    if (head >= vq->vring.num) {
        error_report("Guest says index %u is available", head);
        exit(1);
    }

    return head;
}

static unsigned virtqueue_next_desc(hwaddr desc_pa,
                                    unsigned int i, unsigned int max)
{
    unsigned int next;

    /* If this descriptor says it doesn't chain, we're done. */
    if (!(vring_desc_flags(desc_pa, i) & VRING_DESC_F_NEXT))
        return max;

    /* Check they're not leading us off end of descriptors. */
    next = vring_desc_next(desc_pa, i);
    /* Make sure compiler knows to grab that: we don't want it changing! */
    smp_wmb();

    if (next >= max) {
        error_report("Desc next is %u", next);
        exit(1);
    }

    return next;
}

void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
                               unsigned int *out_bytes,
                               unsigned max_in_bytes, unsigned max_out_bytes)
{
    unsigned int idx;
    unsigned int total_bufs, in_total, out_total;

    idx = vq->last_avail_idx;

    total_bufs = in_total = out_total = 0;
    while (virtqueue_num_heads(vq, idx)) {
        unsigned int max, num_bufs, indirect = 0;
        hwaddr desc_pa;
        int i;

        max = vq->vring.num;
        num_bufs = total_bufs;
        i = virtqueue_get_head(vq, idx++);
        desc_pa = vq->vring.desc;

        if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_INDIRECT) {
            if (vring_desc_len(desc_pa, i) % sizeof(VRingDesc)) {
                error_report("Invalid size for indirect buffer table");
                exit(1);
            }

            /* If we've got too many, that implies a descriptor loop. */
            if (num_bufs >= max) {
                error_report("Looped descriptor");
                exit(1);
            }

            /* loop over the indirect descriptor table */
            indirect = 1;
            max = vring_desc_len(desc_pa, i) / sizeof(VRingDesc);
            desc_pa = vring_desc_addr(desc_pa, i);
            num_bufs = i = 0;
        }

        do {
            /* If we've got too many, that implies a descriptor loop. */
            if (++num_bufs > max) {
                error_report("Looped descriptor");
                exit(1);
            }

            if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_WRITE) {
                in_total += vring_desc_len(desc_pa, i);
            } else {
                out_total += vring_desc_len(desc_pa, i);
            }
            if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
                goto done;
            }
        } while ((i = virtqueue_next_desc(desc_pa, i, max)) != max);

        if (!indirect)
            total_bufs = num_bufs;
        else
            total_bufs++;
    }
done:
    if (in_bytes) {
        *in_bytes = in_total;
    }
    if (out_bytes) {
        *out_bytes = out_total;
    }
}

int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes,
                          unsigned int out_bytes)
{
    unsigned int in_total, out_total;

    virtqueue_get_avail_bytes(vq, &in_total, &out_total, in_bytes, out_bytes);
    return in_bytes <= in_total && out_bytes <= out_total;
}
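
/*
 * Editor's usage sketch (hypothetical caller, not part of this file): a
 * device can check for space before popping, e.g. to wait until the guest
 * has queued a buffer large enough for a fixed-size response:
 *
 *     if (!virtqueue_avail_bytes(vq, sizeof(response), 0)) {
 *         return;    // try again on the next queue notification
 *     }
 */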

void virtqueue_map_sg(struct iovec *sg, hwaddr *addr,
                      size_t num_sg, int is_write)
{
    unsigned int i;
    hwaddr len;

    if (num_sg > VIRTQUEUE_MAX_SIZE) {
        error_report("virtio: map attempt out of bounds: %zd > %d",
                     num_sg, VIRTQUEUE_MAX_SIZE);
        exit(1);
    }

    for (i = 0; i < num_sg; i++) {
        len = sg[i].iov_len;
        sg[i].iov_base = cpu_physical_memory_map(addr[i], &len, is_write);
        if (sg[i].iov_base == NULL || len != sg[i].iov_len) {
            error_report("virtio: error trying to map MMIO memory");
            exit(1);
        }
    }
}

int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem)
{
    unsigned int i, head, max;
    hwaddr desc_pa = vq->vring.desc;

    if (!virtqueue_num_heads(vq, vq->last_avail_idx))
        return 0;

    /* When we start there are neither input nor output buffers. */
    elem->out_num = elem->in_num = 0;

    max = vq->vring.num;

    i = head = virtqueue_get_head(vq, vq->last_avail_idx++);
    if (vq->vdev->guest_features & (1 << VIRTIO_RING_F_EVENT_IDX)) {
        vring_avail_event(vq, vring_avail_idx(vq));
    }

    if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_INDIRECT) {
        if (vring_desc_len(desc_pa, i) % sizeof(VRingDesc)) {
            error_report("Invalid size for indirect buffer table");
            exit(1);
        }

        /* loop over the indirect descriptor table */
        max = vring_desc_len(desc_pa, i) / sizeof(VRingDesc);
        desc_pa = vring_desc_addr(desc_pa, i);
        i = 0;
    }

    /* Collect all the descriptors */
    do {
        struct iovec *sg;

        if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_WRITE) {
            if (elem->in_num >= ARRAY_SIZE(elem->in_sg)) {
                error_report("Too many write descriptors in indirect table");
                exit(1);
            }
            elem->in_addr[elem->in_num] = vring_desc_addr(desc_pa, i);
            sg = &elem->in_sg[elem->in_num++];
        } else {
            if (elem->out_num >= ARRAY_SIZE(elem->out_sg)) {
                error_report("Too many read descriptors in indirect table");
                exit(1);
            }
            elem->out_addr[elem->out_num] = vring_desc_addr(desc_pa, i);
            sg = &elem->out_sg[elem->out_num++];
        }

        sg->iov_len = vring_desc_len(desc_pa, i);

        /* If we've got too many, that implies a descriptor loop. */
        if ((elem->in_num + elem->out_num) > max) {
            error_report("Looped descriptor");
            exit(1);
        }
    } while ((i = virtqueue_next_desc(desc_pa, i, max)) != max);

    /* Now map what we have collected */
    virtqueue_map_sg(elem->in_sg, elem->in_addr, elem->in_num, 1);
    virtqueue_map_sg(elem->out_sg, elem->out_addr, elem->out_num, 0);

    elem->index = head;

    vq->inuse++;

    trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
    return elem->in_num + elem->out_num;
}
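
/*
 * Editor's usage sketch (hypothetical handle_output callback; names are
 * illustrative): the canonical device-side loop built from the helpers in
 * this file is
 *
 *     static void my_handle_output(VirtIODevice *vdev, VirtQueue *vq)
 *     {
 *         VirtQueueElement elem;
 *
 *         while (virtqueue_pop(vq, &elem)) {
 *             // read request from elem.out_sg, write reply to elem.in_sg
 *             virtqueue_push(vq, &elem, len_written);
 *             virtio_notify(vdev, vq);
 *         }
 *     }
 *
 * virtqueue_push() fills one used-ring entry and publishes it;
 * virtio_notify() only raises the interrupt if vring_notify() below says
 * the guest wants one.
 */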

/* virtio device */
static void virtio_notify_vector(VirtIODevice *vdev, uint16_t vector)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    if (k->notify) {
        k->notify(qbus->parent, vector);
    }
}

void virtio_update_irq(VirtIODevice *vdev)
{
    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
}

void virtio_set_status(VirtIODevice *vdev, uint8_t val)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    trace_virtio_set_status(vdev, val);

    if (k->set_status) {
        k->set_status(vdev, val);
    }
    vdev->status = val;
}

bool target_words_bigendian(void);
static enum virtio_device_endian virtio_default_endian(void)
{
    if (target_words_bigendian()) {
        return VIRTIO_DEVICE_ENDIAN_BIG;
    } else {
        return VIRTIO_DEVICE_ENDIAN_LITTLE;
    }
}

static enum virtio_device_endian virtio_current_cpu_endian(void)
{
    CPUClass *cc = CPU_GET_CLASS(current_cpu);

    if (cc->virtio_is_big_endian(current_cpu)) {
        return VIRTIO_DEVICE_ENDIAN_BIG;
    } else {
        return VIRTIO_DEVICE_ENDIAN_LITTLE;
    }
}

void virtio_reset(void *opaque)
{
    VirtIODevice *vdev = opaque;
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    int i;

    virtio_set_status(vdev, 0);
    if (current_cpu) {
        /* Guest initiated reset */
        vdev->device_endian = virtio_current_cpu_endian();
    } else {
        /* System reset */
        vdev->device_endian = virtio_default_endian();
    }

    if (k->reset) {
        k->reset(vdev);
    }

    vdev->guest_features = 0;
    vdev->queue_sel = 0;
    vdev->status = 0;
    vdev->isr = 0;
    vdev->config_vector = VIRTIO_NO_VECTOR;
    virtio_notify_vector(vdev, vdev->config_vector);

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        vdev->vq[i].vring.desc = 0;
        vdev->vq[i].vring.avail = 0;
        vdev->vq[i].vring.used = 0;
        vdev->vq[i].last_avail_idx = 0;
        vdev->vq[i].pa = 0;
        vdev->vq[i].vector = VIRTIO_NO_VECTOR;
        vdev->vq[i].signalled_used = 0;
        vdev->vq[i].signalled_used_valid = false;
        vdev->vq[i].notification = true;
    }
}

uint32_t virtio_config_readb(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldub_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_readw(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = lduw_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_readl(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldl_p(vdev->config + addr);
    return val;
}

void virtio_config_writeb(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stb_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_writew(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stw_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_writel(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stl_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}
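
/*
 * Editor's note (sketch): these accessors back the transport's config-space
 * reads and writes. A device's get_config callback typically serializes its
 * state into the supplied buffer, e.g. (hypothetical device, illustrative
 * names):
 *
 *     static void my_get_config(VirtIODevice *vdev, uint8_t *config)
 *     {
 *         MyDeviceState *s = MY_DEVICE(vdev);    // hypothetical cast macro
 *         memcpy(config, &s->cfg, vdev->config_len);
 *     }
 *
 * after which a guest read of a 32-bit field goes through
 * virtio_config_readl(), bounds-checked against vdev->config_len.
 */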

void virtio_queue_set_addr(VirtIODevice *vdev, int n, hwaddr addr)
{
    vdev->vq[n].pa = addr;
    virtqueue_init(&vdev->vq[n]);
}

hwaddr virtio_queue_get_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].pa;
}

void virtio_queue_set_num(VirtIODevice *vdev, int n, int num)
{
    /* Don't allow guest to flip queue between existent and
     * nonexistent states, or to set it to an invalid size.
     */
    if (!!num != !!vdev->vq[n].vring.num ||
        num > VIRTQUEUE_MAX_SIZE ||
        num < 0) {
        return;
    }
    vdev->vq[n].vring.num = num;
    virtqueue_init(&vdev->vq[n]);
}

int virtio_queue_get_num(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.num;
}

int virtio_queue_get_id(VirtQueue *vq)
{
    VirtIODevice *vdev = vq->vdev;
    assert(vq >= &vdev->vq[0] && vq < &vdev->vq[VIRTIO_PCI_QUEUE_MAX]);
    return vq - &vdev->vq[0];
}

void virtio_queue_set_align(VirtIODevice *vdev, int n, int align)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    /* Check that the transport told us it was going to do this
     * (so a buggy transport will immediately assert rather than
     * silently failing to migrate this state)
     */
    assert(k->has_variable_vring_alignment);

    vdev->vq[n].vring.align = align;
    virtqueue_init(&vdev->vq[n]);
}

void virtio_queue_notify_vq(VirtQueue *vq)
{
    if (vq->vring.desc) {
        VirtIODevice *vdev = vq->vdev;
        trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
        vq->handle_output(vdev, vq);
    }
}

void virtio_queue_notify(VirtIODevice *vdev, int n)
{
    virtio_queue_notify_vq(&vdev->vq[n]);
}

uint16_t virtio_queue_vector(VirtIODevice *vdev, int n)
{
    return n < VIRTIO_PCI_QUEUE_MAX ? vdev->vq[n].vector :
        VIRTIO_NO_VECTOR;
}

void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector)
{
    if (n < VIRTIO_PCI_QUEUE_MAX)
        vdev->vq[n].vector = vector;
}

VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
                            void (*handle_output)(VirtIODevice *, VirtQueue *))
{
    int i;

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;
    }

    if (i == VIRTIO_PCI_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE)
        abort();

    vdev->vq[i].vring.num = queue_size;
    vdev->vq[i].vring.align = VIRTIO_PCI_VRING_ALIGN;
    vdev->vq[i].handle_output = handle_output;

    return &vdev->vq[i];
}

void virtio_del_queue(VirtIODevice *vdev, int n)
{
    if (n < 0 || n >= VIRTIO_PCI_QUEUE_MAX) {
        abort();
    }

    vdev->vq[n].vring.num = 0;
}

void virtio_irq(VirtQueue *vq)
{
    trace_virtio_irq(vq);
    vq->vdev->isr |= 0x01;
    virtio_notify_vector(vq->vdev, vq->vector);
}

/* Assuming a given event_idx value from the other side, if
 * we have just incremented index from old to new, should we
 * trigger an event? */
static inline int vring_need_event(uint16_t event, uint16_t new, uint16_t old)
{
    /* Note: Xen has similar logic for notification hold-off
     * in include/xen/interface/io/ring.h with req_event and req_prod
     * corresponding to event_idx + 1 and new respectively.
     * Note also that req_event and req_prod in Xen start at 1,
     * event indexes in virtio start at 0. */
    return (uint16_t)(new - event - 1) < (uint16_t)(new - old);
}
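
/*
 * Editor's worked example (sketch): all three values are free-running
 * uint16_t counters, so the comparison must survive wraparound. With
 * event = 5, old = 4, new = 6:
 *
 *     (uint16_t)(6 - 5 - 1) = 0  <  (uint16_t)(6 - 4) = 2      -> notify
 *
 * and across the 16-bit wrap, event = 0xffff, old = 0xfffe, new = 1:
 *
 *     (uint16_t)(1 - 0xffff - 1) = 1  <  (uint16_t)(1 - 0xfffe) = 3  -> notify
 *
 * i.e. notify exactly when the interval (old, new] covers event + 1.
 */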

static bool vring_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    uint16_t old, new;
    bool v;
    /* We need to expose used array entries before checking used event. */
    smp_mb();
    /* Always notify when queue is empty (if the feature was acknowledged) */
    if (((vdev->guest_features & (1 << VIRTIO_F_NOTIFY_ON_EMPTY)) &&
         !vq->inuse && vring_avail_idx(vq) == vq->last_avail_idx)) {
        return true;
    }

    if (!(vdev->guest_features & (1 << VIRTIO_RING_F_EVENT_IDX))) {
        return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
    }

    v = vq->signalled_used_valid;
    vq->signalled_used_valid = true;
    old = vq->signalled_used;
    new = vq->signalled_used = vring_used_idx(vq);
    return !v || vring_need_event(vring_used_event(vq), new, old);
}

void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    if (!vring_notify(vdev, vq)) {
        return;
    }

    trace_virtio_notify(vdev, vq);
    vdev->isr |= 0x01;
    virtio_notify_vector(vdev, vq->vector);
}

void virtio_notify_config(VirtIODevice *vdev)
{
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
        return;

    vdev->isr |= 0x03;
    virtio_notify_vector(vdev, vdev->config_vector);
}

static bool virtio_device_endian_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    assert(vdev->device_endian != VIRTIO_DEVICE_ENDIAN_UNKNOWN);
    return vdev->device_endian != virtio_default_endian();
}

static const VMStateDescription vmstate_virtio_device_endian = {
    .name = "virtio/device_endian",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(device_endian, VirtIODevice),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio = {
    .name = "virtio",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_END_OF_LIST()
    },
    .subsections = (VMStateSubsection[]) {
        {
            .vmsd = &vmstate_virtio_device_endian,
            .needed = &virtio_device_endian_needed
        },
        { 0 }
    }
};

void virtio_save(VirtIODevice *vdev, QEMUFile *f)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
    int i;

    if (k->save_config) {
        k->save_config(qbus->parent, f);
    }

    qemu_put_8s(f, &vdev->status);
    qemu_put_8s(f, &vdev->isr);
    qemu_put_be16s(f, &vdev->queue_sel);
    qemu_put_be32s(f, &vdev->guest_features);
    qemu_put_be32(f, vdev->config_len);
    qemu_put_buffer(f, vdev->config, vdev->config_len);

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;
    }

    qemu_put_be32(f, i);

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;

        qemu_put_be32(f, vdev->vq[i].vring.num);
        if (k->has_variable_vring_alignment) {
            qemu_put_be32(f, vdev->vq[i].vring.align);
        }
        qemu_put_be64(f, vdev->vq[i].pa);
        qemu_put_be16s(f, &vdev->vq[i].last_avail_idx);
        if (k->save_queue) {
            k->save_queue(qbus->parent, i, f);
        }
    }

    if (vdc->save != NULL) {
        vdc->save(vdev, f);
    }

    /* Subsections */
    vmstate_save_state(f, &vmstate_virtio, vdev);
}

int virtio_set_features(VirtIODevice *vdev, uint32_t val)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *vbusk = VIRTIO_BUS_GET_CLASS(qbus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t supported_features = vbusk->get_features(qbus->parent);
    bool bad = (val & ~supported_features) != 0;

    val &= supported_features;
    if (k->set_features) {
        k->set_features(vdev, val);
    }
    vdev->guest_features = val;
    return bad ? -1 : 0;
}
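
/*
 * Editor's usage sketch (hypothetical transport-side caller): unsupported
 * bits are masked off before the device sees them, and the return value
 * only reports that something was dropped:
 *
 *     if (virtio_set_features(vdev, guest_features) < 0) {
 *         // guest requested feature bits the backend does not offer;
 *         // the supported subset has still been applied
 *     }
 */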

int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
{
    int i, ret;
    int32_t config_len;
    uint32_t num;
    uint32_t features;
    uint32_t supported_features;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);

    /*
     * We poison the endianness to ensure it does not get used before
     * subsections have been loaded.
     */
    vdev->device_endian = VIRTIO_DEVICE_ENDIAN_UNKNOWN;

    if (k->load_config) {
        ret = k->load_config(qbus->parent, f);
        if (ret)
            return ret;
    }

    qemu_get_8s(f, &vdev->status);
    qemu_get_8s(f, &vdev->isr);
    qemu_get_be16s(f, &vdev->queue_sel);
    if (vdev->queue_sel >= VIRTIO_PCI_QUEUE_MAX) {
        return -1;
    }
    qemu_get_be32s(f, &features);

    if (virtio_set_features(vdev, features) < 0) {
        supported_features = k->get_features(qbus->parent);
        error_report("Features 0x%x unsupported. Allowed features: 0x%x",
                     features, supported_features);
        return -1;
    }
    config_len = qemu_get_be32(f);

    /*
     * There are cases where the incoming config can be bigger or smaller
     * than what we have; so load what we have space for, and skip
     * any excess that's in the stream.
     */
    qemu_get_buffer(f, vdev->config, MIN(config_len, vdev->config_len));

    while (config_len > vdev->config_len) {
        qemu_get_byte(f);
        config_len--;
    }

    num = qemu_get_be32(f);

    if (num > VIRTIO_PCI_QUEUE_MAX) {
        error_report("Invalid number of PCI queues: 0x%x", num);
        return -1;
    }

    for (i = 0; i < num; i++) {
        vdev->vq[i].vring.num = qemu_get_be32(f);
        if (k->has_variable_vring_alignment) {
            vdev->vq[i].vring.align = qemu_get_be32(f);
        }
        vdev->vq[i].pa = qemu_get_be64(f);
        qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);
        vdev->vq[i].signalled_used_valid = false;
        vdev->vq[i].notification = true;

        if (vdev->vq[i].pa) {
            virtqueue_init(&vdev->vq[i]);
        } else if (vdev->vq[i].last_avail_idx) {
            error_report("VQ %d address 0x0 "
                         "inconsistent with Host index 0x%x",
                         i, vdev->vq[i].last_avail_idx);
            return -1;
        }
        if (k->load_queue) {
            ret = k->load_queue(qbus->parent, i, f);
            if (ret)
                return ret;
        }
    }

    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);

    if (vdc->load != NULL) {
        ret = vdc->load(vdev, f, version_id);
        if (ret) {
            return ret;
        }
    }

    /* Subsections */
    ret = vmstate_load_state(f, &vmstate_virtio, vdev, 1);
    if (ret) {
        return ret;
    }

    if (vdev->device_endian == VIRTIO_DEVICE_ENDIAN_UNKNOWN) {
        vdev->device_endian = virtio_default_endian();
    }

    for (i = 0; i < num; i++) {
        if (vdev->vq[i].pa) {
            uint16_t nheads;
            nheads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx;
            /* Check it isn't doing strange things with descriptor numbers. */
            if (nheads > vdev->vq[i].vring.num) {
                error_report("VQ %d size 0x%x Guest index 0x%x "
                             "inconsistent with Host index 0x%x: delta 0x%x",
                             i, vdev->vq[i].vring.num,
                             vring_avail_idx(&vdev->vq[i]),
                             vdev->vq[i].last_avail_idx, nheads);
                return -1;
            }
        }
    }

    return 0;
}

void virtio_cleanup(VirtIODevice *vdev)
{
    qemu_del_vm_change_state_handler(vdev->vmstate);
    g_free(vdev->config);
    g_free(vdev->vq);
}

static void virtio_vmstate_change(void *opaque, int running, RunState state)
{
    VirtIODevice *vdev = opaque;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    bool backend_run = running && (vdev->status & VIRTIO_CONFIG_S_DRIVER_OK);
    vdev->vm_running = running;

    if (backend_run) {
        virtio_set_status(vdev, vdev->status);
    }

    if (k->vmstate_change) {
        k->vmstate_change(qbus->parent, backend_run);
    }

    if (!backend_run) {
        virtio_set_status(vdev, vdev->status);
    }
}

void virtio_init(VirtIODevice *vdev, const char *name,
                 uint16_t device_id, size_t config_size)
{
    int i;
    vdev->device_id = device_id;
    vdev->status = 0;
    vdev->isr = 0;
    vdev->queue_sel = 0;
    vdev->config_vector = VIRTIO_NO_VECTOR;
    vdev->vq = g_malloc0(sizeof(VirtQueue) * VIRTIO_PCI_QUEUE_MAX);
    vdev->vm_running = runstate_is_running();
    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        vdev->vq[i].vector = VIRTIO_NO_VECTOR;
        vdev->vq[i].vdev = vdev;
        vdev->vq[i].queue_index = i;
    }

    vdev->name = name;
    vdev->config_len = config_size;
    if (vdev->config_len) {
        vdev->config = g_malloc0(config_size);
    } else {
        vdev->config = NULL;
    }
    vdev->vmstate = qemu_add_vm_change_state_handler(virtio_vmstate_change,
                                                     vdev);
    vdev->device_endian = virtio_default_endian();
}
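
/*
 * Editor's realize-time sketch (hypothetical device, illustrative names):
 * a virtio device typically wires itself up with virtio_init() plus one
 * virtio_add_queue() per virtqueue:
 *
 *     static void my_device_realize(DeviceState *dev, Error **errp)
 *     {
 *         VirtIODevice *vdev = VIRTIO_DEVICE(dev);
 *
 *         virtio_init(vdev, "my-device", MY_DEVICE_ID,
 *                     sizeof(struct my_config));
 *         virtio_add_queue(vdev, 128, my_handle_output);
 *     }
 *
 * The queue size must not exceed VIRTQUEUE_MAX_SIZE, and the vring keeps
 * the default VIRTIO_PCI_VRING_ALIGN until the transport overrides it via
 * virtio_queue_set_align().
 */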

hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

hwaddr virtio_queue_get_avail_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.avail;
}

hwaddr virtio_queue_get_used_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.used;
}

hwaddr virtio_queue_get_ring_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

hwaddr virtio_queue_get_desc_size(VirtIODevice *vdev, int n)
{
    return sizeof(VRingDesc) * vdev->vq[n].vring.num;
}

hwaddr virtio_queue_get_avail_size(VirtIODevice *vdev, int n)
{
    return offsetof(VRingAvail, ring) +
        sizeof(uint64_t) * vdev->vq[n].vring.num;
}

hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n)
{
    return offsetof(VRingUsed, ring) +
        sizeof(VRingUsedElem) * vdev->vq[n].vring.num;
}

hwaddr virtio_queue_get_ring_size(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.used - vdev->vq[n].vring.desc +
        virtio_queue_get_used_size(vdev, n);
}

uint16_t virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].last_avail_idx;
}

void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, uint16_t idx)
{
    vdev->vq[n].last_avail_idx = idx;
}

void virtio_queue_invalidate_signalled_used(VirtIODevice *vdev, int n)
{
    vdev->vq[n].signalled_used_valid = false;
}

VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n)
{
    return vdev->vq + n;
}

uint16_t virtio_get_queue_index(VirtQueue *vq)
{
    return vq->queue_index;
}

static void virtio_queue_guest_notifier_read(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, guest_notifier);
    if (event_notifier_test_and_clear(n)) {
        virtio_irq(vq);
    }
}

void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
                                                bool with_irqfd)
{
    if (assign && !with_irqfd) {
        event_notifier_set_handler(&vq->guest_notifier,
                                   virtio_queue_guest_notifier_read);
    } else {
        event_notifier_set_handler(&vq->guest_notifier, NULL);
    }
    if (!assign) {
        /* Test and clear notifier before closing it,
         * in case poll callback didn't have time to run. */
        virtio_queue_guest_notifier_read(&vq->guest_notifier);
    }
}

EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq)
{
    return &vq->guest_notifier;
}

static void virtio_queue_host_notifier_read(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
    if (event_notifier_test_and_clear(n)) {
        virtio_queue_notify_vq(vq);
    }
}

void virtio_queue_set_host_notifier_fd_handler(VirtQueue *vq, bool assign,
                                               bool set_handler)
{
    if (assign && set_handler) {
        event_notifier_set_handler(&vq->host_notifier,
                                   virtio_queue_host_notifier_read);
    } else {
        event_notifier_set_handler(&vq->host_notifier, NULL);
    }
    if (!assign) {
        /* Test and clear notifier after disabling event,
         * in case poll callback didn't have time to run. */
        virtio_queue_host_notifier_read(&vq->host_notifier);
    }
}

EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq)
{
    return &vq->host_notifier;
}

void virtio_device_set_child_bus_name(VirtIODevice *vdev, char *bus_name)
{
    g_free(vdev->bus_name);
    vdev->bus_name = g_strdup(bus_name);
}

static void virtio_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
    Error *err = NULL;

    if (vdc->realize != NULL) {
        vdc->realize(dev, &err);
        if (err != NULL) {
            error_propagate(errp, err);
            return;
        }
    }
    virtio_bus_device_plugged(vdev);
}

static void virtio_device_unrealize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
    Error *err = NULL;

    virtio_bus_device_unplugged(vdev);

    if (vdc->unrealize != NULL) {
        vdc->unrealize(dev, &err);
        if (err != NULL) {
            error_propagate(errp, err);
            return;
        }
    }

    g_free(vdev->bus_name);
    vdev->bus_name = NULL;
}

static void virtio_device_class_init(ObjectClass *klass, void *data)
{
    /* Set the default value here. */
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = virtio_device_realize;
    dc->unrealize = virtio_device_unrealize;
    dc->bus_type = TYPE_VIRTIO_BUS;
}

static const TypeInfo virtio_device_info = {
    .name = TYPE_VIRTIO_DEVICE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(VirtIODevice),
    .class_init = virtio_device_class_init,
    .abstract = true,
    .class_size = sizeof(VirtioDeviceClass),
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_device_info);
}

type_init(virtio_register_types)