/*
 * Virtio Support
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <inttypes.h>

#include "trace.h"
#include "qemu-error.h"
#include "virtio.h"
#include "sysemu.h"

/* The alignment to use between consumer and producer parts of vring.
 * x86 pagesize again. */
#define VIRTIO_PCI_VRING_ALIGN         4096

/* QEMU doesn't strictly need write barriers since everything runs in
 * lock-step.  We'll leave the calls to wmb() in though to make it obvious for
 * KVM or if kqemu gets SMP support.
 * In any case, we must prevent the compiler from reordering the code.
 * TODO: we likely need some rmb()/mb() as well.
 */

#define wmb() __asm__ __volatile__("": : :"memory")

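/* The VRingDesc/VRingAvail/VRingUsed structs below mirror the guest-visible
 * vring layout defined by the virtio specification.  They are never
 * instantiated on the host; they exist only so that sizeof()/offsetof()
 * arithmetic can address the rings in guest memory. */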
typedef struct VRingDesc
{
    uint64_t addr;
    uint32_t len;
    uint16_t flags;
    uint16_t next;
} VRingDesc;

typedef struct VRingAvail
{
    uint16_t flags;
    uint16_t idx;
    uint16_t ring[0];
} VRingAvail;

typedef struct VRingUsedElem
{
    uint32_t id;
    uint32_t len;
} VRingUsedElem;

typedef struct VRingUsed
{
    uint16_t flags;
    uint16_t idx;
    VRingUsedElem ring[0];
} VRingUsed;

typedef struct VRing
{
    unsigned int num;
    target_phys_addr_t desc;
    target_phys_addr_t avail;
    target_phys_addr_t used;
} VRing;

struct VirtQueue
{
    VRing vring;
    target_phys_addr_t pa;
    uint16_t last_avail_idx;
    int inuse;
    uint16_t vector;
    void (*handle_output)(VirtIODevice *vdev, VirtQueue *vq);
    VirtIODevice *vdev;
    EventNotifier guest_notifier;
    EventNotifier host_notifier;
};

/* virt queue functions */
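/* The vring occupies one contiguous guest-physical region: the descriptor
 * table at pa, the avail ring immediately after it, and the used ring
 * aligned up to VIRTIO_PCI_VRING_ALIGN beyond the end of the avail ring. */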
static void virtqueue_init(VirtQueue *vq)
{
    target_phys_addr_t pa = vq->pa;

    vq->vring.desc = pa;
    vq->vring.avail = pa + vq->vring.num * sizeof(VRingDesc);
    vq->vring.used = vring_align(vq->vring.avail +
                                 offsetof(VRingAvail, ring[vq->vring.num]),
                                 VIRTIO_PCI_VRING_ALIGN);
}

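/* All ring fields are accessed through the ld*_phys()/st*_phys() helpers,
 * i.e. straight from guest memory in target byte order on every access;
 * nothing is cached on the host side. */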
static inline uint64_t vring_desc_addr(target_phys_addr_t desc_pa, int i)
{
    target_phys_addr_t pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, addr);
    return ldq_phys(pa);
}

static inline uint32_t vring_desc_len(target_phys_addr_t desc_pa, int i)
{
    target_phys_addr_t pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, len);
    return ldl_phys(pa);
}

static inline uint16_t vring_desc_flags(target_phys_addr_t desc_pa, int i)
{
    target_phys_addr_t pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, flags);
    return lduw_phys(pa);
}

static inline uint16_t vring_desc_next(target_phys_addr_t desc_pa, int i)
{
    target_phys_addr_t pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, next);
    return lduw_phys(pa);
}

static inline uint16_t vring_avail_flags(VirtQueue *vq)
{
    target_phys_addr_t pa;
    pa = vq->vring.avail + offsetof(VRingAvail, flags);
    return lduw_phys(pa);
}

static inline uint16_t vring_avail_idx(VirtQueue *vq)
{
    target_phys_addr_t pa;
    pa = vq->vring.avail + offsetof(VRingAvail, idx);
    return lduw_phys(pa);
}

static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
{
    target_phys_addr_t pa;
    pa = vq->vring.avail + offsetof(VRingAvail, ring[i]);
    return lduw_phys(pa);
}

static inline void vring_used_ring_id(VirtQueue *vq, int i, uint32_t val)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, ring[i].id);
    stl_phys(pa, val);
}

static inline void vring_used_ring_len(VirtQueue *vq, int i, uint32_t val)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, ring[i].len);
    stl_phys(pa, val);
}

static uint16_t vring_used_idx(VirtQueue *vq)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, idx);
    return lduw_phys(pa);
}

static inline void vring_used_idx_increment(VirtQueue *vq, uint16_t val)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, idx);
    stw_phys(pa, vring_used_idx(vq) + val);
}

static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, flags);
    stw_phys(pa, lduw_phys(pa) | mask);
}

static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, flags);
    stw_phys(pa, lduw_phys(pa) & ~mask);
}

void virtio_queue_set_notification(VirtQueue *vq, int enable)
{
    if (enable)
        vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
    else
        vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
}

int virtio_queue_ready(VirtQueue *vq)
{
    return vq->vring.avail != 0;
}

int virtio_queue_empty(VirtQueue *vq)
{
    return vring_avail_idx(vq) == vq->last_avail_idx;
}

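/* Completion is split in two steps: virtqueue_fill() unmaps an element's
 * buffers and stages a used-ring entry, while virtqueue_flush() publishes
 * the staged entries to the guest by bumping used->idx. */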
void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len, unsigned int idx)
{
    unsigned int offset;
    int i;

    trace_virtqueue_fill(vq, elem, len, idx);

    offset = 0;
    for (i = 0; i < elem->in_num; i++) {
        size_t size = MIN(len - offset, elem->in_sg[i].iov_len);

        cpu_physical_memory_unmap(elem->in_sg[i].iov_base,
                                  elem->in_sg[i].iov_len,
                                  1, size);

        offset += elem->in_sg[i].iov_len;
    }

    for (i = 0; i < elem->out_num; i++)
        cpu_physical_memory_unmap(elem->out_sg[i].iov_base,
                                  elem->out_sg[i].iov_len,
                                  0, elem->out_sg[i].iov_len);

    idx = (idx + vring_used_idx(vq)) % vq->vring.num;

    /* Get a pointer to the next entry in the used ring. */
    vring_used_ring_id(vq, idx, elem->index);
    vring_used_ring_len(vq, idx, len);
}

void virtqueue_flush(VirtQueue *vq, unsigned int count)
{
    /* Make sure buffer is written before we update index. */
    wmb();
    trace_virtqueue_flush(vq, count);
    vring_used_idx_increment(vq, count);
    vq->inuse -= count;
}

void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len)
{
    virtqueue_fill(vq, elem, len, 0);
    virtqueue_flush(vq, 1);
}

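/* avail->idx and last_avail_idx are free-running 16-bit counters, so the
 * unsigned subtraction below yields the number of new heads even across
 * wraparound. */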
static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
{
    uint16_t num_heads = vring_avail_idx(vq) - idx;

    /* Check it isn't doing very strange things with descriptor numbers. */
    if (num_heads > vq->vring.num) {
        error_report("Guest moved used index from %u to %u",
                     idx, vring_avail_idx(vq));
        exit(1);
    }

    return num_heads;
}

static unsigned int virtqueue_get_head(VirtQueue *vq, unsigned int idx)
{
    unsigned int head;

    /* Grab the next descriptor number they're advertising, and increment
     * the index we've seen. */
    head = vring_avail_ring(vq, idx % vq->vring.num);

    /* If their number is silly, that's a fatal mistake. */
    if (head >= vq->vring.num) {
        error_report("Guest says index %u is available", head);
        exit(1);
    }

    return head;
}

static unsigned virtqueue_next_desc(target_phys_addr_t desc_pa,
                                    unsigned int i, unsigned int max)
{
    unsigned int next;

    /* If this descriptor says it doesn't chain, we're done. */
    if (!(vring_desc_flags(desc_pa, i) & VRING_DESC_F_NEXT))
        return max;

    /* Check they're not leading us off end of descriptors. */
    next = vring_desc_next(desc_pa, i);
    /* Make sure compiler knows to grab that: we don't want it changing! */
    wmb();

    if (next >= max) {
        error_report("Desc next is %u", next);
        exit(1);
    }

    return next;
}

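/* Peek at the avail ring without consuming anything: walk each pending
 * descriptor chain (following indirect tables) and return 1 as soon as
 * in_bytes of guest-writable or out_bytes of guest-readable buffer space
 * has been seen. */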
int virtqueue_avail_bytes(VirtQueue *vq, int in_bytes, int out_bytes)
{
    unsigned int idx;
    int total_bufs, in_total, out_total;

    idx = vq->last_avail_idx;

    total_bufs = in_total = out_total = 0;
    while (virtqueue_num_heads(vq, idx)) {
        unsigned int max, num_bufs, indirect = 0;
        target_phys_addr_t desc_pa;
        int i;

        max = vq->vring.num;
        num_bufs = total_bufs;
        i = virtqueue_get_head(vq, idx++);
        desc_pa = vq->vring.desc;

        if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_INDIRECT) {
            if (vring_desc_len(desc_pa, i) % sizeof(VRingDesc)) {
                error_report("Invalid size for indirect buffer table");
                exit(1);
            }

            /* If we've got too many, that implies a descriptor loop. */
            if (num_bufs >= max) {
                error_report("Looped descriptor");
                exit(1);
            }

            /* loop over the indirect descriptor table; the table address
             * must be read before i is reset to index into it */
            indirect = 1;
            max = vring_desc_len(desc_pa, i) / sizeof(VRingDesc);
            desc_pa = vring_desc_addr(desc_pa, i);
            num_bufs = i = 0;
        }

        do {
            /* If we've got too many, that implies a descriptor loop. */
            if (++num_bufs > max) {
                error_report("Looped descriptor");
                exit(1);
            }

            if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_WRITE) {
                if (in_bytes > 0 &&
                    (in_total += vring_desc_len(desc_pa, i)) >= in_bytes)
                    return 1;
            } else {
                if (out_bytes > 0 &&
                    (out_total += vring_desc_len(desc_pa, i)) >= out_bytes)
                    return 1;
            }
        } while ((i = virtqueue_next_desc(desc_pa, i, max)) != max);

        if (!indirect)
            total_bufs = num_bufs;
        else
            total_bufs++;
    }

    return 0;
}

void virtqueue_map_sg(struct iovec *sg, target_phys_addr_t *addr,
                      size_t num_sg, int is_write)
{
    unsigned int i;
    target_phys_addr_t len;

    for (i = 0; i < num_sg; i++) {
        len = sg[i].iov_len;
        sg[i].iov_base = cpu_physical_memory_map(addr[i], &len, is_write);
        if (sg[i].iov_base == NULL || len != sg[i].iov_len) {
            error_report("virtio: trying to map MMIO memory");
            exit(1);
        }
    }
}

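/* Consume the next available element: take a head off the avail ring,
 * gather its descriptor chain into elem->in_sg/out_sg, map the buffers
 * into host memory and count the element as in use until it is pushed
 * back onto the used ring. */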
int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem)
{
    unsigned int i, head, max;
    target_phys_addr_t desc_pa = vq->vring.desc;

    if (!virtqueue_num_heads(vq, vq->last_avail_idx))
        return 0;

    /* When we start there are none of either input nor output. */
    elem->out_num = elem->in_num = 0;

    max = vq->vring.num;

    i = head = virtqueue_get_head(vq, vq->last_avail_idx++);

    if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_INDIRECT) {
        if (vring_desc_len(desc_pa, i) % sizeof(VRingDesc)) {
            error_report("Invalid size for indirect buffer table");
            exit(1);
        }

        /* loop over the indirect descriptor table */
        max = vring_desc_len(desc_pa, i) / sizeof(VRingDesc);
        desc_pa = vring_desc_addr(desc_pa, i);
        i = 0;
    }

    /* Collect all the descriptors */
    do {
        struct iovec *sg;

        if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_WRITE) {
            elem->in_addr[elem->in_num] = vring_desc_addr(desc_pa, i);
            sg = &elem->in_sg[elem->in_num++];
        } else {
            elem->out_addr[elem->out_num] = vring_desc_addr(desc_pa, i);
            sg = &elem->out_sg[elem->out_num++];
        }

        sg->iov_len = vring_desc_len(desc_pa, i);

        /* If we've got too many, that implies a descriptor loop. */
        if ((elem->in_num + elem->out_num) > max) {
            error_report("Looped descriptor");
            exit(1);
        }
    } while ((i = virtqueue_next_desc(desc_pa, i, max)) != max);

    /* Now map what we have collected */
    virtqueue_map_sg(elem->in_sg, elem->in_addr, elem->in_num, 1);
    virtqueue_map_sg(elem->out_sg, elem->out_addr, elem->out_num, 0);

    elem->index = head;

    vq->inuse++;

    trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
    return elem->in_num + elem->out_num;
}

/* virtio device */
static void virtio_notify_vector(VirtIODevice *vdev, uint16_t vector)
{
    if (vdev->binding->notify) {
        vdev->binding->notify(vdev->binding_opaque, vector);
    }
}

void virtio_update_irq(VirtIODevice *vdev)
{
    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
}

void virtio_reset(void *opaque)
{
    VirtIODevice *vdev = opaque;
    int i;

    virtio_set_status(vdev, 0);

    if (vdev->reset)
        vdev->reset(vdev);

    vdev->guest_features = 0;
    vdev->queue_sel = 0;
    vdev->status = 0;
    vdev->isr = 0;
    vdev->config_vector = VIRTIO_NO_VECTOR;
    virtio_notify_vector(vdev, vdev->config_vector);

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        vdev->vq[i].vring.desc = 0;
        vdev->vq[i].vring.avail = 0;
        vdev->vq[i].vring.used = 0;
        vdev->vq[i].last_avail_idx = 0;
        vdev->vq[i].pa = 0;
        vdev->vq[i].vector = VIRTIO_NO_VECTOR;
    }
}

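/* Config space accessors.  Reads refresh the device's shadow config via
 * get_config() before copying out; writes update the shadow copy and then
 * hand it to the device's set_config() callback, if any. */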
uint32_t virtio_config_readb(VirtIODevice *vdev, uint32_t addr)
{
    uint8_t val;

    vdev->get_config(vdev, vdev->config);

    if (addr > (vdev->config_len - sizeof(val)))
        return (uint32_t)-1;

    memcpy(&val, vdev->config + addr, sizeof(val));
    return val;
}

uint32_t virtio_config_readw(VirtIODevice *vdev, uint32_t addr)
{
    uint16_t val;

    vdev->get_config(vdev, vdev->config);

    if (addr > (vdev->config_len - sizeof(val)))
        return (uint32_t)-1;

    memcpy(&val, vdev->config + addr, sizeof(val));
    return val;
}

uint32_t virtio_config_readl(VirtIODevice *vdev, uint32_t addr)
{
    uint32_t val;

    vdev->get_config(vdev, vdev->config);

    if (addr > (vdev->config_len - sizeof(val)))
        return (uint32_t)-1;

    memcpy(&val, vdev->config + addr, sizeof(val));
    return val;
}

void virtio_config_writeb(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    uint8_t val = data;

    if (addr > (vdev->config_len - sizeof(val)))
        return;

    memcpy(vdev->config + addr, &val, sizeof(val));

    if (vdev->set_config)
        vdev->set_config(vdev, vdev->config);
}

void virtio_config_writew(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    uint16_t val = data;

    if (addr > (vdev->config_len - sizeof(val)))
        return;

    memcpy(vdev->config + addr, &val, sizeof(val));

    if (vdev->set_config)
        vdev->set_config(vdev, vdev->config);
}

void virtio_config_writel(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    uint32_t val = data;

    if (addr > (vdev->config_len - sizeof(val)))
        return;

    memcpy(vdev->config + addr, &val, sizeof(val));

    if (vdev->set_config)
        vdev->set_config(vdev, vdev->config);
}

void virtio_queue_set_addr(VirtIODevice *vdev, int n, target_phys_addr_t addr)
{
    vdev->vq[n].pa = addr;
    virtqueue_init(&vdev->vq[n]);
}

target_phys_addr_t virtio_queue_get_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].pa;
}

int virtio_queue_get_num(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.num;
}

void virtio_queue_notify(VirtIODevice *vdev, int n)
{
    if (n < VIRTIO_PCI_QUEUE_MAX && vdev->vq[n].vring.desc) {
        trace_virtio_queue_notify(vdev, n, &vdev->vq[n]);
        vdev->vq[n].handle_output(vdev, &vdev->vq[n]);
    }
}

uint16_t virtio_queue_vector(VirtIODevice *vdev, int n)
{
    return n < VIRTIO_PCI_QUEUE_MAX ? vdev->vq[n].vector :
        VIRTIO_NO_VECTOR;
}

void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector)
{
    if (n < VIRTIO_PCI_QUEUE_MAX)
        vdev->vq[n].vector = vector;
}

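/* Queues are handed out from the first slot whose vring.num is still zero;
 * the device aborts if all VIRTIO_PCI_QUEUE_MAX slots are taken or the
 * requested size exceeds VIRTQUEUE_MAX_SIZE. */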
VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
                            void (*handle_output)(VirtIODevice *, VirtQueue *))
{
    int i;

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;
    }

    if (i == VIRTIO_PCI_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE)
        abort();

    vdev->vq[i].vring.num = queue_size;
    vdev->vq[i].handle_output = handle_output;

    return &vdev->vq[i];
}

void virtio_irq(VirtQueue *vq)
{
    trace_virtio_irq(vq);
    vq->vdev->isr |= 0x01;
    virtio_notify_vector(vq->vdev, vq->vector);
}

void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    /* Suppress the interrupt if the guest set VRING_AVAIL_F_NO_INTERRUPT,
     * unless VIRTIO_F_NOTIFY_ON_EMPTY was negotiated and the queue has just
     * gone empty: in that case we always notify. */
    if ((vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT) &&
        (!(vdev->guest_features & (1 << VIRTIO_F_NOTIFY_ON_EMPTY)) ||
         (vq->inuse || vring_avail_idx(vq) != vq->last_avail_idx)))
        return;

    trace_virtio_notify(vdev, vq);
    vdev->isr |= 0x01;
    virtio_notify_vector(vdev, vq->vector);
}

void virtio_notify_config(VirtIODevice *vdev)
{
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
        return;

    vdev->isr |= 0x03;
    virtio_notify_vector(vdev, vdev->config_vector);
}

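/* Migration format: the binding's config section first, then status, isr,
 * queue_sel, the negotiated guest features and the raw config blob,
 * followed by the number of active queues and, per queue, its size, ring
 * address and last_avail_idx (plus any binding per-queue state). */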
void virtio_save(VirtIODevice *vdev, QEMUFile *f)
{
    int i;

    if (vdev->binding->save_config)
        vdev->binding->save_config(vdev->binding_opaque, f);

    qemu_put_8s(f, &vdev->status);
    qemu_put_8s(f, &vdev->isr);
    qemu_put_be16s(f, &vdev->queue_sel);
    qemu_put_be32s(f, &vdev->guest_features);
    qemu_put_be32(f, vdev->config_len);
    qemu_put_buffer(f, vdev->config, vdev->config_len);

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;
    }

    qemu_put_be32(f, i);

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;

        qemu_put_be32(f, vdev->vq[i].vring.num);
        qemu_put_be64(f, vdev->vq[i].pa);
        qemu_put_be16s(f, &vdev->vq[i].last_avail_idx);
        if (vdev->binding->save_queue)
            vdev->binding->save_queue(vdev->binding_opaque, i, f);
    }
}

int virtio_load(VirtIODevice *vdev, QEMUFile *f)
{
    int num, i, ret;
    uint32_t features;
    uint32_t supported_features =
        vdev->binding->get_features(vdev->binding_opaque);

    if (vdev->binding->load_config) {
        ret = vdev->binding->load_config(vdev->binding_opaque, f);
        if (ret)
            return ret;
    }

    qemu_get_8s(f, &vdev->status);
    qemu_get_8s(f, &vdev->isr);
    qemu_get_be16s(f, &vdev->queue_sel);
    qemu_get_be32s(f, &features);
    if (features & ~supported_features) {
        error_report("Features 0x%x unsupported. Allowed features: 0x%x",
                     features, supported_features);
        return -1;
    }
    if (vdev->set_features)
        vdev->set_features(vdev, features);
    vdev->guest_features = features;
    vdev->config_len = qemu_get_be32(f);
    qemu_get_buffer(f, vdev->config, vdev->config_len);

    num = qemu_get_be32(f);

    for (i = 0; i < num; i++) {
        vdev->vq[i].vring.num = qemu_get_be32(f);
        vdev->vq[i].pa = qemu_get_be64(f);
        qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);

        if (vdev->vq[i].pa) {
            uint16_t nheads;
            virtqueue_init(&vdev->vq[i]);
            nheads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx;
            /* Check it isn't doing very strange things with descriptor numbers. */
            if (nheads > vdev->vq[i].vring.num) {
                error_report("VQ %d size 0x%x Guest index 0x%x "
                             "inconsistent with Host index 0x%x: delta 0x%x",
                             i, vdev->vq[i].vring.num,
                             vring_avail_idx(&vdev->vq[i]),
                             vdev->vq[i].last_avail_idx, nheads);
                return -1;
            }
        } else if (vdev->vq[i].last_avail_idx) {
            error_report("VQ %d address 0x0 "
                         "inconsistent with Host index 0x%x",
                         i, vdev->vq[i].last_avail_idx);
            return -1;
        }
        if (vdev->binding->load_queue) {
            ret = vdev->binding->load_queue(vdev->binding_opaque, i, f);
            if (ret)
                return ret;
        }
    }

    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
    return 0;
}

void virtio_cleanup(VirtIODevice *vdev)
{
    if (vdev->config)
        qemu_free(vdev->config);
    qemu_free(vdev->vq);
}

VirtIODevice *virtio_common_init(const char *name, uint16_t device_id,
                                 size_t config_size, size_t struct_size)
{
    VirtIODevice *vdev;
    int i;

    vdev = qemu_mallocz(struct_size);

    vdev->device_id = device_id;
    vdev->status = 0;
    vdev->isr = 0;
    vdev->queue_sel = 0;
    vdev->config_vector = VIRTIO_NO_VECTOR;
    vdev->vq = qemu_mallocz(sizeof(VirtQueue) * VIRTIO_PCI_QUEUE_MAX);
    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        vdev->vq[i].vector = VIRTIO_NO_VECTOR;
        vdev->vq[i].vdev = vdev;
    }

    vdev->name = name;
    vdev->config_len = config_size;
    if (vdev->config_len)
        vdev->config = qemu_mallocz(config_size);
    else
        vdev->config = NULL;

    return vdev;
}

void virtio_bind_device(VirtIODevice *vdev, const VirtIOBindings *binding,
                        void *opaque)
{
    vdev->binding = binding;
    vdev->binding_opaque = opaque;
}

target_phys_addr_t virtio_queue_get_desc_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

target_phys_addr_t virtio_queue_get_avail_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.avail;
}

target_phys_addr_t virtio_queue_get_used_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.used;
}

target_phys_addr_t virtio_queue_get_ring_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

target_phys_addr_t virtio_queue_get_desc_size(VirtIODevice *vdev, int n)
{
    return sizeof(VRingDesc) * vdev->vq[n].vring.num;
}

target_phys_addr_t virtio_queue_get_avail_size(VirtIODevice *vdev, int n)
{
    /* avail ring entries are 16 bit, matching VRingAvail.ring above */
    return offsetof(VRingAvail, ring) +
        sizeof(uint16_t) * vdev->vq[n].vring.num;
}

target_phys_addr_t virtio_queue_get_used_size(VirtIODevice *vdev, int n)
{
    return offsetof(VRingUsed, ring) +
        sizeof(VRingUsedElem) * vdev->vq[n].vring.num;
}

target_phys_addr_t virtio_queue_get_ring_size(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.used - vdev->vq[n].vring.desc +
        virtio_queue_get_used_size(vdev, n);
}

uint16_t virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].last_avail_idx;
}

void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, uint16_t idx)
{
    vdev->vq[n].last_avail_idx = idx;
}

VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n)
{
    return vdev->vq + n;
}

EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq)
{
    return &vq->guest_notifier;
}

EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq)
{
    return &vq->host_notifier;
}