/*
 * Virtio Support
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <inttypes.h>

#include "virtio.h"
#include "sysemu.h"

/* The alignment to use between consumer and producer parts of vring.
 * x86 pagesize again. */
#define VIRTIO_PCI_VRING_ALIGN         4096

/* QEMU doesn't strictly need write barriers since everything runs in
 * lock-step.  We'll leave the calls to wmb() in though to make it obvious for
 * KVM or if kqemu gets SMP support.
 * In any case, we must prevent the compiler from reordering the code.
 * TODO: we likely need some rmb()/mb() as well.
 */

#define wmb() __asm__ __volatile__("": : :"memory")

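/* Guest-visible vring layout, as defined by the virtio specification: a
 * descriptor table, a driver-written "available" ring and a device-written
 * "used" ring, laid out contiguously in guest physical memory. */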
typedef struct VRingDesc
{
    uint64_t addr;
    uint32_t len;
    uint16_t flags;
    uint16_t next;
} VRingDesc;

typedef struct VRingAvail
{
    uint16_t flags;
    uint16_t idx;
    uint16_t ring[0];
} VRingAvail;

typedef struct VRingUsedElem
{
    uint32_t id;
    uint32_t len;
} VRingUsedElem;

typedef struct VRingUsed
{
    uint16_t flags;
    uint16_t idx;
    VRingUsedElem ring[0];
} VRingUsed;

typedef struct VRing
{
    unsigned int num;
    target_phys_addr_t desc;
    target_phys_addr_t avail;
    target_phys_addr_t used;
} VRing;

struct VirtQueue
{
    VRing vring;
    target_phys_addr_t pa;
    uint16_t last_avail_idx;
    int inuse;
    uint16_t vector;
    void (*handle_output)(VirtIODevice *vdev, VirtQueue *vq);
    VirtIODevice *vdev;
    EventNotifier guest_notifier;
    EventNotifier host_notifier;
};

/* virt queue functions */
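/* Compute the guest-physical addresses of the three vring parts from the
 * queue base address.  The descriptor table starts at pa, the avail ring
 * follows it immediately, and the used ring starts at the next
 * VIRTIO_PCI_VRING_ALIGN boundary.  For example, with num = 256:
 * the descriptors take 256 * 16 = 4096 bytes, the avail ring starts at
 * pa + 4096 and takes 4 + 256 * 2 = 516 bytes, so the used ring is
 * aligned up to pa + 8192. */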
static void virtqueue_init(VirtQueue *vq)
{
    target_phys_addr_t pa = vq->pa;

    vq->vring.desc = pa;
    vq->vring.avail = pa + vq->vring.num * sizeof(VRingDesc);
    vq->vring.used = vring_align(vq->vring.avail +
                                 offsetof(VRingAvail, ring[vq->vring.num]),
                                 VIRTIO_PCI_VRING_ALIGN);
}

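/* The accessors below read and write the rings directly in guest physical
 * memory on every access; nothing is cached on the QEMU side. */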
static inline uint64_t vring_desc_addr(target_phys_addr_t desc_pa, int i)
{
    target_phys_addr_t pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, addr);
    return ldq_phys(pa);
}

static inline uint32_t vring_desc_len(target_phys_addr_t desc_pa, int i)
{
    target_phys_addr_t pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, len);
    return ldl_phys(pa);
}

static inline uint16_t vring_desc_flags(target_phys_addr_t desc_pa, int i)
{
    target_phys_addr_t pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, flags);
    return lduw_phys(pa);
}

static inline uint16_t vring_desc_next(target_phys_addr_t desc_pa, int i)
{
    target_phys_addr_t pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, next);
    return lduw_phys(pa);
}

static inline uint16_t vring_avail_flags(VirtQueue *vq)
{
    target_phys_addr_t pa;
    pa = vq->vring.avail + offsetof(VRingAvail, flags);
    return lduw_phys(pa);
}

static inline uint16_t vring_avail_idx(VirtQueue *vq)
{
    target_phys_addr_t pa;
    pa = vq->vring.avail + offsetof(VRingAvail, idx);
    return lduw_phys(pa);
}

static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
{
    target_phys_addr_t pa;
    pa = vq->vring.avail + offsetof(VRingAvail, ring[i]);
    return lduw_phys(pa);
}

static inline void vring_used_ring_id(VirtQueue *vq, int i, uint32_t val)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, ring[i].id);
    stl_phys(pa, val);
}

static inline void vring_used_ring_len(VirtQueue *vq, int i, uint32_t val)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, ring[i].len);
    stl_phys(pa, val);
}

static uint16_t vring_used_idx(VirtQueue *vq)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, idx);
    return lduw_phys(pa);
}

static inline void vring_used_idx_increment(VirtQueue *vq, uint16_t val)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, idx);
    stw_phys(pa, vring_used_idx(vq) + val);
}

static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, flags);
    stw_phys(pa, lduw_phys(pa) | mask);
}

static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, flags);
    stw_phys(pa, lduw_phys(pa) & ~mask);
}

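/* Enable or disable guest->host notifications ("kicks") for this queue by
 * toggling VRING_USED_F_NO_NOTIFY in the used ring flags.  This is only a
 * hint to the guest; a notification may still arrive after disabling. */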
void virtio_queue_set_notification(VirtQueue *vq, int enable)
{
    if (enable)
        vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
    else
        vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
}

int virtio_queue_ready(VirtQueue *vq)
{
    return vq->vring.avail != 0;
}

int virtio_queue_empty(VirtQueue *vq)
{
    return vring_avail_idx(vq) == vq->last_avail_idx;
}

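/* Unmap the guest buffers of a completed element and record it in the used
 * ring at offset idx past the current used index.  The used index itself is
 * only published later, by virtqueue_flush(). */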
void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len, unsigned int idx)
{
    unsigned int offset;
    int i;

    offset = 0;
    for (i = 0; i < elem->in_num; i++) {
        size_t size = MIN(len - offset, elem->in_sg[i].iov_len);

        cpu_physical_memory_unmap(elem->in_sg[i].iov_base,
                                  elem->in_sg[i].iov_len,
                                  1, size);

        /* Advance by the bytes actually written, so that len - offset
         * cannot wrap around on the next iteration. */
        offset += size;
    }

    for (i = 0; i < elem->out_num; i++)
        cpu_physical_memory_unmap(elem->out_sg[i].iov_base,
                                  elem->out_sg[i].iov_len,
                                  0, elem->out_sg[i].iov_len);

    idx = (idx + vring_used_idx(vq)) % vq->vring.num;

    /* Get a pointer to the next entry in the used ring. */
    vring_used_ring_id(vq, idx, elem->index);
    vring_used_ring_len(vq, idx, len);
}

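/* Publish the used ring index, making the entries recorded by
 * virtqueue_fill() visible to the guest.  The write barrier keeps the ring
 * contents ordered before the index update. */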
void virtqueue_flush(VirtQueue *vq, unsigned int count)
{
    /* Make sure buffer is written before we update index. */
    wmb();
    vring_used_idx_increment(vq, count);
    vq->inuse -= count;
}

void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len)
{
    virtqueue_fill(vq, elem, len, 0);
    virtqueue_flush(vq, 1);
}

static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
{
    uint16_t num_heads = vring_avail_idx(vq) - idx;

    /* Check it isn't doing very strange things with descriptor numbers. */
    if (num_heads > vq->vring.num) {
        fprintf(stderr, "Guest moved used index from %u to %u",
                idx, vring_avail_idx(vq));
        exit(1);
    }

    return num_heads;
}

static unsigned int virtqueue_get_head(VirtQueue *vq, unsigned int idx)
{
    unsigned int head;

    /* Grab the next descriptor number they're advertising, and increment
     * the index we've seen. */
    head = vring_avail_ring(vq, idx % vq->vring.num);

    /* If their number is silly, that's a fatal mistake. */
    if (head >= vq->vring.num) {
        fprintf(stderr, "Guest says index %u is available", head);
        exit(1);
    }

    return head;
}

static unsigned virtqueue_next_desc(target_phys_addr_t desc_pa,
                                    unsigned int i, unsigned int max)
{
    unsigned int next;

    /* If this descriptor says it doesn't chain, we're done. */
    if (!(vring_desc_flags(desc_pa, i) & VRING_DESC_F_NEXT))
        return max;

    /* Check they're not leading us off end of descriptors. */
    next = vring_desc_next(desc_pa, i);
    /* Make sure compiler knows to grab that: we don't want it changing! */
    wmb();

    if (next >= max) {
        fprintf(stderr, "Desc next is %u", next);
        exit(1);
    }

    return next;
}

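/* Walk the available chains without consuming them and report whether the
 * queue can currently supply at least in_bytes of device-writable and/or
 * out_bytes of device-readable descriptor space.  Returns 1 as soon as
 * either requested total is reachable, 0 otherwise. */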
int virtqueue_avail_bytes(VirtQueue *vq, int in_bytes, int out_bytes)
{
    unsigned int idx;
    int total_bufs, in_total, out_total;

    idx = vq->last_avail_idx;

    total_bufs = in_total = out_total = 0;
    while (virtqueue_num_heads(vq, idx)) {
        unsigned int max, num_bufs, indirect = 0;
        target_phys_addr_t desc_pa;
        int i;

        max = vq->vring.num;
        num_bufs = total_bufs;
        i = virtqueue_get_head(vq, idx++);
        desc_pa = vq->vring.desc;

        if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_INDIRECT) {
            if (vring_desc_len(desc_pa, i) % sizeof(VRingDesc)) {
                fprintf(stderr, "Invalid size for indirect buffer table\n");
                exit(1);
            }

            /* If we've got too many, that implies a descriptor loop. */
            if (num_bufs >= max) {
                fprintf(stderr, "Looped descriptor");
                exit(1);
            }

            /* loop over the indirect descriptor table */
            indirect = 1;
            max = vring_desc_len(desc_pa, i) / sizeof(VRingDesc);
            /* Read the table address while i still indexes the head
             * descriptor, before resetting it. */
            desc_pa = vring_desc_addr(desc_pa, i);
            num_bufs = i = 0;
        }

        do {
            /* If we've got too many, that implies a descriptor loop. */
            if (++num_bufs > max) {
                fprintf(stderr, "Looped descriptor");
                exit(1);
            }

            if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_WRITE) {
                if (in_bytes > 0 &&
                    (in_total += vring_desc_len(desc_pa, i)) >= in_bytes)
                    return 1;
            } else {
                if (out_bytes > 0 &&
                    (out_total += vring_desc_len(desc_pa, i)) >= out_bytes)
                    return 1;
            }
        } while ((i = virtqueue_next_desc(desc_pa, i, max)) != max);

        if (!indirect)
            total_bufs = num_bufs;
        else
            total_bufs++;
    }

    return 0;
}

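/* Consume the next available chain: map each descriptor into host memory
 * and collect the segments into elem->out_sg (device-readable) and
 * elem->in_sg (device-writable).  Returns the total number of segments, or
 * 0 if the queue is empty.  A typical handle_output callback drains the
 * queue with this pattern (a sketch, using a hypothetical
 * process_buffers() for the device-specific work):
 *
 *     VirtQueueElement elem;
 *     while (virtqueue_pop(vq, &elem)) {
 *         unsigned int len = process_buffers(&elem);
 *         virtqueue_push(vq, &elem, len);
 *     }
 *     virtio_notify(vdev, vq);
 */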
int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem)
{
    unsigned int i, head, max;
    target_phys_addr_t desc_pa = vq->vring.desc;
    target_phys_addr_t len;

    if (!virtqueue_num_heads(vq, vq->last_avail_idx))
        return 0;

    /* When we start there are no input or output buffers. */
    elem->out_num = elem->in_num = 0;

    max = vq->vring.num;

    i = head = virtqueue_get_head(vq, vq->last_avail_idx++);

    if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_INDIRECT) {
        if (vring_desc_len(desc_pa, i) % sizeof(VRingDesc)) {
            fprintf(stderr, "Invalid size for indirect buffer table\n");
            exit(1);
        }

        /* loop over the indirect descriptor table */
        max = vring_desc_len(desc_pa, i) / sizeof(VRingDesc);
        desc_pa = vring_desc_addr(desc_pa, i);
        i = 0;
    }

    do {
        struct iovec *sg;
        int is_write = 0;

        if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_WRITE) {
            elem->in_addr[elem->in_num] = vring_desc_addr(desc_pa, i);
            sg = &elem->in_sg[elem->in_num++];
            is_write = 1;
        } else
            sg = &elem->out_sg[elem->out_num++];

        /* Grab the first descriptor, and check it's OK. */
        sg->iov_len = vring_desc_len(desc_pa, i);
        len = sg->iov_len;

        sg->iov_base = cpu_physical_memory_map(vring_desc_addr(desc_pa, i),
                                               &len, is_write);

        if (sg->iov_base == NULL || len != sg->iov_len) {
            fprintf(stderr, "virtio: trying to map MMIO memory\n");
            exit(1);
        }

        /* If we've got too many, that implies a descriptor loop. */
        if ((elem->in_num + elem->out_num) > max) {
            fprintf(stderr, "Looped descriptor");
            exit(1);
        }
    } while ((i = virtqueue_next_desc(desc_pa, i, max)) != max);

    elem->index = head;

    vq->inuse++;

    return elem->in_num + elem->out_num;
}

/* virtio device */
static void virtio_notify_vector(VirtIODevice *vdev, uint16_t vector)
{
    if (vdev->binding->notify) {
        vdev->binding->notify(vdev->binding_opaque, vector);
    }
}

void virtio_update_irq(VirtIODevice *vdev)
{
    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
}

void virtio_reset(void *opaque)
{
    VirtIODevice *vdev = opaque;
    int i;

    if (vdev->reset)
        vdev->reset(vdev);

    vdev->guest_features = 0;
    vdev->queue_sel = 0;
    vdev->status = 0;
    vdev->isr = 0;
    vdev->config_vector = VIRTIO_NO_VECTOR;
    virtio_notify_vector(vdev, vdev->config_vector);

    for(i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        vdev->vq[i].vring.desc = 0;
        vdev->vq[i].vring.avail = 0;
        vdev->vq[i].vring.used = 0;
        vdev->vq[i].last_avail_idx = 0;
        vdev->vq[i].pa = 0;
        vdev->vq[i].vector = VIRTIO_NO_VECTOR;
    }
}

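/* Config space accessors.  Reads refresh the cached config from the device
 * model via get_config() before copying out; writes copy in and then push
 * the update to the device model via set_config(), if present. */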
uint32_t virtio_config_readb(VirtIODevice *vdev, uint32_t addr)
{
    uint8_t val;

    vdev->get_config(vdev, vdev->config);

    if (addr > (vdev->config_len - sizeof(val)))
        return (uint32_t)-1;

    memcpy(&val, vdev->config + addr, sizeof(val));
    return val;
}

uint32_t virtio_config_readw(VirtIODevice *vdev, uint32_t addr)
{
    uint16_t val;

    vdev->get_config(vdev, vdev->config);

    if (addr > (vdev->config_len - sizeof(val)))
        return (uint32_t)-1;

    memcpy(&val, vdev->config + addr, sizeof(val));
    return val;
}

uint32_t virtio_config_readl(VirtIODevice *vdev, uint32_t addr)
{
    uint32_t val;

    vdev->get_config(vdev, vdev->config);

    if (addr > (vdev->config_len - sizeof(val)))
        return (uint32_t)-1;

    memcpy(&val, vdev->config + addr, sizeof(val));
    return val;
}

void virtio_config_writeb(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    uint8_t val = data;

    if (addr > (vdev->config_len - sizeof(val)))
        return;

    memcpy(vdev->config + addr, &val, sizeof(val));

    if (vdev->set_config)
        vdev->set_config(vdev, vdev->config);
}

void virtio_config_writew(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    uint16_t val = data;

    if (addr > (vdev->config_len - sizeof(val)))
        return;

    memcpy(vdev->config + addr, &val, sizeof(val));

    if (vdev->set_config)
        vdev->set_config(vdev, vdev->config);
}

void virtio_config_writel(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    uint32_t val = data;

    if (addr > (vdev->config_len - sizeof(val)))
        return;

    memcpy(vdev->config + addr, &val, sizeof(val));

    if (vdev->set_config)
        vdev->set_config(vdev, vdev->config);
}

void virtio_queue_set_addr(VirtIODevice *vdev, int n, target_phys_addr_t addr)
{
    vdev->vq[n].pa = addr;
    virtqueue_init(&vdev->vq[n]);
}

target_phys_addr_t virtio_queue_get_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].pa;
}

int virtio_queue_get_num(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.num;
}

void virtio_queue_notify(VirtIODevice *vdev, int n)
{
    if (n < VIRTIO_PCI_QUEUE_MAX && vdev->vq[n].vring.desc) {
        vdev->vq[n].handle_output(vdev, &vdev->vq[n]);
    }
}

uint16_t virtio_queue_vector(VirtIODevice *vdev, int n)
{
    return n < VIRTIO_PCI_QUEUE_MAX ? vdev->vq[n].vector :
        VIRTIO_NO_VECTOR;
}

void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector)
{
    if (n < VIRTIO_PCI_QUEUE_MAX)
        vdev->vq[n].vector = vector;
}

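/* Allocate the next free queue slot.  Aborts if all VIRTIO_PCI_QUEUE_MAX
 * slots are taken or queue_size exceeds VIRTQUEUE_MAX_SIZE. */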
VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
                            void (*handle_output)(VirtIODevice *, VirtQueue *))
{
    int i;

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;
    }

    if (i == VIRTIO_PCI_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE)
        abort();

    vdev->vq[i].vring.num = queue_size;
    vdev->vq[i].handle_output = handle_output;

    return &vdev->vq[i];
}

void virtio_irq(VirtQueue *vq)
{
    vq->vdev->isr |= 0x01;
    virtio_notify_vector(vq->vdev, vq->vector);
}

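/* Interrupt the guest for this queue unless it asked for suppression.  The
 * interrupt is skipped if the guest set VRING_AVAIL_F_NO_INTERRUPT, except
 * when VIRTIO_F_NOTIFY_ON_EMPTY was negotiated and the queue has just
 * drained completely (nothing in flight, nothing pending). */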
void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    /* Always notify when queue is empty (when feature acknowledged) */
    if ((vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT) &&
        (!(vdev->guest_features & (1 << VIRTIO_F_NOTIFY_ON_EMPTY)) ||
         (vq->inuse || vring_avail_idx(vq) != vq->last_avail_idx)))
        return;

    vdev->isr |= 0x01;
    virtio_notify_vector(vdev, vq->vector);
}

void virtio_notify_config(VirtIODevice *vdev)
{
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
        return;

    vdev->isr |= 0x03;
    virtio_notify_vector(vdev, vdev->config_vector);
}

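/* Save device state: common fields and config space first, then the number
 * of populated queues followed by per-queue ring size, base address and
 * last_avail_idx.  The binding gets hooks before the common fields and
 * after each queue. */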
void virtio_save(VirtIODevice *vdev, QEMUFile *f)
{
    int i;

    if (vdev->binding->save_config)
        vdev->binding->save_config(vdev->binding_opaque, f);

    qemu_put_8s(f, &vdev->status);
    qemu_put_8s(f, &vdev->isr);
    qemu_put_be16s(f, &vdev->queue_sel);
    qemu_put_be32s(f, &vdev->guest_features);
    qemu_put_be32(f, vdev->config_len);
    qemu_put_buffer(f, vdev->config, vdev->config_len);

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;
    }

    qemu_put_be32(f, i);

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;

        qemu_put_be32(f, vdev->vq[i].vring.num);
        qemu_put_be64(f, vdev->vq[i].pa);
        qemu_put_be16s(f, &vdev->vq[i].last_avail_idx);
        if (vdev->binding->save_queue)
            vdev->binding->save_queue(vdev->binding_opaque, i, f);
    }
}

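/* Load device state saved by virtio_save().  Fails if the saved guest
 * features include bits the current binding does not support. */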
int virtio_load(VirtIODevice *vdev, QEMUFile *f)
{
    int num, i, ret;
    uint32_t features;
    uint32_t supported_features =
        vdev->binding->get_features(vdev->binding_opaque);

    if (vdev->binding->load_config) {
        ret = vdev->binding->load_config(vdev->binding_opaque, f);
        if (ret)
            return ret;
    }

    qemu_get_8s(f, &vdev->status);
    qemu_get_8s(f, &vdev->isr);
    qemu_get_be16s(f, &vdev->queue_sel);
    qemu_get_be32s(f, &features);
    if (features & ~supported_features) {
        fprintf(stderr, "Features 0x%x unsupported. Allowed features: 0x%x\n",
                features, supported_features);
        return -1;
    }
    if (vdev->set_features)
        vdev->set_features(vdev, features);
    vdev->guest_features = features;
    vdev->config_len = qemu_get_be32(f);
    qemu_get_buffer(f, vdev->config, vdev->config_len);

    num = qemu_get_be32(f);

    for (i = 0; i < num; i++) {
        vdev->vq[i].vring.num = qemu_get_be32(f);
        vdev->vq[i].pa = qemu_get_be64(f);
        qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);

        if (vdev->vq[i].pa) {
            virtqueue_init(&vdev->vq[i]);
        }
        if (vdev->binding->load_queue) {
            ret = vdev->binding->load_queue(vdev->binding_opaque, i, f);
            if (ret)
                return ret;
        }
    }

    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
    return 0;
}

void virtio_cleanup(VirtIODevice *vdev)
{
    if (vdev->config)
        qemu_free(vdev->config);
    qemu_free(vdev->vq);
}

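/* Allocate and initialize the common VirtIODevice state embedded at the
 * start of a device-specific structure of struct_size bytes.  A device
 * model typically calls this and then registers its queues, e.g. (a sketch
 * loosely following the virtio-blk device):
 *
 *     vdev = virtio_common_init("virtio-blk", VIRTIO_ID_BLOCK,
 *                               sizeof(struct virtio_blk_config),
 *                               sizeof(VirtIOBlock));
 *     vq = virtio_add_queue(vdev, 128, handle_output);
 */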
VirtIODevice *virtio_common_init(const char *name, uint16_t device_id,
                                 size_t config_size, size_t struct_size)
{
    VirtIODevice *vdev;
    int i;

    vdev = qemu_mallocz(struct_size);

    vdev->device_id = device_id;
    vdev->status = 0;
    vdev->isr = 0;
    vdev->queue_sel = 0;
    vdev->config_vector = VIRTIO_NO_VECTOR;
    vdev->vq = qemu_mallocz(sizeof(VirtQueue) * VIRTIO_PCI_QUEUE_MAX);
    for(i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        vdev->vq[i].vector = VIRTIO_NO_VECTOR;
        vdev->vq[i].vdev = vdev;
    }

    vdev->name = name;
    vdev->config_len = config_size;
    if (vdev->config_len)
        vdev->config = qemu_mallocz(config_size);
    else
        vdev->config = NULL;

    return vdev;
}

void virtio_bind_device(VirtIODevice *vdev, const VirtIOBindings *binding,
                        void *opaque)
{
    vdev->binding = binding;
    vdev->binding_opaque = opaque;
}

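/* Ring geometry accessors, exposing the guest-physical addresses and sizes
 * of the vring parts so that host-side helpers (e.g. vhost) can map them. */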
target_phys_addr_t virtio_queue_get_desc_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

target_phys_addr_t virtio_queue_get_avail_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.avail;
}

target_phys_addr_t virtio_queue_get_used_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.used;
}

target_phys_addr_t virtio_queue_get_ring_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

target_phys_addr_t virtio_queue_get_desc_size(VirtIODevice *vdev, int n)
{
    return sizeof(VRingDesc) * vdev->vq[n].vring.num;
}

target_phys_addr_t virtio_queue_get_avail_size(VirtIODevice *vdev, int n)
{
    /* The avail ring entries are 16-bit descriptor indexes. */
    return offsetof(VRingAvail, ring) +
        sizeof(uint16_t) * vdev->vq[n].vring.num;
}

target_phys_addr_t virtio_queue_get_used_size(VirtIODevice *vdev, int n)
{
    return offsetof(VRingUsed, ring) +
        sizeof(VRingUsedElem) * vdev->vq[n].vring.num;
}

target_phys_addr_t virtio_queue_get_ring_size(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.used - vdev->vq[n].vring.desc +
        virtio_queue_get_used_size(vdev, n);
}

uint16_t virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].last_avail_idx;
}

void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, uint16_t idx)
{
    vdev->vq[n].last_avail_idx = idx;
}

VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n)
{
    return vdev->vq + n;
}

EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq)
{
    return &vq->guest_notifier;
}

EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq)
{
    return &vq->host_notifier;
}