/*
 * Virtio Support
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <inttypes.h>

#include "virtio.h"
#include "sysemu.h"

/* The alignment to use between consumer and producer parts of vring.
 * x86 pagesize again. */
#define VIRTIO_PCI_VRING_ALIGN 4096

/* QEMU doesn't strictly need write barriers since everything runs in
 * lock-step.  We'll leave the calls to wmb() in though to make it obvious for
 * KVM or if kqemu gets SMP support.
 * In any case, we must prevent the compiler from reordering the code.
 * TODO: we likely need some rmb()/mb() as well.
 */

#define wmb() __asm__ __volatile__("": : :"memory")

typedef struct VRingDesc
{
    uint64_t addr;
    uint32_t len;
    uint16_t flags;
    uint16_t next;
} VRingDesc;

typedef struct VRingAvail
{
    uint16_t flags;
    uint16_t idx;
    uint16_t ring[0];
} VRingAvail;

typedef struct VRingUsedElem
{
    uint32_t id;
    uint32_t len;
} VRingUsedElem;

typedef struct VRingUsed
{
    uint16_t flags;
    uint16_t idx;
    VRingUsedElem ring[0];
} VRingUsed;

typedef struct VRing
{
    unsigned int num;
    target_phys_addr_t desc;
    target_phys_addr_t avail;
    target_phys_addr_t used;
} VRing;
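
/* Note (added for orientation): the three vring regions live contiguously
 * in guest memory, in the order descriptor table, available ring, used
 * ring, with the used ring rounded up to VIRTIO_PCI_VRING_ALIGN; see
 * virtqueue_init() below for the exact layout computation. */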

struct VirtQueue
{
    VRing vring;
    target_phys_addr_t pa;
    uint16_t last_avail_idx;
    int inuse;
    uint16_t vector;
    void (*handle_output)(VirtIODevice *vdev, VirtQueue *vq);
};

/* virt queue functions */
static void virtqueue_init(VirtQueue *vq)
{
    target_phys_addr_t pa = vq->pa;

    vq->vring.desc = pa;
    vq->vring.avail = pa + vq->vring.num * sizeof(VRingDesc);
    vq->vring.used = vring_align(vq->vring.avail +
                                 offsetof(VRingAvail, ring[vq->vring.num]),
                                 VIRTIO_PCI_VRING_ALIGN);
}
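
/* Worked example of the layout above (illustrative only): with num = 256,
 * the descriptor table occupies 256 * sizeof(VRingDesc) = 4096 bytes, so
 * the available ring starts at offset 4096 and ends at 4096 + 4 + 2 * 256
 * = 4612; vring_align() then places the used ring at the next 4096-byte
 * boundary, offset 8192 from vq->pa. */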

static inline uint64_t vring_desc_addr(target_phys_addr_t desc_pa, int i)
{
    target_phys_addr_t pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, addr);
    return ldq_phys(pa);
}

static inline uint32_t vring_desc_len(target_phys_addr_t desc_pa, int i)
{
    target_phys_addr_t pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, len);
    return ldl_phys(pa);
}

static inline uint16_t vring_desc_flags(target_phys_addr_t desc_pa, int i)
{
    target_phys_addr_t pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, flags);
    return lduw_phys(pa);
}

static inline uint16_t vring_desc_next(target_phys_addr_t desc_pa, int i)
{
    target_phys_addr_t pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, next);
    return lduw_phys(pa);
}

static inline uint16_t vring_avail_flags(VirtQueue *vq)
{
    target_phys_addr_t pa;
    pa = vq->vring.avail + offsetof(VRingAvail, flags);
    return lduw_phys(pa);
}

static inline uint16_t vring_avail_idx(VirtQueue *vq)
{
    target_phys_addr_t pa;
    pa = vq->vring.avail + offsetof(VRingAvail, idx);
    return lduw_phys(pa);
}

static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
{
    target_phys_addr_t pa;
    pa = vq->vring.avail + offsetof(VRingAvail, ring[i]);
    return lduw_phys(pa);
}

static inline void vring_used_ring_id(VirtQueue *vq, int i, uint32_t val)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, ring[i].id);
    stl_phys(pa, val);
}

static inline void vring_used_ring_len(VirtQueue *vq, int i, uint32_t val)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, ring[i].len);
    stl_phys(pa, val);
}

static uint16_t vring_used_idx(VirtQueue *vq)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, idx);
    return lduw_phys(pa);
}

static inline void vring_used_idx_increment(VirtQueue *vq, uint16_t val)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, idx);
    stw_phys(pa, vring_used_idx(vq) + val);
}

static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, flags);
    stw_phys(pa, lduw_phys(pa) | mask);
}

static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, flags);
    stw_phys(pa, lduw_phys(pa) & ~mask);
}

void virtio_queue_set_notification(VirtQueue *vq, int enable)
{
    if (enable)
        vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
    else
        vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
}
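
/* Illustrative sketch (not part of this file): a device's output handler
 * can use the call above to batch work by masking guest kicks while it
 * drains the ring, re-checking after unmasking to close the race.
 * "my_process" is a hypothetical helper. */
#if 0
static void my_handle_output(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtQueueElement elem;

    do {
        virtio_queue_set_notification(vq, 0);   /* mask kicks while draining */
        while (virtqueue_pop(vq, &elem)) {
            my_process(&elem);                  /* hypothetical helper */
            virtqueue_push(vq, &elem, 0);
        }
        virtio_queue_set_notification(vq, 1);   /* unmask, then re-check */
    } while (!virtio_queue_empty(vq));
    virtio_notify(vdev, vq);
}
#endif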

int virtio_queue_ready(VirtQueue *vq)
{
    return vq->vring.avail != 0;
}

int virtio_queue_empty(VirtQueue *vq)
{
    return vring_avail_idx(vq) == vq->last_avail_idx;
}

void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len, unsigned int idx)
{
    unsigned int offset;
    int i;

    offset = 0;
    for (i = 0; i < elem->in_num; i++) {
        size_t size = MIN(len - offset, elem->in_sg[i].iov_len);

        cpu_physical_memory_unmap(elem->in_sg[i].iov_base,
                                  elem->in_sg[i].iov_len,
                                  1, size);

        offset += elem->in_sg[i].iov_len;
    }

    for (i = 0; i < elem->out_num; i++)
        cpu_physical_memory_unmap(elem->out_sg[i].iov_base,
                                  elem->out_sg[i].iov_len,
                                  0, elem->out_sg[i].iov_len);

    idx = (idx + vring_used_idx(vq)) % vq->vring.num;

    /* Fill in the next entry of the used ring. */
    vring_used_ring_id(vq, idx, elem->index);
    vring_used_ring_len(vq, idx, len);
}

void virtqueue_flush(VirtQueue *vq, unsigned int count)
{
    /* Make sure buffer is written before we update index. */
    wmb();
    vring_used_idx_increment(vq, count);
    vq->inuse -= count;
}

void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len)
{
    virtqueue_fill(vq, elem, len, 0);
    virtqueue_flush(vq, 1);
}

static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
{
    uint16_t num_heads = vring_avail_idx(vq) - idx;

    /* Check it isn't doing very strange things with descriptor numbers. */
    if (num_heads > vq->vring.num) {
        fprintf(stderr, "Guest moved avail index from %u to %u\n",
                idx, vring_avail_idx(vq));
        exit(1);
    }

    return num_heads;
}

static unsigned int virtqueue_get_head(VirtQueue *vq, unsigned int idx)
{
    unsigned int head;

    /* Grab the next descriptor number they're advertising, and increment
     * the index we've seen. */
    head = vring_avail_ring(vq, idx % vq->vring.num);

    /* If their number is silly, that's a fatal mistake. */
    if (head >= vq->vring.num) {
        fprintf(stderr, "Guest says index %u is available\n", head);
        exit(1);
    }

    return head;
}

static unsigned virtqueue_next_desc(target_phys_addr_t desc_pa,
                                    unsigned int i, unsigned int max)
{
    unsigned int next;

    /* If this descriptor says it doesn't chain, we're done. */
    if (!(vring_desc_flags(desc_pa, i) & VRING_DESC_F_NEXT))
        return max;

    /* Check they're not leading us off the end of the descriptors. */
    next = vring_desc_next(desc_pa, i);
    /* Make sure compiler knows to grab that: we don't want it changing! */
    wmb();

    if (next >= max) {
        fprintf(stderr, "Desc next is %u\n", next);
        exit(1);
    }

    return next;
}

int virtqueue_avail_bytes(VirtQueue *vq, int in_bytes, int out_bytes)
{
    unsigned int idx;
    int total_bufs, in_total, out_total;

    idx = vq->last_avail_idx;

    total_bufs = in_total = out_total = 0;
    while (virtqueue_num_heads(vq, idx)) {
        unsigned int max, num_bufs, indirect = 0;
        target_phys_addr_t desc_pa;
        int i;

        max = vq->vring.num;
        num_bufs = total_bufs;
        i = virtqueue_get_head(vq, idx++);
        desc_pa = vq->vring.desc;

        if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_INDIRECT) {
            if (vring_desc_len(desc_pa, i) % sizeof(VRingDesc)) {
                fprintf(stderr, "Invalid size for indirect buffer table\n");
                exit(1);
            }

            /* If we've got too many, that implies a descriptor loop. */
            if (num_bufs >= max) {
                fprintf(stderr, "Looped descriptor\n");
                exit(1);
            }

            /* loop over the indirect descriptor table; read the table's
             * address before resetting i, as in virtqueue_pop() below */
            indirect = 1;
            max = vring_desc_len(desc_pa, i) / sizeof(VRingDesc);
            desc_pa = vring_desc_addr(desc_pa, i);
            num_bufs = i = 0;
        }

        do {
            /* If we've got too many, that implies a descriptor loop. */
            if (++num_bufs > max) {
                fprintf(stderr, "Looped descriptor\n");
                exit(1);
            }

            if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_WRITE) {
                if (in_bytes > 0 &&
                    (in_total += vring_desc_len(desc_pa, i)) >= in_bytes)
                    return 1;
            } else {
                if (out_bytes > 0 &&
                    (out_total += vring_desc_len(desc_pa, i)) >= out_bytes)
                    return 1;
            }
        } while ((i = virtqueue_next_desc(desc_pa, i, max)) != max);

        if (!indirect)
            total_bufs = num_bufs;
        else
            total_bufs++;
    }

    return 0;
}
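
/* Illustrative use (a sketch; "packet_len" is hypothetical): a net device
 * can ask whether the guest has posted enough writable buffer space for a
 * whole packet before popping any descriptors. */
#if 0
    if (!virtqueue_avail_bytes(vq, packet_len, 0))
        return;    /* guest hasn't given us room for this packet yet */
#endif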

int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem)
{
    unsigned int i, head, max;
    target_phys_addr_t desc_pa = vq->vring.desc;
    target_phys_addr_t len;

    if (!virtqueue_num_heads(vq, vq->last_avail_idx))
        return 0;

    /* When we start there are no input or output buffers. */
    elem->out_num = elem->in_num = 0;

    max = vq->vring.num;

    i = head = virtqueue_get_head(vq, vq->last_avail_idx++);

    if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_INDIRECT) {
        if (vring_desc_len(desc_pa, i) % sizeof(VRingDesc)) {
            fprintf(stderr, "Invalid size for indirect buffer table\n");
            exit(1);
        }

        /* loop over the indirect descriptor table */
        max = vring_desc_len(desc_pa, i) / sizeof(VRingDesc);
        desc_pa = vring_desc_addr(desc_pa, i);
        i = 0;
    }

    do {
        struct iovec *sg;
        int is_write = 0;

        if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_WRITE) {
            elem->in_addr[elem->in_num] = vring_desc_addr(desc_pa, i);
            sg = &elem->in_sg[elem->in_num++];
            is_write = 1;
        } else
            sg = &elem->out_sg[elem->out_num++];

        /* Grab the descriptor, and check it's OK. */
        sg->iov_len = vring_desc_len(desc_pa, i);
        len = sg->iov_len;

        sg->iov_base = cpu_physical_memory_map(vring_desc_addr(desc_pa, i),
                                               &len, is_write);

        if (sg->iov_base == NULL || len != sg->iov_len) {
            fprintf(stderr, "virtio: trying to map MMIO memory\n");
            exit(1);
        }

        /* If we've got too many, that implies a descriptor loop. */
        if ((elem->in_num + elem->out_num) > max) {
            fprintf(stderr, "Looped descriptor\n");
            exit(1);
        }
    } while ((i = virtqueue_next_desc(desc_pa, i, max)) != max);

    elem->index = head;

    vq->inuse++;

    return elem->in_num + elem->out_num;
}
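
/* Sketch of consuming a popped element ("src" and "size" are hypothetical):
 * the device scatters its data across the writable in_sg entries, then
 * completes the request with the number of bytes actually written. */
#if 0
    size_t off = 0;
    int n;

    for (n = 0; n < elem.in_num && off < size; n++) {
        size_t chunk = MIN(size - off, elem.in_sg[n].iov_len);
        memcpy(elem.in_sg[n].iov_base, src + off, chunk);
        off += chunk;
    }
    virtqueue_push(vq, &elem, off);
    virtio_notify(vdev, vq);
#endif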

/* virtio device */
static void virtio_notify_vector(VirtIODevice *vdev, uint16_t vector)
{
    if (vdev->binding->notify) {
        vdev->binding->notify(vdev->binding_opaque, vector);
    }
}

void virtio_update_irq(VirtIODevice *vdev)
{
    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
}

void virtio_reset(void *opaque)
{
    VirtIODevice *vdev = opaque;
    int i;

    if (vdev->reset)
        vdev->reset(vdev);

    vdev->guest_features = 0;
    vdev->queue_sel = 0;
    vdev->status = 0;
    vdev->isr = 0;
    vdev->config_vector = VIRTIO_NO_VECTOR;
    virtio_notify_vector(vdev, vdev->config_vector);

    for(i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        vdev->vq[i].vring.desc = 0;
        vdev->vq[i].vring.avail = 0;
        vdev->vq[i].vring.used = 0;
        vdev->vq[i].last_avail_idx = 0;
        vdev->vq[i].pa = 0;
        vdev->vq[i].vector = VIRTIO_NO_VECTOR;
    }
}

uint32_t virtio_config_readb(VirtIODevice *vdev, uint32_t addr)
{
    uint8_t val;

    vdev->get_config(vdev, vdev->config);

    if (addr > (vdev->config_len - sizeof(val)))
        return (uint32_t)-1;

    memcpy(&val, vdev->config + addr, sizeof(val));
    return val;
}

uint32_t virtio_config_readw(VirtIODevice *vdev, uint32_t addr)
{
    uint16_t val;

    vdev->get_config(vdev, vdev->config);

    if (addr > (vdev->config_len - sizeof(val)))
        return (uint32_t)-1;

    memcpy(&val, vdev->config + addr, sizeof(val));
    return val;
}

uint32_t virtio_config_readl(VirtIODevice *vdev, uint32_t addr)
{
    uint32_t val;

    vdev->get_config(vdev, vdev->config);

    if (addr > (vdev->config_len - sizeof(val)))
        return (uint32_t)-1;

    memcpy(&val, vdev->config + addr, sizeof(val));
    return val;
}

void virtio_config_writeb(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    uint8_t val = data;

    if (addr > (vdev->config_len - sizeof(val)))
        return;

    memcpy(vdev->config + addr, &val, sizeof(val));

    if (vdev->set_config)
        vdev->set_config(vdev, vdev->config);
}

void virtio_config_writew(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    uint16_t val = data;

    if (addr > (vdev->config_len - sizeof(val)))
        return;

    memcpy(vdev->config + addr, &val, sizeof(val));

    if (vdev->set_config)
        vdev->set_config(vdev, vdev->config);
}

void virtio_config_writel(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    uint32_t val = data;

    if (addr > (vdev->config_len - sizeof(val)))
        return;

    memcpy(vdev->config + addr, &val, sizeof(val));

    if (vdev->set_config)
        vdev->set_config(vdev, vdev->config);
}
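
/* Sketch of the device callbacks these accessors rely on (the "my_*"
 * names and config struct are hypothetical): get_config renders current
 * device state into vdev->config before every read; set_config parses
 * guest writes back into device state. */
#if 0
static void my_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOMyDev *s = (VirtIOMyDev *)vdev;     /* hypothetical device state */
    struct my_config cfg;

    cfg.some_field = cpu_to_le32(s->some_field);
    memcpy(config, &cfg, sizeof(cfg));
}
#endif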

void virtio_queue_set_addr(VirtIODevice *vdev, int n, target_phys_addr_t addr)
{
    vdev->vq[n].pa = addr;
    virtqueue_init(&vdev->vq[n]);
}

target_phys_addr_t virtio_queue_get_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].pa;
}

int virtio_queue_get_num(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.num;
}

void virtio_queue_notify(VirtIODevice *vdev, int n)
{
    if (n < VIRTIO_PCI_QUEUE_MAX && vdev->vq[n].vring.desc) {
        vdev->vq[n].handle_output(vdev, &vdev->vq[n]);
    }
}

uint16_t virtio_queue_vector(VirtIODevice *vdev, int n)
{
    return n < VIRTIO_PCI_QUEUE_MAX ? vdev->vq[n].vector :
        VIRTIO_NO_VECTOR;
}

void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector)
{
    if (n < VIRTIO_PCI_QUEUE_MAX)
        vdev->vq[n].vector = vector;
}

VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
                            void (*handle_output)(VirtIODevice *, VirtQueue *))
{
    int i;

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;
    }

    if (i == VIRTIO_PCI_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE)
        abort();

    vdev->vq[i].vring.num = queue_size;
    vdev->vq[i].handle_output = handle_output;

    return &vdev->vq[i];
}

void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    /* Always notify when the queue is empty (if the guest acknowledged
     * the VIRTIO_F_NOTIFY_ON_EMPTY feature) */
    if ((vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT) &&
        (!(vdev->guest_features & (1 << VIRTIO_F_NOTIFY_ON_EMPTY)) ||
         (vq->inuse || vring_avail_idx(vq) != vq->last_avail_idx)))
        return;

    vdev->isr |= 0x01;
    virtio_notify_vector(vdev, vq->vector);
}

void virtio_notify_config(VirtIODevice *vdev)
{
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
        return;

    vdev->isr |= 0x03;
    virtio_notify_vector(vdev, vdev->config_vector);
}

void virtio_save(VirtIODevice *vdev, QEMUFile *f)
{
    int i;

    if (vdev->binding->save_config)
        vdev->binding->save_config(vdev->binding_opaque, f);

    qemu_put_8s(f, &vdev->status);
    qemu_put_8s(f, &vdev->isr);
    qemu_put_be16s(f, &vdev->queue_sel);
    qemu_put_be32s(f, &vdev->guest_features);
    qemu_put_be32(f, vdev->config_len);
    qemu_put_buffer(f, vdev->config, vdev->config_len);

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;
    }

    qemu_put_be32(f, i);

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;

        qemu_put_be32(f, vdev->vq[i].vring.num);
        qemu_put_be64(f, vdev->vq[i].pa);
        qemu_put_be16s(f, &vdev->vq[i].last_avail_idx);
        if (vdev->binding->save_queue)
            vdev->binding->save_queue(vdev->binding_opaque, i, f);
    }
}
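
/* Sketch of a device's savevm handler ("VirtIOMyDev" and its fields are
 * hypothetical): common virtio state goes first via virtio_save(), then
 * any device-specific fields follow in the same stream. */
#if 0
static void my_save(QEMUFile *f, void *opaque)
{
    VirtIOMyDev *s = opaque;

    virtio_save(&s->vdev, f);
    qemu_put_be32(f, s->some_field);
}
#endif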

int virtio_load(VirtIODevice *vdev, QEMUFile *f)
{
    int num, i, ret;
    uint32_t features;
    uint32_t supported_features =
        vdev->binding->get_features(vdev->binding_opaque);

    if (vdev->binding->load_config) {
        ret = vdev->binding->load_config(vdev->binding_opaque, f);
        if (ret)
            return ret;
    }

    qemu_get_8s(f, &vdev->status);
    qemu_get_8s(f, &vdev->isr);
    qemu_get_be16s(f, &vdev->queue_sel);
    qemu_get_be32s(f, &features);
    if (features & ~supported_features) {
        fprintf(stderr, "Features 0x%x unsupported. Allowed features: 0x%x\n",
                features, supported_features);
        return -1;
    }
    vdev->guest_features = features;
    vdev->config_len = qemu_get_be32(f);
    qemu_get_buffer(f, vdev->config, vdev->config_len);

    num = qemu_get_be32(f);

    for (i = 0; i < num; i++) {
        vdev->vq[i].vring.num = qemu_get_be32(f);
        vdev->vq[i].pa = qemu_get_be64(f);
        qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);

        if (vdev->vq[i].pa) {
            virtqueue_init(&vdev->vq[i]);
        }
        if (vdev->binding->load_queue) {
            ret = vdev->binding->load_queue(vdev->binding_opaque, i, f);
            if (ret)
                return ret;
        }
    }

    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
    return 0;
}

void virtio_cleanup(VirtIODevice *vdev)
{
    if (vdev->config)
        qemu_free(vdev->config);
    qemu_free(vdev->vq);
}

VirtIODevice *virtio_common_init(const char *name, uint16_t device_id,
                                 size_t config_size, size_t struct_size)
{
    VirtIODevice *vdev;
    int i;

    vdev = qemu_mallocz(struct_size);

    vdev->device_id = device_id;
    vdev->status = 0;
    vdev->isr = 0;
    vdev->queue_sel = 0;
    vdev->config_vector = VIRTIO_NO_VECTOR;
    vdev->vq = qemu_mallocz(sizeof(VirtQueue) * VIRTIO_PCI_QUEUE_MAX);
    for(i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++)
        vdev->vq[i].vector = VIRTIO_NO_VECTOR;

    vdev->name = name;
    vdev->config_len = config_size;
    if (vdev->config_len)
        vdev->config = qemu_mallocz(config_size);
    else
        vdev->config = NULL;

    return vdev;
}

void virtio_bind_device(VirtIODevice *vdev, const VirtIOBindings *binding,
                        void *opaque)
{
    vdev->binding = binding;
    vdev->binding_opaque = opaque;
}
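
/* End-to-end bring-up sketch (all "my_*"/"MY_*" names are hypothetical):
 * a device model allocates its state with virtio_common_init(), registers
 * its queues with virtio_add_queue(), and the transport glue (virtio-pci,
 * for instance) later attaches itself via virtio_bind_device(). */
#if 0
VirtIODevice *my_device_init(void)
{
    VirtIODevice *vdev;

    vdev = virtio_common_init("my-device", MY_DEVICE_ID,
                              sizeof(struct my_config),
                              sizeof(VirtIOMyDev));
    virtio_add_queue(vdev, 128, my_handle_output);
    return vdev;
}
#endif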