/*
 * Virtio Support
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <inttypes.h>

#include "virtio.h"
#include "sysemu.h"

/* The alignment to use between consumer and producer parts of vring.
 * x86 pagesize again. */
#define VIRTIO_PCI_VRING_ALIGN         4096

/* QEMU doesn't strictly need write barriers since everything runs in
 * lock-step.  We'll leave the calls to wmb() in though to make it obvious for
 * KVM or if kqemu gets SMP support.
 * In any case, we must prevent the compiler from reordering the code.
 * TODO: we likely need some rmb()/mb() as well.
 */

#define wmb() __asm__ __volatile__("": : :"memory")

typedef struct VRingDesc
{
    uint64_t addr;
    uint32_t len;
    uint16_t flags;
    uint16_t next;
} VRingDesc;

typedef struct VRingAvail
{
    uint16_t flags;
    uint16_t idx;
    uint16_t ring[0];
} VRingAvail;

typedef struct VRingUsedElem
{
    uint32_t id;
    uint32_t len;
} VRingUsedElem;

typedef struct VRingUsed
{
    uint16_t flags;
    uint16_t idx;
    VRingUsedElem ring[0];
} VRingUsed;

typedef struct VRing
{
    unsigned int num;
    target_phys_addr_t desc;
    target_phys_addr_t avail;
    target_phys_addr_t used;
} VRing;

struct VirtQueue
{
    VRing vring;
    target_phys_addr_t pa;
    uint16_t last_avail_idx;
    int inuse;
    uint16_t vector;
    void (*handle_output)(VirtIODevice *vdev, VirtQueue *vq);
    VirtIODevice *vdev;
    EventNotifier guest_notifier;
    EventNotifier host_notifier;
};

/* virt queue functions */
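
/* Legacy vring memory layout, as computed by virtqueue_init() below from the
 * single base address the guest programs: the descriptor table, the avail
 * ring and the used ring live back to back in guest memory, with only the
 * used ring rounded up to VIRTIO_PCI_VRING_ALIGN:
 *
 *   desc:  pa
 *   avail: pa + num * sizeof(VRingDesc)
 *   used:  align(avail + offsetof(VRingAvail, ring[num]))
 */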
static void virtqueue_init(VirtQueue *vq)
{
    target_phys_addr_t pa = vq->pa;

    vq->vring.desc = pa;
    vq->vring.avail = pa + vq->vring.num * sizeof(VRingDesc);
    vq->vring.used = vring_align(vq->vring.avail +
                                 offsetof(VRingAvail, ring[vq->vring.num]),
                                 VIRTIO_PCI_VRING_ALIGN);
}

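/* The vring_*() accessors below operate on the rings in place in guest
 * physical memory via the ldX_phys()/stX_phys() helpers; no shadow copy of
 * the rings is kept on the host side. */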
static inline uint64_t vring_desc_addr(target_phys_addr_t desc_pa, int i)
{
    target_phys_addr_t pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, addr);
    return ldq_phys(pa);
}

static inline uint32_t vring_desc_len(target_phys_addr_t desc_pa, int i)
{
    target_phys_addr_t pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, len);
    return ldl_phys(pa);
}

static inline uint16_t vring_desc_flags(target_phys_addr_t desc_pa, int i)
{
    target_phys_addr_t pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, flags);
    return lduw_phys(pa);
}

static inline uint16_t vring_desc_next(target_phys_addr_t desc_pa, int i)
{
    target_phys_addr_t pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, next);
    return lduw_phys(pa);
}

static inline uint16_t vring_avail_flags(VirtQueue *vq)
{
    target_phys_addr_t pa;
    pa = vq->vring.avail + offsetof(VRingAvail, flags);
    return lduw_phys(pa);
}

static inline uint16_t vring_avail_idx(VirtQueue *vq)
{
    target_phys_addr_t pa;
    pa = vq->vring.avail + offsetof(VRingAvail, idx);
    return lduw_phys(pa);
}

static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
{
    target_phys_addr_t pa;
    pa = vq->vring.avail + offsetof(VRingAvail, ring[i]);
    return lduw_phys(pa);
}

static inline void vring_used_ring_id(VirtQueue *vq, int i, uint32_t val)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, ring[i].id);
    stl_phys(pa, val);
}

static inline void vring_used_ring_len(VirtQueue *vq, int i, uint32_t val)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, ring[i].len);
    stl_phys(pa, val);
}

static uint16_t vring_used_idx(VirtQueue *vq)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, idx);
    return lduw_phys(pa);
}

static inline void vring_used_idx_increment(VirtQueue *vq, uint16_t val)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, idx);
    stw_phys(pa, vring_used_idx(vq) + val);
}

static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, flags);
    stw_phys(pa, lduw_phys(pa) | mask);
}

static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, flags);
    stw_phys(pa, lduw_phys(pa) & ~mask);
}

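/* Publishing VRING_USED_F_NO_NOTIFY in the used ring tells the guest that
 * it may skip kicking the host when it queues new buffers; devices set it
 * while they are already busy draining the queue. */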
void virtio_queue_set_notification(VirtQueue *vq, int enable)
{
    if (enable)
        vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
    else
        vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
}

int virtio_queue_ready(VirtQueue *vq)
{
    return vq->vring.avail != 0;
}

int virtio_queue_empty(VirtQueue *vq)
{
    return vring_avail_idx(vq) == vq->last_avail_idx;
}

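/* Completion is a two step process: virtqueue_fill() unmaps an element's
 * buffers and writes its used ring entry at offset idx past the published
 * used index, then virtqueue_flush() publishes count entries at once by
 * advancing used->idx.  virtqueue_push() combines both for the common
 * one-element case. */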
void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len, unsigned int idx)
{
    unsigned int offset;
    int i;

    offset = 0;
    for (i = 0; i < elem->in_num; i++) {
        size_t size = MIN(len - offset, elem->in_sg[i].iov_len);

        cpu_physical_memory_unmap(elem->in_sg[i].iov_base,
                                  elem->in_sg[i].iov_len,
                                  1, size);

        offset += elem->in_sg[i].iov_len;
    }

    for (i = 0; i < elem->out_num; i++)
        cpu_physical_memory_unmap(elem->out_sg[i].iov_base,
                                  elem->out_sg[i].iov_len,
                                  0, elem->out_sg[i].iov_len);

    idx = (idx + vring_used_idx(vq)) % vq->vring.num;

    /* Fill in the next entry of the used ring. */
    vring_used_ring_id(vq, idx, elem->index);
    vring_used_ring_len(vq, idx, len);
}

void virtqueue_flush(VirtQueue *vq, unsigned int count)
{
    /* Make sure buffer is written before we update index. */
    wmb();
    vring_used_idx_increment(vq, count);
    vq->inuse -= count;
}

void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len)
{
    virtqueue_fill(vq, elem, len, 0);
    virtqueue_flush(vq, 1);
}

static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
{
    uint16_t num_heads = vring_avail_idx(vq) - idx;

    /* Check it isn't doing very strange things with descriptor numbers. */
    if (num_heads > vq->vring.num) {
        fprintf(stderr, "Guest moved used index from %u to %u",
                idx, vring_avail_idx(vq));
        exit(1);
    }

    return num_heads;
}

static unsigned int virtqueue_get_head(VirtQueue *vq, unsigned int idx)
{
    unsigned int head;

    /* Grab the next descriptor number they're advertising, and increment
     * the index we've seen. */
    head = vring_avail_ring(vq, idx % vq->vring.num);

    /* If their number is silly, that's a fatal mistake. */
    if (head >= vq->vring.num) {
        fprintf(stderr, "Guest says index %u is available", head);
        exit(1);
    }

    return head;
}

static unsigned virtqueue_next_desc(target_phys_addr_t desc_pa,
                                    unsigned int i, unsigned int max)
{
    unsigned int next;

    /* If this descriptor says it doesn't chain, we're done. */
    if (!(vring_desc_flags(desc_pa, i) & VRING_DESC_F_NEXT))
        return max;

    /* Check they're not leading us off end of descriptors. */
    next = vring_desc_next(desc_pa, i);
    /* Make sure compiler knows to grab that: we don't want it changing! */
    wmb();

    if (next >= max) {
        fprintf(stderr, "Desc next is %u", next);
        exit(1);
    }

    return next;
}

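/* Returns 1 as soon as the chains queued by the guest can satisfy a request
 * for in_bytes of device-writable space or out_bytes of device-readable
 * data (pass 0 to ignore a direction), walking indirect descriptor tables
 * where present; returns 0 otherwise. */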
int virtqueue_avail_bytes(VirtQueue *vq, int in_bytes, int out_bytes)
{
    unsigned int idx;
    int total_bufs, in_total, out_total;

    idx = vq->last_avail_idx;

    total_bufs = in_total = out_total = 0;
    while (virtqueue_num_heads(vq, idx)) {
        unsigned int max, num_bufs, indirect = 0;
        target_phys_addr_t desc_pa;
        int i;

        max = vq->vring.num;
        num_bufs = total_bufs;
        i = virtqueue_get_head(vq, idx++);
        desc_pa = vq->vring.desc;

        if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_INDIRECT) {
            if (vring_desc_len(desc_pa, i) % sizeof(VRingDesc)) {
                fprintf(stderr, "Invalid size for indirect buffer table\n");
                exit(1);
            }

            /* If we've got too many, that implies a descriptor loop. */
            if (num_bufs >= max) {
                fprintf(stderr, "Looped descriptor");
                exit(1);
            }

            /* loop over the indirect descriptor table; read the table
             * address from descriptor i before resetting i */
            indirect = 1;
            max = vring_desc_len(desc_pa, i) / sizeof(VRingDesc);
            desc_pa = vring_desc_addr(desc_pa, i);
            num_bufs = i = 0;
        }

        do {
            /* If we've got too many, that implies a descriptor loop. */
            if (++num_bufs > max) {
                fprintf(stderr, "Looped descriptor");
                exit(1);
            }

            if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_WRITE) {
                if (in_bytes > 0 &&
                    (in_total += vring_desc_len(desc_pa, i)) >= in_bytes)
                    return 1;
            } else {
                if (out_bytes > 0 &&
                    (out_total += vring_desc_len(desc_pa, i)) >= out_bytes)
                    return 1;
            }
        } while ((i = virtqueue_next_desc(desc_pa, i, max)) != max);

        if (!indirect)
            total_bufs = num_bufs;
        else
            total_bufs++;
    }

    return 0;
}

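/* Pops the next available descriptor chain: maps every descriptor in the
 * chain into host memory, gathering device-writable buffers into
 * elem->in_sg[] and device-readable ones into elem->out_sg[].  Returns the
 * total number of buffers mapped, or 0 if no chain is available. */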
int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem)
{
    unsigned int i, head, max;
    target_phys_addr_t desc_pa = vq->vring.desc;
    target_phys_addr_t len;

    if (!virtqueue_num_heads(vq, vq->last_avail_idx))
        return 0;

    /* When we start there are neither input nor output buffers. */
    elem->out_num = elem->in_num = 0;

    max = vq->vring.num;

    i = head = virtqueue_get_head(vq, vq->last_avail_idx++);

    if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_INDIRECT) {
        if (vring_desc_len(desc_pa, i) % sizeof(VRingDesc)) {
            fprintf(stderr, "Invalid size for indirect buffer table\n");
            exit(1);
        }

        /* loop over the indirect descriptor table */
        max = vring_desc_len(desc_pa, i) / sizeof(VRingDesc);
        desc_pa = vring_desc_addr(desc_pa, i);
        i = 0;
    }

    do {
        struct iovec *sg;
        int is_write = 0;

        if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_WRITE) {
            elem->in_addr[elem->in_num] = vring_desc_addr(desc_pa, i);
            sg = &elem->in_sg[elem->in_num++];
            is_write = 1;
        } else
            sg = &elem->out_sg[elem->out_num++];

        /* Grab the first descriptor, and check it's OK. */
        sg->iov_len = vring_desc_len(desc_pa, i);
        len = sg->iov_len;

        sg->iov_base = cpu_physical_memory_map(vring_desc_addr(desc_pa, i),
                                               &len, is_write);

        if (sg->iov_base == NULL || len != sg->iov_len) {
            fprintf(stderr, "virtio: trying to map MMIO memory\n");
            exit(1);
        }

        /* If we've got too many, that implies a descriptor loop. */
        if ((elem->in_num + elem->out_num) > max) {
            fprintf(stderr, "Looped descriptor");
            exit(1);
        }
    } while ((i = virtqueue_next_desc(desc_pa, i, max)) != max);

    elem->index = head;

    vq->inuse++;

    return elem->in_num + elem->out_num;
}

/* virtio device */
static void virtio_notify_vector(VirtIODevice *vdev, uint16_t vector)
{
    if (vdev->binding->notify) {
        vdev->binding->notify(vdev->binding_opaque, vector);
    }
}

void virtio_update_irq(VirtIODevice *vdev)
{
    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
}

void virtio_reset(void *opaque)
{
    VirtIODevice *vdev = opaque;
    int i;

    if (vdev->reset)
        vdev->reset(vdev);

    vdev->guest_features = 0;
    vdev->queue_sel = 0;
    vdev->status = 0;
    vdev->isr = 0;
    vdev->config_vector = VIRTIO_NO_VECTOR;
    virtio_notify_vector(vdev, vdev->config_vector);

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        vdev->vq[i].vring.desc = 0;
        vdev->vq[i].vring.avail = 0;
        vdev->vq[i].vring.used = 0;
        vdev->vq[i].last_avail_idx = 0;
        vdev->vq[i].pa = 0;
        vdev->vq[i].vector = VIRTIO_NO_VECTOR;
    }
}

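/* Config space accessors: each read refreshes the device's view of its
 * config space via get_config() before copying out, and each write copies
 * in and then lets the device react via set_config().  memcpy() is used so
 * that unaligned guest accesses are safe. */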
uint32_t virtio_config_readb(VirtIODevice *vdev, uint32_t addr)
{
    uint8_t val;

    vdev->get_config(vdev, vdev->config);

    if (addr > (vdev->config_len - sizeof(val)))
        return (uint32_t)-1;

    memcpy(&val, vdev->config + addr, sizeof(val));
    return val;
}

uint32_t virtio_config_readw(VirtIODevice *vdev, uint32_t addr)
{
    uint16_t val;

    vdev->get_config(vdev, vdev->config);

    if (addr > (vdev->config_len - sizeof(val)))
        return (uint32_t)-1;

    memcpy(&val, vdev->config + addr, sizeof(val));
    return val;
}

uint32_t virtio_config_readl(VirtIODevice *vdev, uint32_t addr)
{
    uint32_t val;

    vdev->get_config(vdev, vdev->config);

    if (addr > (vdev->config_len - sizeof(val)))
        return (uint32_t)-1;

    memcpy(&val, vdev->config + addr, sizeof(val));
    return val;
}

void virtio_config_writeb(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    uint8_t val = data;

    if (addr > (vdev->config_len - sizeof(val)))
        return;

    memcpy(vdev->config + addr, &val, sizeof(val));

    if (vdev->set_config)
        vdev->set_config(vdev, vdev->config);
}

void virtio_config_writew(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    uint16_t val = data;

    if (addr > (vdev->config_len - sizeof(val)))
        return;

    memcpy(vdev->config + addr, &val, sizeof(val));

    if (vdev->set_config)
        vdev->set_config(vdev, vdev->config);
}

void virtio_config_writel(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    uint32_t val = data;

    if (addr > (vdev->config_len - sizeof(val)))
        return;

    memcpy(vdev->config + addr, &val, sizeof(val));

    if (vdev->set_config)
        vdev->set_config(vdev, vdev->config);
}

void virtio_queue_set_addr(VirtIODevice *vdev, int n, target_phys_addr_t addr)
{
    vdev->vq[n].pa = addr;
    virtqueue_init(&vdev->vq[n]);
}

target_phys_addr_t virtio_queue_get_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].pa;
}

int virtio_queue_get_num(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.num;
}

void virtio_queue_notify(VirtIODevice *vdev, int n)
{
    if (n < VIRTIO_PCI_QUEUE_MAX && vdev->vq[n].vring.desc) {
        vdev->vq[n].handle_output(vdev, &vdev->vq[n]);
    }
}

uint16_t virtio_queue_vector(VirtIODevice *vdev, int n)
{
    return n < VIRTIO_PCI_QUEUE_MAX ? vdev->vq[n].vector :
        VIRTIO_NO_VECTOR;
}

void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector)
{
    if (n < VIRTIO_PCI_QUEUE_MAX)
        vdev->vq[n].vector = vector;
}

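/* A device registers each of its queues once at init time and then drains
 * them from its handle_output callback.  A minimal sketch of the device
 * side (my_handle_output and bytes_written are hypothetical names, not
 * part of this file):
 *
 *     vq = virtio_add_queue(vdev, 128, my_handle_output);
 *
 *     static void my_handle_output(VirtIODevice *vdev, VirtQueue *vq)
 *     {
 *         VirtQueueElement elem;
 *
 *         while (virtqueue_pop(vq, &elem)) {
 *             ...consume elem.out_sg[], produce results into elem.in_sg[]...
 *             virtqueue_push(vq, &elem, bytes_written);
 *         }
 *         virtio_notify(vdev, vq);
 *     }
 */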
VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
                            void (*handle_output)(VirtIODevice *, VirtQueue *))
{
    int i;

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;
    }

    if (i == VIRTIO_PCI_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE)
        abort();

    vdev->vq[i].vring.num = queue_size;
    vdev->vq[i].handle_output = handle_output;

    return &vdev->vq[i];
}

void virtio_irq(VirtQueue *vq)
{
    vq->vdev->isr |= 0x01;
    virtio_notify_vector(vq->vdev, vq->vector);
}

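/* Raise an interrupt for vq unless the guest suppressed interrupts with
 * VRING_AVAIL_F_NO_INTERRUPT.  If VIRTIO_F_NOTIFY_ON_EMPTY was negotiated,
 * an empty, fully processed queue is notified even when that flag is set. */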
void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    /* Always notify when queue is empty (if VIRTIO_F_NOTIFY_ON_EMPTY has
     * been acknowledged by the guest) */
    if ((vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT) &&
        (!(vdev->guest_features & (1 << VIRTIO_F_NOTIFY_ON_EMPTY)) ||
         (vq->inuse || vring_avail_idx(vq) != vq->last_avail_idx)))
        return;

    vdev->isr |= 0x01;
    virtio_notify_vector(vdev, vq->vector);
}

void virtio_notify_config(VirtIODevice *vdev)
{
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
        return;

    vdev->isr |= 0x03;
    virtio_notify_vector(vdev, vdev->config_vector);
}

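/* Save the transport-independent device state.  Binding-specific state
 * (for example MSI-X vector assignments on PCI) is saved around it through
 * the binding's save_config/save_queue hooks. */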
void virtio_save(VirtIODevice *vdev, QEMUFile *f)
{
    int i;

    if (vdev->binding->save_config)
        vdev->binding->save_config(vdev->binding_opaque, f);

    qemu_put_8s(f, &vdev->status);
    qemu_put_8s(f, &vdev->isr);
    qemu_put_be16s(f, &vdev->queue_sel);
    qemu_put_be32s(f, &vdev->guest_features);
    qemu_put_be32(f, vdev->config_len);
    qemu_put_buffer(f, vdev->config, vdev->config_len);

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;
    }

    qemu_put_be32(f, i);

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;

        qemu_put_be32(f, vdev->vq[i].vring.num);
        qemu_put_be64(f, vdev->vq[i].pa);
        qemu_put_be16s(f, &vdev->vq[i].last_avail_idx);
        if (vdev->binding->save_queue)
            vdev->binding->save_queue(vdev->binding_opaque, i, f);
    }
}

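/* Counterpart of virtio_save().  Fails (returns -1) if the migration source
 * negotiated features that this binding does not support. */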
int virtio_load(VirtIODevice *vdev, QEMUFile *f)
{
    int num, i, ret;
    uint32_t features;
    uint32_t supported_features =
        vdev->binding->get_features(vdev->binding_opaque);

    if (vdev->binding->load_config) {
        ret = vdev->binding->load_config(vdev->binding_opaque, f);
        if (ret)
            return ret;
    }

    qemu_get_8s(f, &vdev->status);
    qemu_get_8s(f, &vdev->isr);
    qemu_get_be16s(f, &vdev->queue_sel);
    qemu_get_be32s(f, &features);
    if (features & ~supported_features) {
        fprintf(stderr, "Features 0x%x unsupported. Allowed features: 0x%x\n",
                features, supported_features);
        return -1;
    }
    vdev->guest_features = features;
    vdev->config_len = qemu_get_be32(f);
    qemu_get_buffer(f, vdev->config, vdev->config_len);

    num = qemu_get_be32(f);

    for (i = 0; i < num; i++) {
        vdev->vq[i].vring.num = qemu_get_be32(f);
        vdev->vq[i].pa = qemu_get_be64(f);
        qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);

        if (vdev->vq[i].pa) {
            virtqueue_init(&vdev->vq[i]);
        }
        if (vdev->binding->load_queue) {
            ret = vdev->binding->load_queue(vdev->binding_opaque, i, f);
            if (ret)
                return ret;
        }
    }

    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
    return 0;
}

void virtio_cleanup(VirtIODevice *vdev)
{
    if (vdev->config)
        qemu_free(vdev->config);
    qemu_free(vdev->vq);
}

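/* Allocate and initialize the common VirtIODevice state.  struct_size is
 * the size of the caller's device structure, which is assumed to embed
 * VirtIODevice as its first member so that the returned pointer can be
 * cast to the device-specific type. */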
VirtIODevice *virtio_common_init(const char *name, uint16_t device_id,
                                 size_t config_size, size_t struct_size)
{
    VirtIODevice *vdev;
    int i;

    vdev = qemu_mallocz(struct_size);

    vdev->device_id = device_id;
    vdev->status = 0;
    vdev->isr = 0;
    vdev->queue_sel = 0;
    vdev->config_vector = VIRTIO_NO_VECTOR;
    vdev->vq = qemu_mallocz(sizeof(VirtQueue) * VIRTIO_PCI_QUEUE_MAX);
    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        vdev->vq[i].vector = VIRTIO_NO_VECTOR;
        vdev->vq[i].vdev = vdev;
    }

    vdev->name = name;
    vdev->config_len = config_size;
    if (vdev->config_len)
        vdev->config = qemu_mallocz(config_size);
    else
        vdev->config = NULL;

    return vdev;
}

void virtio_bind_device(VirtIODevice *vdev, const VirtIOBindings *binding,
                        void *opaque)
{
    vdev->binding = binding;
    vdev->binding_opaque = opaque;
}

target_phys_addr_t virtio_queue_get_desc_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

target_phys_addr_t virtio_queue_get_avail_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.avail;
}

target_phys_addr_t virtio_queue_get_used_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.used;
}

target_phys_addr_t virtio_queue_get_ring_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

target_phys_addr_t virtio_queue_get_desc_size(VirtIODevice *vdev, int n)
{
    return sizeof(VRingDesc) * vdev->vq[n].vring.num;
}

target_phys_addr_t virtio_queue_get_avail_size(VirtIODevice *vdev, int n)
{
    return offsetof(VRingAvail, ring) +
        sizeof(uint16_t) * vdev->vq[n].vring.num;
}

target_phys_addr_t virtio_queue_get_used_size(VirtIODevice *vdev, int n)
{
    return offsetof(VRingUsed, ring) +
        sizeof(VRingUsedElem) * vdev->vq[n].vring.num;
}

target_phys_addr_t virtio_queue_get_ring_size(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.used - vdev->vq[n].vring.desc +
        virtio_queue_get_used_size(vdev, n);
}

uint16_t virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].last_avail_idx;
}

void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, uint16_t idx)
{
    vdev->vq[n].last_avail_idx = idx;
}

VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n)
{
    return vdev->vq + n;
}

EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq)
{
    return &vq->guest_notifier;
}

EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq)
{
    return &vq->host_notifier;
}