/*
 * Virtio Support
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#include <inttypes.h>

#include "trace.h"
#include "qemu/error-report.h"
#include "virtio.h"
#include "qemu/atomic.h"

/* The alignment to use between consumer and producer parts of vring.
 * x86 pagesize again. */
#define VIRTIO_PCI_VRING_ALIGN         4096

typedef struct VRingDesc
{
    uint64_t addr;
    uint32_t len;
    uint16_t flags;
    uint16_t next;
} VRingDesc;

typedef struct VRingAvail
{
    uint16_t flags;
    uint16_t idx;
    uint16_t ring[0];
} VRingAvail;

typedef struct VRingUsedElem
{
    uint32_t id;
    uint32_t len;
} VRingUsedElem;

typedef struct VRingUsed
{
    uint16_t flags;
    uint16_t idx;
    VRingUsedElem ring[0];
} VRingUsed;

typedef struct VRing
{
    unsigned int num;
    hwaddr desc;
    hwaddr avail;
    hwaddr used;
} VRing;

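/* The three vring areas live in one contiguous region of guest memory,
 * laid out as in the virtio 0.9.x spec for a queue of num entries:
 *
 *   desc:  num * 16 bytes            (VRingDesc[num])
 *   avail: 4 + num * 2 bytes (+2)    (VRingAvail, plus used_event)
 *   used:  4 + num * 8 bytes (+2)    (VRingUsed, plus avail_event)
 *
 * with the used area aligned up to VIRTIO_PCI_VRING_ALIGN.  The guest
 * only programs a single base address; virtqueue_init() below derives
 * the avail and used addresses from it.  The (+2) event suffixes are
 * only used when VIRTIO_RING_F_EVENT_IDX is negotiated. */
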
struct VirtQueue
{
    VRing vring;
    hwaddr pa;
    uint16_t last_avail_idx;
    /* Last used index value we have signalled on */
    uint16_t signalled_used;

    /* Whether signalled_used is valid */
    bool signalled_used_valid;

    /* Notification enabled? */
    bool notification;

    int inuse;

    uint16_t vector;
    void (*handle_output)(VirtIODevice *vdev, VirtQueue *vq);
    VirtIODevice *vdev;
    EventNotifier guest_notifier;
    EventNotifier host_notifier;
};

/* virt queue functions */
static void virtqueue_init(VirtQueue *vq)
{
    hwaddr pa = vq->pa;

    vq->vring.desc = pa;
    vq->vring.avail = pa + vq->vring.num * sizeof(VRingDesc);
    vq->vring.used = vring_align(vq->vring.avail +
                                 offsetof(VRingAvail, ring[vq->vring.num]),
                                 VIRTIO_PCI_VRING_ALIGN);
}

static inline uint64_t vring_desc_addr(hwaddr desc_pa, int i)
{
    hwaddr pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, addr);
    return ldq_phys(pa);
}

static inline uint32_t vring_desc_len(hwaddr desc_pa, int i)
{
    hwaddr pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, len);
    return ldl_phys(pa);
}

static inline uint16_t vring_desc_flags(hwaddr desc_pa, int i)
{
    hwaddr pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, flags);
    return lduw_phys(pa);
}

static inline uint16_t vring_desc_next(hwaddr desc_pa, int i)
{
    hwaddr pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, next);
    return lduw_phys(pa);
}

static inline uint16_t vring_avail_flags(VirtQueue *vq)
{
    hwaddr pa;
    pa = vq->vring.avail + offsetof(VRingAvail, flags);
    return lduw_phys(pa);
}

static inline uint16_t vring_avail_idx(VirtQueue *vq)
{
    hwaddr pa;
    pa = vq->vring.avail + offsetof(VRingAvail, idx);
    return lduw_phys(pa);
}

static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
{
    hwaddr pa;
    pa = vq->vring.avail + offsetof(VRingAvail, ring[i]);
    return lduw_phys(pa);
}

static inline uint16_t vring_used_event(VirtQueue *vq)
{
    return vring_avail_ring(vq, vq->vring.num);
}

static inline void vring_used_ring_id(VirtQueue *vq, int i, uint32_t val)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, ring[i].id);
    stl_phys(pa, val);
}

static inline void vring_used_ring_len(VirtQueue *vq, int i, uint32_t val)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, ring[i].len);
    stl_phys(pa, val);
}

static uint16_t vring_used_idx(VirtQueue *vq)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, idx);
    return lduw_phys(pa);
}

static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, idx);
    stw_phys(pa, val);
}

static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, flags);
    stw_phys(pa, lduw_phys(pa) | mask);
}

static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, flags);
    stw_phys(pa, lduw_phys(pa) & ~mask);
}

static inline void vring_avail_event(VirtQueue *vq, uint16_t val)
{
    hwaddr pa;
    if (!vq->notification) {
        return;
    }
    pa = vq->vring.used + offsetof(VRingUsed, ring[vq->vring.num]);
    stw_phys(pa, val);
}

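/* When VIRTIO_RING_F_EVENT_IDX is negotiated, interrupt and notification
 * suppression use two extra 16-bit fields rather than the NO_INTERRUPT/
 * NO_NOTIFY flag bits: used_event sits just past the avail ring
 * (avail->ring[num], read by vring_used_event() above) and names the used
 * index at which the guest wants an interrupt, while avail_event sits just
 * past the used ring (written by vring_avail_event() above) and names the
 * avail index at which the device wants a kick. */
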
void virtio_queue_set_notification(VirtQueue *vq, int enable)
{
    vq->notification = enable;
    if (vq->vdev->guest_features & (1 << VIRTIO_RING_F_EVENT_IDX)) {
        vring_avail_event(vq, vring_avail_idx(vq));
    } else if (enable) {
        vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
    } else {
        vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
    }
    if (enable) {
        /* Expose avail event/used flags before caller checks the avail idx. */
        smp_mb();
    }
}

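/* A sketch of the re-check pattern device code is expected to use with the
 * helper above (not a function in this file): suppress notifications while
 * draining the ring, then re-enable them and poll once more to close the
 * race with a guest that added buffers in the meantime.
 *
 *     virtio_queue_set_notification(vq, 0);
 *     for (;;) {
 *         while (virtqueue_pop(vq, &elem)) {
 *             ... process elem and virtqueue_push() it ...
 *         }
 *         virtio_queue_set_notification(vq, 1);
 *         if (virtio_queue_empty(vq)) {
 *             break;
 *         }
 *         virtio_queue_set_notification(vq, 0);
 *     }
 */
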
int virtio_queue_ready(VirtQueue *vq)
{
    return vq->vring.avail != 0;
}

int virtio_queue_empty(VirtQueue *vq)
{
    return vring_avail_idx(vq) == vq->last_avail_idx;
}

void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len, unsigned int idx)
{
    unsigned int offset;
    int i;

    trace_virtqueue_fill(vq, elem, len, idx);

    offset = 0;
    for (i = 0; i < elem->in_num; i++) {
        size_t size = MIN(len - offset, elem->in_sg[i].iov_len);

        cpu_physical_memory_unmap(elem->in_sg[i].iov_base,
                                  elem->in_sg[i].iov_len,
                                  1, size);

        offset += size;
    }

    for (i = 0; i < elem->out_num; i++)
        cpu_physical_memory_unmap(elem->out_sg[i].iov_base,
                                  elem->out_sg[i].iov_len,
                                  0, elem->out_sg[i].iov_len);

    idx = (idx + vring_used_idx(vq)) % vq->vring.num;

    /* Get a pointer to the next entry in the used ring. */
    vring_used_ring_id(vq, idx, elem->index);
    vring_used_ring_len(vq, idx, len);
}

void virtqueue_flush(VirtQueue *vq, unsigned int count)
{
    uint16_t old, new;
    /* Make sure buffer is written before we update index. */
    smp_wmb();
    trace_virtqueue_flush(vq, count);
    old = vring_used_idx(vq);
    new = old + count;
    vring_used_idx_set(vq, new);
    vq->inuse -= count;
    if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old)))
        vq->signalled_used_valid = false;
}

void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len)
{
    virtqueue_fill(vq, elem, len, 0);
    virtqueue_flush(vq, 1);
}

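/* The fill/flush split above exists so a device can batch completions:
 * fill entries at idx 0..n-1 (offsets relative to the current used index),
 * then publish them all with a single flush(n) and hence at most one
 * guest interrupt.  virtqueue_push() is the common one-at-a-time case. */
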
static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
{
    uint16_t num_heads = vring_avail_idx(vq) - idx;

    /* Check it isn't doing very strange things with descriptor numbers. */
    if (num_heads > vq->vring.num) {
        error_report("Guest moved used index from %u to %u",
                     idx, vring_avail_idx(vq));
        exit(1);
    }
    /* On success, callers read a descriptor at vq->last_avail_idx.
     * Make sure descriptor read does not bypass avail index read. */
    if (num_heads) {
        smp_rmb();
    }

    return num_heads;
}

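/* Note the uint16_t subtraction above: avail->idx increments without
 * masking, so "avail_idx - last_avail_idx" counts the outstanding heads
 * correctly even across 16-bit wraparound, e.g. 0x0002 - 0xfffe = 4. */
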
static unsigned int virtqueue_get_head(VirtQueue *vq, unsigned int idx)
{
    unsigned int head;

    /* Grab the next descriptor number they're advertising, and increment
     * the index we've seen. */
    head = vring_avail_ring(vq, idx % vq->vring.num);

    /* If their number is silly, that's a fatal mistake. */
    if (head >= vq->vring.num) {
        error_report("Guest says index %u is available", head);
        exit(1);
    }

    return head;
}

static unsigned virtqueue_next_desc(hwaddr desc_pa,
                                    unsigned int i, unsigned int max)
{
    unsigned int next;

    /* If this descriptor says it doesn't chain, we're done. */
    if (!(vring_desc_flags(desc_pa, i) & VRING_DESC_F_NEXT))
        return max;

    /* Check they're not leading us off end of descriptors. */
    next = vring_desc_next(desc_pa, i);
    /* Make sure compiler knows to grab that: we don't want it changing! */
    smp_wmb();

    if (next >= max) {
        error_report("Desc next is %u", next);
        exit(1);
    }

    return next;
}

void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
                               unsigned int *out_bytes,
                               unsigned max_in_bytes, unsigned max_out_bytes)
{
    unsigned int idx;
    unsigned int total_bufs, in_total, out_total;

    idx = vq->last_avail_idx;

    total_bufs = in_total = out_total = 0;
    while (virtqueue_num_heads(vq, idx)) {
        unsigned int max, num_bufs, indirect = 0;
        hwaddr desc_pa;
        int i;

        max = vq->vring.num;
        num_bufs = total_bufs;
        i = virtqueue_get_head(vq, idx++);
        desc_pa = vq->vring.desc;

        if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_INDIRECT) {
            if (vring_desc_len(desc_pa, i) % sizeof(VRingDesc)) {
                error_report("Invalid size for indirect buffer table");
                exit(1);
            }

            /* If we've got too many, that implies a descriptor loop. */
            if (num_bufs >= max) {
                error_report("Looped descriptor");
                exit(1);
            }

            /* loop over the indirect descriptor table: fetch the table
             * address from descriptor i before resetting i to index into
             * the indirect table */
            indirect = 1;
            max = vring_desc_len(desc_pa, i) / sizeof(VRingDesc);
            desc_pa = vring_desc_addr(desc_pa, i);
            num_bufs = i = 0;
        }

        do {
            /* If we've got too many, that implies a descriptor loop. */
            if (++num_bufs > max) {
                error_report("Looped descriptor");
                exit(1);
            }

            if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_WRITE) {
                in_total += vring_desc_len(desc_pa, i);
            } else {
                out_total += vring_desc_len(desc_pa, i);
            }
            if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
                goto done;
            }
        } while ((i = virtqueue_next_desc(desc_pa, i, max)) != max);

        if (!indirect)
            total_bufs = num_bufs;
        else
            total_bufs++;
    }
done:
    if (in_bytes) {
        *in_bytes = in_total;
    }
    if (out_bytes) {
        *out_bytes = out_total;
    }
}

int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes,
                          unsigned int out_bytes)
{
    unsigned int in_total, out_total;

    virtqueue_get_avail_bytes(vq, &in_total, &out_total, in_bytes, out_bytes);
    return in_bytes <= in_total && out_bytes <= out_total;
}

void virtqueue_map_sg(struct iovec *sg, hwaddr *addr,
                      size_t num_sg, int is_write)
{
    unsigned int i;
    hwaddr len;

    for (i = 0; i < num_sg; i++) {
        len = sg[i].iov_len;
        sg[i].iov_base = cpu_physical_memory_map(addr[i], &len, is_write);
        if (sg[i].iov_base == NULL || len != sg[i].iov_len) {
            error_report("virtio: trying to map MMIO memory");
            exit(1);
        }
    }
}

int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem)
{
    unsigned int i, head, max;
    hwaddr desc_pa = vq->vring.desc;

    if (!virtqueue_num_heads(vq, vq->last_avail_idx))
        return 0;

    /* When we start there are neither input nor output buffers. */
    elem->out_num = elem->in_num = 0;

    max = vq->vring.num;

    i = head = virtqueue_get_head(vq, vq->last_avail_idx++);
    if (vq->vdev->guest_features & (1 << VIRTIO_RING_F_EVENT_IDX)) {
        vring_avail_event(vq, vring_avail_idx(vq));
    }

    if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_INDIRECT) {
        if (vring_desc_len(desc_pa, i) % sizeof(VRingDesc)) {
            error_report("Invalid size for indirect buffer table");
            exit(1);
        }

        /* loop over the indirect descriptor table */
        max = vring_desc_len(desc_pa, i) / sizeof(VRingDesc);
        desc_pa = vring_desc_addr(desc_pa, i);
        i = 0;
    }

    /* Collect all the descriptors */
    do {
        struct iovec *sg;

        if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_WRITE) {
            if (elem->in_num >= ARRAY_SIZE(elem->in_sg)) {
                error_report("Too many write descriptors in indirect table");
                exit(1);
            }
            elem->in_addr[elem->in_num] = vring_desc_addr(desc_pa, i);
            sg = &elem->in_sg[elem->in_num++];
        } else {
            if (elem->out_num >= ARRAY_SIZE(elem->out_sg)) {
                error_report("Too many read descriptors in indirect table");
                exit(1);
            }
            elem->out_addr[elem->out_num] = vring_desc_addr(desc_pa, i);
            sg = &elem->out_sg[elem->out_num++];
        }

        sg->iov_len = vring_desc_len(desc_pa, i);

        /* If we've got too many, that implies a descriptor loop. */
        if ((elem->in_num + elem->out_num) > max) {
            error_report("Looped descriptor");
            exit(1);
        }
    } while ((i = virtqueue_next_desc(desc_pa, i, max)) != max);

    /* Now map what we have collected */
    virtqueue_map_sg(elem->in_sg, elem->in_addr, elem->in_num, 1);
    virtqueue_map_sg(elem->out_sg, elem->out_addr, elem->out_num, 0);

    elem->index = head;

    vq->inuse++;

    trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
    return elem->in_num + elem->out_num;
}

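/* Naming note for virtqueue_pop(): by virtio ring convention, "out"
 * buffers (guest-to-device, mapped read-only above) precede "in" buffers
 * (device-writable) within a descriptor chain, and the len later handed
 * to virtqueue_push()/virtqueue_fill() counts only the bytes the device
 * actually wrote into the "in" buffers. */
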
/* virtio device */
static void virtio_notify_vector(VirtIODevice *vdev, uint16_t vector)
{
    if (vdev->binding->notify) {
        vdev->binding->notify(vdev->binding_opaque, vector);
    }
}

void virtio_update_irq(VirtIODevice *vdev)
{
    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
}

void virtio_set_status(VirtIODevice *vdev, uint8_t val)
{
    trace_virtio_set_status(vdev, val);

    if (vdev->set_status) {
        vdev->set_status(vdev, val);
    }
    vdev->status = val;
}

void virtio_reset(void *opaque)
{
    VirtIODevice *vdev = opaque;
    int i;

    virtio_set_status(vdev, 0);

    if (vdev->reset)
        vdev->reset(vdev);

    vdev->guest_features = 0;
    vdev->queue_sel = 0;
    vdev->status = 0;
    vdev->isr = 0;
    vdev->config_vector = VIRTIO_NO_VECTOR;
    virtio_notify_vector(vdev, vdev->config_vector);

    for(i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        vdev->vq[i].vring.desc = 0;
        vdev->vq[i].vring.avail = 0;
        vdev->vq[i].vring.used = 0;
        vdev->vq[i].last_avail_idx = 0;
        vdev->vq[i].pa = 0;
        vdev->vq[i].vector = VIRTIO_NO_VECTOR;
        vdev->vq[i].signalled_used = 0;
        vdev->vq[i].signalled_used_valid = false;
        vdev->vq[i].notification = true;
    }
}

uint32_t virtio_config_readb(VirtIODevice *vdev, uint32_t addr)
{
    uint8_t val;

    vdev->get_config(vdev, vdev->config);

    if (addr > (vdev->config_len - sizeof(val)))
        return (uint32_t)-1;

    val = ldub_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_readw(VirtIODevice *vdev, uint32_t addr)
{
    uint16_t val;

    vdev->get_config(vdev, vdev->config);

    if (addr > (vdev->config_len - sizeof(val)))
        return (uint32_t)-1;

    val = lduw_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_readl(VirtIODevice *vdev, uint32_t addr)
{
    uint32_t val;

    vdev->get_config(vdev, vdev->config);

    if (addr > (vdev->config_len - sizeof(val)))
        return (uint32_t)-1;

    val = ldl_p(vdev->config + addr);
    return val;
}

void virtio_config_writeb(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    uint8_t val = data;

    if (addr > (vdev->config_len - sizeof(val)))
        return;

    stb_p(vdev->config + addr, val);

    if (vdev->set_config)
        vdev->set_config(vdev, vdev->config);
}

void virtio_config_writew(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    uint16_t val = data;

    if (addr > (vdev->config_len - sizeof(val)))
        return;

    stw_p(vdev->config + addr, val);

    if (vdev->set_config)
        vdev->set_config(vdev, vdev->config);
}

void virtio_config_writel(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    uint32_t val = data;

    if (addr > (vdev->config_len - sizeof(val)))
        return;

    stl_p(vdev->config + addr, val);

    if (vdev->set_config)
        vdev->set_config(vdev, vdev->config);
}

void virtio_queue_set_addr(VirtIODevice *vdev, int n, hwaddr addr)
{
    vdev->vq[n].pa = addr;
    virtqueue_init(&vdev->vq[n]);
}

hwaddr virtio_queue_get_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].pa;
}

int virtio_queue_get_num(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.num;
}

int virtio_queue_get_id(VirtQueue *vq)
{
    VirtIODevice *vdev = vq->vdev;
    assert(vq >= &vdev->vq[0] && vq < &vdev->vq[VIRTIO_PCI_QUEUE_MAX]);
    return vq - &vdev->vq[0];
}

void virtio_queue_notify_vq(VirtQueue *vq)
{
    if (vq->vring.desc) {
        VirtIODevice *vdev = vq->vdev;
        trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
        vq->handle_output(vdev, vq);
    }
}

void virtio_queue_notify(VirtIODevice *vdev, int n)
{
    virtio_queue_notify_vq(&vdev->vq[n]);
}

uint16_t virtio_queue_vector(VirtIODevice *vdev, int n)
{
    return n < VIRTIO_PCI_QUEUE_MAX ? vdev->vq[n].vector :
        VIRTIO_NO_VECTOR;
}

void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector)
{
    if (n < VIRTIO_PCI_QUEUE_MAX)
        vdev->vq[n].vector = vector;
}

VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
                            void (*handle_output)(VirtIODevice *, VirtQueue *))
{
    int i;

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;
    }

    if (i == VIRTIO_PCI_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE)
        abort();

    vdev->vq[i].vring.num = queue_size;
    vdev->vq[i].handle_output = handle_output;

    return &vdev->vq[i];
}

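/* A sketch of how a device model wires a queue up with the call above and
 * virtio_common_init() below (handler and device names are hypothetical,
 * not code from this file):
 *
 *     static void my_dev_handle_rx(VirtIODevice *vdev, VirtQueue *vq)
 *     {
 *         VirtQueueElement elem;
 *
 *         while (virtqueue_pop(vq, &elem)) {
 *             ... fill elem.in_sg, counting bytes written ...
 *             virtqueue_push(vq, &elem, bytes_written);
 *         }
 *         virtio_notify(vdev, vq);
 *     }
 *
 *     vdev = virtio_common_init("my-dev", MY_DEVICE_ID,
 *                               config_size, sizeof(MyDevState));
 *     vq = virtio_add_queue(vdev, 256, my_dev_handle_rx);
 */
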
void virtio_irq(VirtQueue *vq)
{
    trace_virtio_irq(vq);
    vq->vdev->isr |= 0x01;
    virtio_notify_vector(vq->vdev, vq->vector);
}

/* Assuming a given event_idx value from the other side, if
 * we have just incremented index from old to new_idx,
 * should we trigger an event? */
static inline int vring_need_event(uint16_t event, uint16_t new, uint16_t old)
{
    /* Note: Xen has similar logic for notification hold-off
     * in include/xen/interface/io/ring.h with req_event and req_prod
     * corresponding to event_idx + 1 and new respectively.
     * Note also that req_event and req_prod in Xen start at 1,
     * event indexes in virtio start at 0. */
    return (uint16_t)(new - event - 1) < (uint16_t)(new - old);
}

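/* A worked example of the test above: with old = 5, new = 7 and the other
 * side asking for an event at event = 5, (uint16_t)(7 - 5 - 1) = 1 is less
 * than (uint16_t)(7 - 5) = 2, so we notify; had it asked for event = 7,
 * (uint16_t)(7 - 7 - 1) = 0xffff is not less than 2, so the notification
 * is held off.  The unsigned arithmetic keeps the comparison valid across
 * index wraparound. */
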
static bool vring_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    uint16_t old, new;
    bool v;
    /* We need to expose used array entries before checking used event. */
    smp_mb();
    /* Always notify when queue is empty (if the feature was acknowledged) */
    if (((vdev->guest_features & (1 << VIRTIO_F_NOTIFY_ON_EMPTY)) &&
         !vq->inuse && vring_avail_idx(vq) == vq->last_avail_idx)) {
        return true;
    }

    if (!(vdev->guest_features & (1 << VIRTIO_RING_F_EVENT_IDX))) {
        return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
    }

    v = vq->signalled_used_valid;
    vq->signalled_used_valid = true;
    old = vq->signalled_used;
    new = vq->signalled_used = vring_used_idx(vq);
    return !v || vring_need_event(vring_used_event(vq), new, old);
}

void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    if (!vring_notify(vdev, vq)) {
        return;
    }

    trace_virtio_notify(vdev, vq);
    vdev->isr |= 0x01;
    virtio_notify_vector(vdev, vq->vector);
}

void virtio_notify_config(VirtIODevice *vdev)
{
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
        return;

    vdev->isr |= 0x03;
    virtio_notify_vector(vdev, vdev->config_vector);
}

void virtio_save(VirtIODevice *vdev, QEMUFile *f)
{
    int i;

    if (vdev->binding->save_config)
        vdev->binding->save_config(vdev->binding_opaque, f);

    qemu_put_8s(f, &vdev->status);
    qemu_put_8s(f, &vdev->isr);
    qemu_put_be16s(f, &vdev->queue_sel);
    qemu_put_be32s(f, &vdev->guest_features);
    qemu_put_be32(f, vdev->config_len);
    qemu_put_buffer(f, vdev->config, vdev->config_len);

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;
    }

    qemu_put_be32(f, i);

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;

        qemu_put_be32(f, vdev->vq[i].vring.num);
        qemu_put_be64(f, vdev->vq[i].pa);
        qemu_put_be16s(f, &vdev->vq[i].last_avail_idx);
        if (vdev->binding->save_queue)
            vdev->binding->save_queue(vdev->binding_opaque, i, f);
    }
}

int virtio_set_features(VirtIODevice *vdev, uint32_t val)
{
    uint32_t supported_features =
        vdev->binding->get_features(vdev->binding_opaque);
    bool bad = (val & ~supported_features) != 0;

    val &= supported_features;
    if (vdev->set_features) {
        vdev->set_features(vdev, val);
    }
    vdev->guest_features = val;
    return bad ? -1 : 0;
}

int virtio_load(VirtIODevice *vdev, QEMUFile *f)
{
    int num, i, ret;
    uint32_t features;
    uint32_t supported_features;

    if (vdev->binding->load_config) {
        ret = vdev->binding->load_config(vdev->binding_opaque, f);
        if (ret)
            return ret;
    }

    qemu_get_8s(f, &vdev->status);
    qemu_get_8s(f, &vdev->isr);
    qemu_get_be16s(f, &vdev->queue_sel);
    qemu_get_be32s(f, &features);

    if (virtio_set_features(vdev, features) < 0) {
        supported_features = vdev->binding->get_features(vdev->binding_opaque);
        error_report("Features 0x%x unsupported. Allowed features: 0x%x",
                     features, supported_features);
        return -1;
    }
    vdev->config_len = qemu_get_be32(f);
    qemu_get_buffer(f, vdev->config, vdev->config_len);

    num = qemu_get_be32(f);

    for (i = 0; i < num; i++) {
        vdev->vq[i].vring.num = qemu_get_be32(f);
        vdev->vq[i].pa = qemu_get_be64(f);
        qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);
        vdev->vq[i].signalled_used_valid = false;
        vdev->vq[i].notification = true;

        if (vdev->vq[i].pa) {
            uint16_t nheads;
            virtqueue_init(&vdev->vq[i]);
            nheads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx;
            /* Check it isn't doing very strange things with descriptor numbers. */
            if (nheads > vdev->vq[i].vring.num) {
                error_report("VQ %d size 0x%x Guest index 0x%x "
                             "inconsistent with Host index 0x%x: delta 0x%x",
                             i, vdev->vq[i].vring.num,
                             vring_avail_idx(&vdev->vq[i]),
                             vdev->vq[i].last_avail_idx, nheads);
                return -1;
            }
        } else if (vdev->vq[i].last_avail_idx) {
            error_report("VQ %d address 0x0 "
                         "inconsistent with Host index 0x%x",
                         i, vdev->vq[i].last_avail_idx);
            return -1;
        }
        if (vdev->binding->load_queue) {
            ret = vdev->binding->load_queue(vdev->binding_opaque, i, f);
            if (ret)
                return ret;
        }
    }

    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
    return 0;
}

void virtio_cleanup(VirtIODevice *vdev)
{
    qemu_del_vm_change_state_handler(vdev->vmstate);
    g_free(vdev->config);
    g_free(vdev->vq);
    g_free(vdev);
}

static void virtio_vmstate_change(void *opaque, int running, RunState state)
{
    VirtIODevice *vdev = opaque;
    bool backend_run = running && (vdev->status & VIRTIO_CONFIG_S_DRIVER_OK);
    vdev->vm_running = running;

    if (backend_run) {
        virtio_set_status(vdev, vdev->status);
    }

    if (vdev->binding->vmstate_change) {
        vdev->binding->vmstate_change(vdev->binding_opaque, backend_run);
    }

    if (!backend_run) {
        virtio_set_status(vdev, vdev->status);
    }
}

VirtIODevice *virtio_common_init(const char *name, uint16_t device_id,
                                 size_t config_size, size_t struct_size)
{
    VirtIODevice *vdev;
    int i;

    vdev = g_malloc0(struct_size);

    vdev->device_id = device_id;
    vdev->status = 0;
    vdev->isr = 0;
    vdev->queue_sel = 0;
    vdev->config_vector = VIRTIO_NO_VECTOR;
    vdev->vq = g_malloc0(sizeof(VirtQueue) * VIRTIO_PCI_QUEUE_MAX);
    vdev->vm_running = runstate_is_running();
    for(i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        vdev->vq[i].vector = VIRTIO_NO_VECTOR;
        vdev->vq[i].vdev = vdev;
    }

    vdev->name = name;
    vdev->config_len = config_size;
    if (vdev->config_len)
        vdev->config = g_malloc0(config_size);
    else
        vdev->config = NULL;

    vdev->vmstate = qemu_add_vm_change_state_handler(virtio_vmstate_change, vdev);

    return vdev;
}

void virtio_bind_device(VirtIODevice *vdev, const VirtIOBindings *binding,
                        void *opaque)
{
    vdev->binding = binding;
    vdev->binding_opaque = opaque;
}

hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

hwaddr virtio_queue_get_avail_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.avail;
}

hwaddr virtio_queue_get_used_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.used;
}

hwaddr virtio_queue_get_ring_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

hwaddr virtio_queue_get_desc_size(VirtIODevice *vdev, int n)
{
    return sizeof(VRingDesc) * vdev->vq[n].vring.num;
}

hwaddr virtio_queue_get_avail_size(VirtIODevice *vdev, int n)
{
    /* Avail ring entries are 16-bit descriptor indexes. */
    return offsetof(VRingAvail, ring) +
        sizeof(uint16_t) * vdev->vq[n].vring.num;
}

hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n)
{
    return offsetof(VRingUsed, ring) +
        sizeof(VRingUsedElem) * vdev->vq[n].vring.num;
}

hwaddr virtio_queue_get_ring_size(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.used - vdev->vq[n].vring.desc +
        virtio_queue_get_used_size(vdev, n);
}

uint16_t virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].last_avail_idx;
}

void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, uint16_t idx)
{
    vdev->vq[n].last_avail_idx = idx;
}

VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n)
{
    return vdev->vq + n;
}

static void virtio_queue_guest_notifier_read(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, guest_notifier);
    if (event_notifier_test_and_clear(n)) {
        virtio_irq(vq);
    }
}

void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
                                                bool with_irqfd)
{
    if (assign && !with_irqfd) {
        event_notifier_set_handler(&vq->guest_notifier,
                                   virtio_queue_guest_notifier_read);
    } else {
        event_notifier_set_handler(&vq->guest_notifier, NULL);
    }
    if (!assign) {
        /* Test and clear notifier before closing it,
         * in case poll callback didn't have time to run. */
        virtio_queue_guest_notifier_read(&vq->guest_notifier);
    }
}

1025
1cbdabe2
MT
1026EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq)
1027{
1028 return &vq->guest_notifier;
1029}
b1f416aa
PB
1030
1031static void virtio_queue_host_notifier_read(EventNotifier *n)
1032{
1033 VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
1034 if (event_notifier_test_and_clear(n)) {
1035 virtio_queue_notify_vq(vq);
1036 }
1037}
1038
26b9b5fe
PB
1039void virtio_queue_set_host_notifier_fd_handler(VirtQueue *vq, bool assign,
1040 bool set_handler)
b1f416aa 1041{
26b9b5fe 1042 if (assign && set_handler) {
b1f416aa
PB
1043 event_notifier_set_handler(&vq->host_notifier,
1044 virtio_queue_host_notifier_read);
1045 } else {
1046 event_notifier_set_handler(&vq->host_notifier, NULL);
26b9b5fe
PB
1047 }
1048 if (!assign) {
b1f416aa
PB
1049 /* Test and clear notifier before after disabling event,
1050 * in case poll callback didn't have time to run. */
1051 virtio_queue_host_notifier_read(&vq->host_notifier);
1052 }
1053}
1054
1cbdabe2
MT
1055EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq)
1056{
1057 return &vq->host_notifier;
1058}