]> git.proxmox.com Git - qemu.git/blob - hw/virtio-pci.c
Merge remote-tracking branch 'qemu-kvm/uq/master' into staging
[qemu.git] / hw / virtio-pci.c
1 /*
2 * Virtio PCI Bindings
3 *
4 * Copyright IBM, Corp. 2007
5 * Copyright (c) 2009 CodeSourcery
6 *
7 * Authors:
8 * Anthony Liguori <aliguori@us.ibm.com>
9 * Paul Brook <paul@codesourcery.com>
10 *
11 * This work is licensed under the terms of the GNU GPL, version 2. See
12 * the COPYING file in the top-level directory.
13 *
14 * Contributions after 2012-01-13 are licensed under the terms of the
15 * GNU GPL, version 2 or (at your option) any later version.
16 */
17
18 #include <inttypes.h>
19
20 #include "virtio.h"
21 #include "virtio-blk.h"
22 #include "virtio-net.h"
23 #include "virtio-serial.h"
24 #include "virtio-scsi.h"
25 #include "pci.h"
26 #include "qemu-error.h"
27 #include "msi.h"
28 #include "msix.h"
29 #include "net.h"
30 #include "loader.h"
31 #include "kvm.h"
32 #include "blockdev.h"
33 #include "virtio-pci.h"
34 #include "range.h"
35
/* from Linux's linux/virtio_pci.h */

/* A 32-bit r/o bitmask of the features supported by the host */
#define VIRTIO_PCI_HOST_FEATURES 0

/* A 32-bit r/w bitmask of features activated by the guest */
#define VIRTIO_PCI_GUEST_FEATURES 4

/* A 32-bit r/w PFN for the currently selected queue */
#define VIRTIO_PCI_QUEUE_PFN 8

/* A 16-bit r/o queue size for the currently selected queue */
#define VIRTIO_PCI_QUEUE_NUM 12

/* A 16-bit r/w queue selector */
#define VIRTIO_PCI_QUEUE_SEL 14

/* A 16-bit r/w queue notifier */
#define VIRTIO_PCI_QUEUE_NOTIFY 16

/* An 8-bit device status register. */
#define VIRTIO_PCI_STATUS 18

/* An 8-bit r/o interrupt status register. Reading the value will return the
 * current contents of the ISR and will also clear it. This is effectively
 * a read-and-acknowledge. */
#define VIRTIO_PCI_ISR 19

/* MSI-X registers: only enabled if MSI-X is enabled. */
/* A 16-bit vector for configuration changes. */
#define VIRTIO_MSI_CONFIG_VECTOR 20
/* A 16-bit vector for selected queue notifications. */
#define VIRTIO_MSI_QUEUE_VECTOR 22

/* Config space size */
#define VIRTIO_PCI_CONFIG_NOMSI 20
#define VIRTIO_PCI_CONFIG_MSI 24
/* BAR size of the common header: depends on whether the MSI-X capability
 * is *present* in config space (fixed at init), not on whether the guest
 * has enabled it. */
#define VIRTIO_PCI_REGION_SIZE(dev) (msix_present(dev) ? \
VIRTIO_PCI_CONFIG_MSI : \
VIRTIO_PCI_CONFIG_NOMSI)

/* The remaining space is defined by each driver as the per-driver
 * configuration space.  Its offset depends on whether the guest has
 * *enabled* MSI-X, so it can move at runtime. */
#define VIRTIO_PCI_CONFIG(dev) (msix_enabled(dev) ? \
VIRTIO_PCI_CONFIG_MSI : \
VIRTIO_PCI_CONFIG_NOMSI)

/* How many bits to shift physical queue address written to QUEUE_PFN.
 * 12 is historical, and due to x86 page size. */
#define VIRTIO_PCI_QUEUE_ADDR_SHIFT 12

/* Flags track per-device state like workarounds for quirks in older guests. */
#define VIRTIO_PCI_FLAG_BUS_MASTER_BUG (1 << 0)

/* QEMU doesn't strictly need write barriers since everything runs in
 * lock-step. We'll leave the calls to wmb() in though to make it obvious for
 * KVM or if kqemu gets SMP support.
 */
#define wmb() do { } while (0)

/* HACK for virtio to determine if it's running a big endian guest */
bool virtio_is_big_endian(void);

/* virtio device */
101 static void virtio_pci_notify(void *opaque, uint16_t vector)
102 {
103 VirtIOPCIProxy *proxy = opaque;
104 if (msix_enabled(&proxy->pci_dev))
105 msix_notify(&proxy->pci_dev, vector);
106 else
107 qemu_set_irq(proxy->pci_dev.irq[0], proxy->vdev->isr & 1);
108 }
109
110 static void virtio_pci_save_config(void * opaque, QEMUFile *f)
111 {
112 VirtIOPCIProxy *proxy = opaque;
113 pci_device_save(&proxy->pci_dev, f);
114 msix_save(&proxy->pci_dev, f);
115 if (msix_present(&proxy->pci_dev))
116 qemu_put_be16(f, proxy->vdev->config_vector);
117 }
118
119 static void virtio_pci_save_queue(void * opaque, int n, QEMUFile *f)
120 {
121 VirtIOPCIProxy *proxy = opaque;
122 if (msix_present(&proxy->pci_dev))
123 qemu_put_be16(f, virtio_queue_vector(proxy->vdev, n));
124 }
125
126 static int virtio_pci_load_config(void * opaque, QEMUFile *f)
127 {
128 VirtIOPCIProxy *proxy = opaque;
129 int ret;
130 ret = pci_device_load(&proxy->pci_dev, f);
131 if (ret) {
132 return ret;
133 }
134 msix_load(&proxy->pci_dev, f);
135 if (msix_present(&proxy->pci_dev)) {
136 qemu_get_be16s(f, &proxy->vdev->config_vector);
137 } else {
138 proxy->vdev->config_vector = VIRTIO_NO_VECTOR;
139 }
140 if (proxy->vdev->config_vector != VIRTIO_NO_VECTOR) {
141 return msix_vector_use(&proxy->pci_dev, proxy->vdev->config_vector);
142 }
143 return 0;
144 }
145
146 static int virtio_pci_load_queue(void * opaque, int n, QEMUFile *f)
147 {
148 VirtIOPCIProxy *proxy = opaque;
149 uint16_t vector;
150 if (msix_present(&proxy->pci_dev)) {
151 qemu_get_be16s(f, &vector);
152 } else {
153 vector = VIRTIO_NO_VECTOR;
154 }
155 virtio_queue_set_vector(proxy->vdev, n, vector);
156 if (vector != VIRTIO_NO_VECTOR) {
157 return msix_vector_use(&proxy->pci_dev, vector);
158 }
159 return 0;
160 }
161
/* Attach (assign=true) or detach (assign=false) queue n's host-notifier
 * eventfd to the QUEUE_NOTIFY register, so a guest kick signals the eventfd
 * instead of taking the userspace PIO path.  Returns 0 or a negative errno
 * from event_notifier_init(). */
static int virtio_pci_set_host_notifier_internal(VirtIOPCIProxy *proxy,
                                                 int n, bool assign)
{
    VirtQueue *vq = virtio_get_queue(proxy->vdev, n);
    EventNotifier *notifier = virtio_queue_get_host_notifier(vq);
    int r = 0;

    if (assign) {
        r = event_notifier_init(notifier, 1);
        if (r < 0) {
            error_report("%s: unable to init event notifier: %d",
                         __func__, r);
            return r;
        }
        /* Match 2-byte writes of value n at offset QUEUE_NOTIFY in BAR 0. */
        memory_region_add_eventfd(&proxy->bar, VIRTIO_PCI_QUEUE_NOTIFY, 2,
                                  true, n, event_notifier_get_fd(notifier));
    } else {
        memory_region_del_eventfd(&proxy->bar, VIRTIO_PCI_QUEUE_NOTIFY, 2,
                                  true, n, event_notifier_get_fd(notifier));
        /* Handle the race condition where the guest kicked and we deassigned
         * before we got around to handling the kick.
         */
        if (event_notifier_test_and_clear(notifier)) {
            virtio_queue_notify_vq(vq);
        }

        event_notifier_cleanup(notifier);
    }
    return r;
}
192
193 static void virtio_pci_host_notifier_read(void *opaque)
194 {
195 VirtQueue *vq = opaque;
196 EventNotifier *n = virtio_queue_get_host_notifier(vq);
197 if (event_notifier_test_and_clear(n)) {
198 virtio_queue_notify_vq(vq);
199 }
200 }
201
202 static void virtio_pci_set_host_notifier_fd_handler(VirtIOPCIProxy *proxy,
203 int n, bool assign)
204 {
205 VirtQueue *vq = virtio_get_queue(proxy->vdev, n);
206 EventNotifier *notifier = virtio_queue_get_host_notifier(vq);
207 if (assign) {
208 qemu_set_fd_handler(event_notifier_get_fd(notifier),
209 virtio_pci_host_notifier_read, NULL, vq);
210 } else {
211 qemu_set_fd_handler(event_notifier_get_fd(notifier),
212 NULL, NULL, NULL);
213 }
214 }
215
/* Switch every active queue's kick path to ioeventfd.  On partial failure,
 * roll back the queues already assigned and fall back to userspace PIO. */
static void virtio_pci_start_ioeventfd(VirtIOPCIProxy *proxy)
{
    int n, r;

    /* Skip if disabled by property, temporarily disabled (e.g. a backend
     * took over the host notifiers), or already running. */
    if (!(proxy->flags & VIRTIO_PCI_FLAG_USE_IOEVENTFD) ||
        proxy->ioeventfd_disabled ||
        proxy->ioeventfd_started) {
        return;
    }

    for (n = 0; n < VIRTIO_PCI_QUEUE_MAX; n++) {
        if (!virtio_queue_get_num(proxy->vdev, n)) {
            continue;
        }

        r = virtio_pci_set_host_notifier_internal(proxy, n, true);
        if (r < 0) {
            goto assign_error;
        }

        virtio_pci_set_host_notifier_fd_handler(proxy, n, true);
    }
    proxy->ioeventfd_started = true;
    return;

assign_error:
    /* Undo queues [0, n) in reverse; deassignment itself must not fail. */
    while (--n >= 0) {
        if (!virtio_queue_get_num(proxy->vdev, n)) {
            continue;
        }

        virtio_pci_set_host_notifier_fd_handler(proxy, n, false);
        r = virtio_pci_set_host_notifier_internal(proxy, n, false);
        assert(r >= 0);
    }
    proxy->ioeventfd_started = false;
    error_report("%s: failed. Fallback to a userspace (slower).", __func__);
}
254
255 static void virtio_pci_stop_ioeventfd(VirtIOPCIProxy *proxy)
256 {
257 int r;
258 int n;
259
260 if (!proxy->ioeventfd_started) {
261 return;
262 }
263
264 for (n = 0; n < VIRTIO_PCI_QUEUE_MAX; n++) {
265 if (!virtio_queue_get_num(proxy->vdev, n)) {
266 continue;
267 }
268
269 virtio_pci_set_host_notifier_fd_handler(proxy, n, false);
270 r = virtio_pci_set_host_notifier_internal(proxy, n, false);
271 assert(r >= 0);
272 }
273 proxy->ioeventfd_started = false;
274 }
275
276 void virtio_pci_reset(DeviceState *d)
277 {
278 VirtIOPCIProxy *proxy = container_of(d, VirtIOPCIProxy, pci_dev.qdev);
279 virtio_pci_stop_ioeventfd(proxy);
280 virtio_reset(proxy->vdev);
281 msix_reset(&proxy->pci_dev);
282 proxy->flags &= ~VIRTIO_PCI_FLAG_BUS_MASTER_BUG;
283 }
284
285 static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val)
286 {
287 VirtIOPCIProxy *proxy = opaque;
288 VirtIODevice *vdev = proxy->vdev;
289 target_phys_addr_t pa;
290
291 switch (addr) {
292 case VIRTIO_PCI_GUEST_FEATURES:
293 /* Guest does not negotiate properly? We have to assume nothing. */
294 if (val & (1 << VIRTIO_F_BAD_FEATURE)) {
295 val = vdev->bad_features ? vdev->bad_features(vdev) : 0;
296 }
297 virtio_set_features(vdev, val);
298 break;
299 case VIRTIO_PCI_QUEUE_PFN:
300 pa = (target_phys_addr_t)val << VIRTIO_PCI_QUEUE_ADDR_SHIFT;
301 if (pa == 0) {
302 virtio_pci_stop_ioeventfd(proxy);
303 virtio_reset(proxy->vdev);
304 msix_unuse_all_vectors(&proxy->pci_dev);
305 }
306 else
307 virtio_queue_set_addr(vdev, vdev->queue_sel, pa);
308 break;
309 case VIRTIO_PCI_QUEUE_SEL:
310 if (val < VIRTIO_PCI_QUEUE_MAX)
311 vdev->queue_sel = val;
312 break;
313 case VIRTIO_PCI_QUEUE_NOTIFY:
314 if (val < VIRTIO_PCI_QUEUE_MAX) {
315 virtio_queue_notify(vdev, val);
316 }
317 break;
318 case VIRTIO_PCI_STATUS:
319 if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) {
320 virtio_pci_stop_ioeventfd(proxy);
321 }
322
323 virtio_set_status(vdev, val & 0xFF);
324
325 if (val & VIRTIO_CONFIG_S_DRIVER_OK) {
326 virtio_pci_start_ioeventfd(proxy);
327 }
328
329 if (vdev->status == 0) {
330 virtio_reset(proxy->vdev);
331 msix_unuse_all_vectors(&proxy->pci_dev);
332 }
333
334 /* Linux before 2.6.34 sets the device as OK without enabling
335 the PCI device bus master bit. In this case we need to disable
336 some safety checks. */
337 if ((val & VIRTIO_CONFIG_S_DRIVER_OK) &&
338 !(proxy->pci_dev.config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
339 proxy->flags |= VIRTIO_PCI_FLAG_BUS_MASTER_BUG;
340 }
341 break;
342 case VIRTIO_MSI_CONFIG_VECTOR:
343 msix_vector_unuse(&proxy->pci_dev, vdev->config_vector);
344 /* Make it possible for guest to discover an error took place. */
345 if (msix_vector_use(&proxy->pci_dev, val) < 0)
346 val = VIRTIO_NO_VECTOR;
347 vdev->config_vector = val;
348 break;
349 case VIRTIO_MSI_QUEUE_VECTOR:
350 msix_vector_unuse(&proxy->pci_dev,
351 virtio_queue_vector(vdev, vdev->queue_sel));
352 /* Make it possible for guest to discover an error took place. */
353 if (msix_vector_use(&proxy->pci_dev, val) < 0)
354 val = VIRTIO_NO_VECTOR;
355 virtio_queue_set_vector(vdev, vdev->queue_sel, val);
356 break;
357 default:
358 error_report("%s: unexpected address 0x%x value 0x%x",
359 __func__, addr, val);
360 break;
361 }
362 }
363
/* Read a register from the legacy virtio header in BAR 0; addr is the
 * register offset (callers have already checked it lies in the header).
 * Unknown offsets read as all-ones. */
static uint32_t virtio_ioport_read(VirtIOPCIProxy *proxy, uint32_t addr)
{
    VirtIODevice *vdev = proxy->vdev;
    uint32_t ret = 0xFFFFFFFF;

    switch (addr) {
    case VIRTIO_PCI_HOST_FEATURES:
        ret = proxy->host_features;
        break;
    case VIRTIO_PCI_GUEST_FEATURES:
        ret = vdev->guest_features;
        break;
    case VIRTIO_PCI_QUEUE_PFN:
        ret = virtio_queue_get_addr(vdev, vdev->queue_sel)
              >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
        break;
    case VIRTIO_PCI_QUEUE_NUM:
        ret = virtio_queue_get_num(vdev, vdev->queue_sel);
        break;
    case VIRTIO_PCI_QUEUE_SEL:
        ret = vdev->queue_sel;
        break;
    case VIRTIO_PCI_STATUS:
        ret = vdev->status;
        break;
    case VIRTIO_PCI_ISR:
        /* reading from the ISR also clears it.  This is effectively a
         * read-and-acknowledge, so the INTx line is lowered too. */
        ret = vdev->isr;
        vdev->isr = 0;
        qemu_set_irq(proxy->pci_dev.irq[0], 0);
        break;
    case VIRTIO_MSI_CONFIG_VECTOR:
        ret = vdev->config_vector;
        break;
    case VIRTIO_MSI_QUEUE_VECTOR:
        ret = virtio_queue_vector(vdev, vdev->queue_sel);
        break;
    default:
        break;
    }

    return ret;
}
407
408 static uint32_t virtio_pci_config_readb(void *opaque, uint32_t addr)
409 {
410 VirtIOPCIProxy *proxy = opaque;
411 uint32_t config = VIRTIO_PCI_CONFIG(&proxy->pci_dev);
412 if (addr < config)
413 return virtio_ioport_read(proxy, addr);
414 addr -= config;
415 return virtio_config_readb(proxy->vdev, addr);
416 }
417
418 static uint32_t virtio_pci_config_readw(void *opaque, uint32_t addr)
419 {
420 VirtIOPCIProxy *proxy = opaque;
421 uint32_t config = VIRTIO_PCI_CONFIG(&proxy->pci_dev);
422 uint16_t val;
423 if (addr < config)
424 return virtio_ioport_read(proxy, addr);
425 addr -= config;
426 val = virtio_config_readw(proxy->vdev, addr);
427 if (virtio_is_big_endian()) {
428 /*
429 * virtio is odd, ioports are LE but config space is target native
430 * endian. However, in qemu, all PIO is LE, so we need to re-swap
431 * on BE targets
432 */
433 val = bswap16(val);
434 }
435 return val;
436 }
437
438 static uint32_t virtio_pci_config_readl(void *opaque, uint32_t addr)
439 {
440 VirtIOPCIProxy *proxy = opaque;
441 uint32_t config = VIRTIO_PCI_CONFIG(&proxy->pci_dev);
442 uint32_t val;
443 if (addr < config)
444 return virtio_ioport_read(proxy, addr);
445 addr -= config;
446 val = virtio_config_readl(proxy->vdev, addr);
447 if (virtio_is_big_endian()) {
448 val = bswap32(val);
449 }
450 return val;
451 }
452
453 static void virtio_pci_config_writeb(void *opaque, uint32_t addr, uint32_t val)
454 {
455 VirtIOPCIProxy *proxy = opaque;
456 uint32_t config = VIRTIO_PCI_CONFIG(&proxy->pci_dev);
457 if (addr < config) {
458 virtio_ioport_write(proxy, addr, val);
459 return;
460 }
461 addr -= config;
462 virtio_config_writeb(proxy->vdev, addr, val);
463 }
464
465 static void virtio_pci_config_writew(void *opaque, uint32_t addr, uint32_t val)
466 {
467 VirtIOPCIProxy *proxy = opaque;
468 uint32_t config = VIRTIO_PCI_CONFIG(&proxy->pci_dev);
469 if (addr < config) {
470 virtio_ioport_write(proxy, addr, val);
471 return;
472 }
473 addr -= config;
474 if (virtio_is_big_endian()) {
475 val = bswap16(val);
476 }
477 virtio_config_writew(proxy->vdev, addr, val);
478 }
479
480 static void virtio_pci_config_writel(void *opaque, uint32_t addr, uint32_t val)
481 {
482 VirtIOPCIProxy *proxy = opaque;
483 uint32_t config = VIRTIO_PCI_CONFIG(&proxy->pci_dev);
484 if (addr < config) {
485 virtio_ioport_write(proxy, addr, val);
486 return;
487 }
488 addr -= config;
489 if (virtio_is_big_endian()) {
490 val = bswap32(val);
491 }
492 virtio_config_writel(proxy->vdev, addr, val);
493 }
494
/* Legacy port I/O dispatch table for BAR 0: byte/word/long read and write
 * handlers covering the whole I/O range. */
static const MemoryRegionPortio virtio_portio[] = {
    { 0, 0x10000, 1, .write = virtio_pci_config_writeb, },
    { 0, 0x10000, 2, .write = virtio_pci_config_writew, },
    { 0, 0x10000, 4, .write = virtio_pci_config_writel, },
    { 0, 0x10000, 1, .read = virtio_pci_config_readb, },
    { 0, 0x10000, 2, .read = virtio_pci_config_readw, },
    { 0, 0x10000, 4, .read = virtio_pci_config_readl, },
    PORTIO_END_OF_LIST()
};

/* Guest PIO is little-endian regardless of target endianness; the readw/
 * readl helpers re-swap device config on BE targets. */
static const MemoryRegionOps virtio_pci_config_ops = {
    .old_portio = virtio_portio,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
509
/* PCI config-space write hook.  After the default handling, if the guest
 * cleared bus mastering (and is not one of the buggy guests flagged with
 * BUS_MASTER_BUG), stop DMA: drop ioeventfd and clear DRIVER_OK. */
static void virtio_write_config(PCIDevice *pci_dev, uint32_t address,
                                uint32_t val, int len)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);

    pci_default_write_config(pci_dev, address, val, len);

    if (range_covers_byte(address, len, PCI_COMMAND) &&
        !(pci_dev->config[PCI_COMMAND] & PCI_COMMAND_MASTER) &&
        !(proxy->flags & VIRTIO_PCI_FLAG_BUS_MASTER_BUG)) {
        virtio_pci_stop_ioeventfd(proxy);
        virtio_set_status(proxy->vdev,
                          proxy->vdev->status & ~VIRTIO_CONFIG_S_DRIVER_OK);
    }

    /* Let MSI-X react to writes that touch its capability/table. */
    msix_write_config(pci_dev, address, val, len);
}
527
528 static unsigned virtio_pci_get_features(void *opaque)
529 {
530 VirtIOPCIProxy *proxy = opaque;
531 return proxy->host_features;
532 }
533
534 static void virtio_pci_guest_notifier_read(void *opaque)
535 {
536 VirtQueue *vq = opaque;
537 EventNotifier *n = virtio_queue_get_guest_notifier(vq);
538 if (event_notifier_test_and_clear(n)) {
539 virtio_irq(vq);
540 }
541 }
542
/* Route queue queue_no's guest notifier through an in-kernel irqfd bound
 * to the MSI route for this vector.  The route is shared by all queues on
 * the vector and refcounted via irqfd->users. */
static int kvm_virtio_pci_vq_vector_use(VirtIOPCIProxy *proxy,
                                        unsigned int queue_no,
                                        unsigned int vector,
                                        MSIMessage msg)
{
    VirtQueue *vq = virtio_get_queue(proxy->vdev, queue_no);
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    int fd, ret;

    fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vq));

    /* First user of this vector allocates the MSI route. */
    if (irqfd->users == 0) {
        ret = kvm_irqchip_add_msi_route(kvm_state, msg);
        if (ret < 0) {
            return ret;
        }
        irqfd->virq = ret;
    }
    irqfd->users++;

    ret = kvm_irqchip_add_irqfd(kvm_state, fd, irqfd->virq);
    if (ret < 0) {
        /* Undo the refcount; free the route if we were its only user. */
        if (--irqfd->users == 0) {
            kvm_irqchip_release_virq(kvm_state, irqfd->virq);
        }
        return ret;
    }

    /* KVM consumes the eventfd directly now; drop the userspace handler. */
    qemu_set_fd_handler(fd, NULL, NULL, NULL);

    return 0;
}
575
/* Undo kvm_virtio_pci_vq_vector_use(): detach the irqfd, drop the shared
 * MSI route when the last queue stops using it, and hand the notifier
 * back to the userspace poll handler. */
static void kvm_virtio_pci_vq_vector_release(VirtIOPCIProxy *proxy,
                                             unsigned int queue_no,
                                             unsigned int vector)
{
    VirtQueue *vq = virtio_get_queue(proxy->vdev, queue_no);
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    int fd, ret;

    fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vq));

    ret = kvm_irqchip_remove_irqfd(kvm_state, fd, irqfd->virq);
    assert(ret == 0);

    if (--irqfd->users == 0) {
        kvm_irqchip_release_virq(kvm_state, irqfd->virq);
    }

    /* Interrupts are injected from userspace again. */
    qemu_set_fd_handler(fd, virtio_pci_guest_notifier_read, NULL, vq);
}
595
/* MSI-X vector-use notifier: wire an irqfd for every active queue routed
 * to this vector.  On failure, roll back the queues already wired. */
static int kvm_virtio_pci_vector_use(PCIDevice *dev, unsigned vector,
                                     MSIMessage msg)
{
    VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = proxy->vdev;
    int ret, queue_no;

    for (queue_no = 0; queue_no < VIRTIO_PCI_QUEUE_MAX; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            break;      /* queues are allocated contiguously from 0 */
        }
        if (virtio_queue_vector(vdev, queue_no) != vector) {
            continue;
        }
        ret = kvm_virtio_pci_vq_vector_use(proxy, queue_no, vector, msg);
        if (ret < 0) {
            goto undo;
        }
    }
    return 0;

undo:
    /* Release queues [0, queue_no) that were bound to this vector. */
    while (--queue_no >= 0) {
        if (virtio_queue_vector(vdev, queue_no) != vector) {
            continue;
        }
        kvm_virtio_pci_vq_vector_release(proxy, queue_no, vector);
    }
    return ret;
}
626
627 static void kvm_virtio_pci_vector_release(PCIDevice *dev, unsigned vector)
628 {
629 VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
630 VirtIODevice *vdev = proxy->vdev;
631 int queue_no;
632
633 for (queue_no = 0; queue_no < VIRTIO_PCI_QUEUE_MAX; queue_no++) {
634 if (!virtio_queue_get_num(vdev, queue_no)) {
635 break;
636 }
637 if (virtio_queue_vector(vdev, queue_no) != vector) {
638 continue;
639 }
640 kvm_virtio_pci_vq_vector_release(proxy, queue_no, vector);
641 }
642 }
643
/* Create (assign=true) or destroy (assign=false) queue n's guest notifier
 * and hook it to the main loop so interrupts get injected from userspace.
 * Returns 0 or a negative errno from event_notifier_init(). */
static int virtio_pci_set_guest_notifier(void *opaque, int n, bool assign)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtQueue *vq = virtio_get_queue(proxy->vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);

    if (assign) {
        int r = event_notifier_init(notifier, 0);
        if (r < 0) {
            return r;
        }
        qemu_set_fd_handler(event_notifier_get_fd(notifier),
                            virtio_pci_guest_notifier_read, NULL, vq);
    } else {
        qemu_set_fd_handler(event_notifier_get_fd(notifier),
                            NULL, NULL, NULL);
        /* Test and clear notifier before closing it,
         * in case poll callback didn't have time to run. */
        virtio_pci_guest_notifier_read(vq);
        event_notifier_cleanup(notifier);
    }

    return 0;
}
668
669 static bool virtio_pci_query_guest_notifiers(void *opaque)
670 {
671 VirtIOPCIProxy *proxy = opaque;
672 return msix_enabled(&proxy->pci_dev);
673 }
674
/* Assign or deassign guest notifiers for every active queue, plus the
 * per-vector KVM irqfd bookkeeping when an in-kernel irqchip is used.
 * Ordering matters: vector notifiers are torn down before, and set up
 * after, the per-queue guest notifiers. */
static int virtio_pci_set_guest_notifiers(void *opaque, bool assign)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = proxy->vdev;
    int r, n;

    /* Must unset vector notifier while guest notifier is still assigned */
    if (kvm_irqchip_in_kernel() && !assign) {
        msix_unset_vector_notifiers(&proxy->pci_dev);
        g_free(proxy->vector_irqfd);
        proxy->vector_irqfd = NULL;
    }

    for (n = 0; n < VIRTIO_PCI_QUEUE_MAX; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            break;      /* queues are allocated contiguously from 0 */
        }

        r = virtio_pci_set_guest_notifier(opaque, n, assign);
        if (r < 0) {
            goto assign_error;
        }
    }

    /* Must set vector notifier after guest notifier has been assigned */
    if (kvm_irqchip_in_kernel() && assign) {
        proxy->vector_irqfd =
            g_malloc0(sizeof(*proxy->vector_irqfd) *
                      msix_nr_vectors_allocated(&proxy->pci_dev));
        r = msix_set_vector_notifiers(&proxy->pci_dev,
                                      kvm_virtio_pci_vector_use,
                                      kvm_virtio_pci_vector_release);
        if (r < 0) {
            goto assign_error;
        }
    }

    return 0;

assign_error:
    /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
    assert(assign);
    while (--n >= 0) {
        virtio_pci_set_guest_notifier(opaque, n, !assign);
    }
    return r;
}
722
/* Binding callback used by backends (e.g. vhost) to take over queue n's
 * host notifier; ioeventfd is disabled for the transport while assigned. */
static int virtio_pci_set_host_notifier(void *opaque, int n, bool assign)
{
    VirtIOPCIProxy *proxy = opaque;

    /* Stop using ioeventfd for virtqueue kick if the device starts using host
     * notifiers.  This makes it easy to avoid stepping on each others' toes.
     */
    proxy->ioeventfd_disabled = assign;
    if (assign) {
        virtio_pci_stop_ioeventfd(proxy);
    }
    /* We don't need to start here: it's not needed because backend
     * currently only stops on status change away from ok,
     * reset, vmstop and such. If we do add code to start here,
     * need to check vmstate, device state etc. */
    return virtio_pci_set_host_notifier_internal(proxy, n, assign);
}
740
741 static void virtio_pci_vmstate_change(void *opaque, bool running)
742 {
743 VirtIOPCIProxy *proxy = opaque;
744
745 if (running) {
746 /* Try to find out if the guest has bus master disabled, but is
747 in ready state. Then we have a buggy guest OS. */
748 if ((proxy->vdev->status & VIRTIO_CONFIG_S_DRIVER_OK) &&
749 !(proxy->pci_dev.config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
750 proxy->flags |= VIRTIO_PCI_FLAG_BUS_MASTER_BUG;
751 }
752 virtio_pci_start_ioeventfd(proxy);
753 } else {
754 virtio_pci_stop_ioeventfd(proxy);
755 }
756 }
757
/* Callbacks through which the generic virtio core drives this PCI
 * transport (interrupt delivery, migration, notifier management). */
static const VirtIOBindings virtio_pci_bindings = {
    .notify = virtio_pci_notify,
    .save_config = virtio_pci_save_config,
    .load_config = virtio_pci_load_config,
    .save_queue = virtio_pci_save_queue,
    .load_queue = virtio_pci_load_queue,
    .get_features = virtio_pci_get_features,
    .query_guest_notifiers = virtio_pci_query_guest_notifiers,
    .set_host_notifier = virtio_pci_set_host_notifier,
    .set_guest_notifiers = virtio_pci_set_guest_notifiers,
    .vmstate_change = virtio_pci_vmstate_change,
};
770
/* Common transport setup shared by all virtio PCI front ends: PCI IDs,
 * MSI-X BAR 1, I/O BAR 0, and binding the virtio device to this proxy. */
void virtio_init_pci(VirtIOPCIProxy *proxy, VirtIODevice *vdev)
{
    uint8_t *config;
    uint32_t size;

    proxy->vdev = vdev;

    config = proxy->pci_dev.config;

    /* An optional "class" qdev property overrides the default class code. */
    if (proxy->class_code) {
        pci_config_set_class(config, proxy->class_code);
    }
    pci_set_word(config + PCI_SUBSYSTEM_VENDOR_ID,
                 pci_get_word(config + PCI_VENDOR_ID));
    pci_set_word(config + PCI_SUBSYSTEM_ID, vdev->device_id);
    config[PCI_INTERRUPT_PIN] = 1;

    /* BAR 1 holds the MSI-X table; if msix_init() fails (or no vectors
     * were requested) fall back to no MSI-X at all. */
    memory_region_init(&proxy->msix_bar, "virtio-msix", 4096);
    if (vdev->nvectors && !msix_init(&proxy->pci_dev, vdev->nvectors,
                                     &proxy->msix_bar, 1, 0)) {
        pci_register_bar(&proxy->pci_dev, 1, PCI_BASE_ADDRESS_SPACE_MEMORY,
                         &proxy->msix_bar);
    } else
        vdev->nvectors = 0;

    proxy->pci_dev.config_write = virtio_write_config;

    /* BAR 0 (I/O) covers the common header plus the device-specific config,
     * rounded up to a power of two. */
    size = VIRTIO_PCI_REGION_SIZE(&proxy->pci_dev) + vdev->config_len;
    if (size & (size-1))
        size = 1 << qemu_fls(size);

    memory_region_init_io(&proxy->bar, &virtio_pci_config_ops, proxy,
                          "virtio-pci", size);
    pci_register_bar(&proxy->pci_dev, 0, PCI_BASE_ADDRESS_SPACE_IO,
                     &proxy->bar);

    if (!kvm_has_many_ioeventfds()) {
        proxy->flags &= ~VIRTIO_PCI_FLAG_USE_IOEVENTFD;
    }

    virtio_bind_device(vdev, &virtio_pci_bindings, proxy);
    /* Transport-level feature bits, then let the device filter the rest. */
    proxy->host_features |= 0x1 << VIRTIO_F_NOTIFY_ON_EMPTY;
    proxy->host_features |= 0x1 << VIRTIO_F_BAD_FEATURE;
    proxy->host_features = vdev->get_features(vdev, proxy->host_features);
}
816
817 static int virtio_blk_init_pci(PCIDevice *pci_dev)
818 {
819 VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
820 VirtIODevice *vdev;
821
822 if (proxy->class_code != PCI_CLASS_STORAGE_SCSI &&
823 proxy->class_code != PCI_CLASS_STORAGE_OTHER)
824 proxy->class_code = PCI_CLASS_STORAGE_SCSI;
825
826 vdev = virtio_blk_init(&pci_dev->qdev, &proxy->blk);
827 if (!vdev) {
828 return -1;
829 }
830 vdev->nvectors = proxy->nvectors;
831 virtio_init_pci(proxy, vdev);
832 /* make the actual value visible */
833 proxy->nvectors = vdev->nvectors;
834 return 0;
835 }
836
837 static int virtio_exit_pci(PCIDevice *pci_dev)
838 {
839 VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
840 int r;
841
842 memory_region_destroy(&proxy->bar);
843 r = msix_uninit(pci_dev, &proxy->msix_bar);
844 memory_region_destroy(&proxy->msix_bar);
845 return r;
846 }
847
848 static int virtio_blk_exit_pci(PCIDevice *pci_dev)
849 {
850 VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
851
852 virtio_pci_stop_ioeventfd(proxy);
853 virtio_blk_exit(proxy->vdev);
854 return virtio_exit_pci(pci_dev);
855 }
856
857 static int virtio_serial_init_pci(PCIDevice *pci_dev)
858 {
859 VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
860 VirtIODevice *vdev;
861
862 if (proxy->class_code != PCI_CLASS_COMMUNICATION_OTHER &&
863 proxy->class_code != PCI_CLASS_DISPLAY_OTHER && /* qemu 0.10 */
864 proxy->class_code != PCI_CLASS_OTHERS) /* qemu-kvm */
865 proxy->class_code = PCI_CLASS_COMMUNICATION_OTHER;
866
867 vdev = virtio_serial_init(&pci_dev->qdev, &proxy->serial);
868 if (!vdev) {
869 return -1;
870 }
871 vdev->nvectors = proxy->nvectors == DEV_NVECTORS_UNSPECIFIED
872 ? proxy->serial.max_virtserial_ports + 1
873 : proxy->nvectors;
874 virtio_init_pci(proxy, vdev);
875 proxy->nvectors = vdev->nvectors;
876 return 0;
877 }
878
879 static int virtio_serial_exit_pci(PCIDevice *pci_dev)
880 {
881 VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
882
883 virtio_pci_stop_ioeventfd(proxy);
884 virtio_serial_exit(proxy->vdev);
885 return virtio_exit_pci(pci_dev);
886 }
887
888 static int virtio_net_init_pci(PCIDevice *pci_dev)
889 {
890 VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
891 VirtIODevice *vdev;
892
893 vdev = virtio_net_init(&pci_dev->qdev, &proxy->nic, &proxy->net);
894
895 vdev->nvectors = proxy->nvectors;
896 virtio_init_pci(proxy, vdev);
897
898 /* make the actual value visible */
899 proxy->nvectors = vdev->nvectors;
900 return 0;
901 }
902
903 static int virtio_net_exit_pci(PCIDevice *pci_dev)
904 {
905 VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
906
907 virtio_pci_stop_ioeventfd(proxy);
908 virtio_net_exit(proxy->vdev);
909 return virtio_exit_pci(pci_dev);
910 }
911
912 static int virtio_balloon_init_pci(PCIDevice *pci_dev)
913 {
914 VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
915 VirtIODevice *vdev;
916
917 if (proxy->class_code != PCI_CLASS_OTHERS &&
918 proxy->class_code != PCI_CLASS_MEMORY_RAM) { /* qemu < 1.1 */
919 proxy->class_code = PCI_CLASS_OTHERS;
920 }
921
922 vdev = virtio_balloon_init(&pci_dev->qdev);
923 if (!vdev) {
924 return -1;
925 }
926 virtio_init_pci(proxy, vdev);
927 return 0;
928 }
929
930 static int virtio_balloon_exit_pci(PCIDevice *pci_dev)
931 {
932 VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
933
934 virtio_pci_stop_ioeventfd(proxy);
935 virtio_balloon_exit(proxy->vdev);
936 return virtio_exit_pci(pci_dev);
937 }
938
/* qdev properties for "virtio-blk-pci". */
static Property virtio_blk_properties[] = {
    DEFINE_PROP_HEX32("class", VirtIOPCIProxy, class_code, 0),
    DEFINE_BLOCK_PROPERTIES(VirtIOPCIProxy, blk.conf),
    DEFINE_PROP_STRING("serial", VirtIOPCIProxy, blk.serial),
#ifdef __linux__
    DEFINE_PROP_BIT("scsi", VirtIOPCIProxy, blk.scsi, 0, true),
#endif
    DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags, VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
    DEFINE_VIRTIO_BLK_FEATURES(VirtIOPCIProxy, host_features),
    DEFINE_PROP_END_OF_LIST(),
};

/* QOM class init for "virtio-blk-pci". */
static void virtio_blk_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->init = virtio_blk_init_pci;
    k->exit = virtio_blk_exit_pci;
    k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    k->device_id = PCI_DEVICE_ID_VIRTIO_BLOCK;
    k->revision = VIRTIO_PCI_ABI_VERSION;
    k->class_id = PCI_CLASS_STORAGE_SCSI;
    dc->reset = virtio_pci_reset;
    dc->props = virtio_blk_properties;
}

static TypeInfo virtio_blk_info = {
    .name = "virtio-blk-pci",
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VirtIOPCIProxy),
    .class_init = virtio_blk_class_init,
};
973
/* qdev properties for "virtio-net-pci".  Note: ioeventfd defaults off
 * here, unlike the other virtio devices. */
static Property virtio_net_properties[] = {
    DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags, VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, false),
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 3),
    DEFINE_VIRTIO_NET_FEATURES(VirtIOPCIProxy, host_features),
    DEFINE_NIC_PROPERTIES(VirtIOPCIProxy, nic),
    DEFINE_PROP_UINT32("x-txtimer", VirtIOPCIProxy, net.txtimer, TX_TIMER_INTERVAL),
    DEFINE_PROP_INT32("x-txburst", VirtIOPCIProxy, net.txburst, TX_BURST),
    DEFINE_PROP_STRING("tx", VirtIOPCIProxy, net.tx),
    DEFINE_PROP_END_OF_LIST(),
};

/* QOM class init for "virtio-net-pci". */
static void virtio_net_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->init = virtio_net_init_pci;
    k->exit = virtio_net_exit_pci;
    k->romfile = "pxe-virtio.rom";
    k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    k->device_id = PCI_DEVICE_ID_VIRTIO_NET;
    k->revision = VIRTIO_PCI_ABI_VERSION;
    k->class_id = PCI_CLASS_NETWORK_ETHERNET;
    dc->reset = virtio_pci_reset;
    dc->props = virtio_net_properties;
}

static TypeInfo virtio_net_info = {
    .name = "virtio-net-pci",
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VirtIOPCIProxy),
    .class_init = virtio_net_class_init,
};
1007
/* qdev properties for "virtio-serial-pci". */
static Property virtio_serial_properties[] = {
    DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags, VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, DEV_NVECTORS_UNSPECIFIED),
    DEFINE_PROP_HEX32("class", VirtIOPCIProxy, class_code, 0),
    DEFINE_VIRTIO_COMMON_FEATURES(VirtIOPCIProxy, host_features),
    DEFINE_PROP_UINT32("max_ports", VirtIOPCIProxy, serial.max_virtserial_ports, 31),
    DEFINE_PROP_END_OF_LIST(),
};

/* QOM class init for "virtio-serial-pci". */
static void virtio_serial_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->init = virtio_serial_init_pci;
    k->exit = virtio_serial_exit_pci;
    k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    k->device_id = PCI_DEVICE_ID_VIRTIO_CONSOLE;
    k->revision = VIRTIO_PCI_ABI_VERSION;
    k->class_id = PCI_CLASS_COMMUNICATION_OTHER;
    dc->reset = virtio_pci_reset;
    dc->props = virtio_serial_properties;
}

static TypeInfo virtio_serial_info = {
    .name = "virtio-serial-pci",
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VirtIOPCIProxy),
    .class_init = virtio_serial_class_init,
};
1038
/* qdev properties for "virtio-balloon-pci". */
static Property virtio_balloon_properties[] = {
    DEFINE_VIRTIO_COMMON_FEATURES(VirtIOPCIProxy, host_features),
    DEFINE_PROP_HEX32("class", VirtIOPCIProxy, class_code, 0),
    DEFINE_PROP_END_OF_LIST(),
};

/* QOM class init for "virtio-balloon-pci". */
static void virtio_balloon_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->init = virtio_balloon_init_pci;
    k->exit = virtio_balloon_exit_pci;
    k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    k->device_id = PCI_DEVICE_ID_VIRTIO_BALLOON;
    k->revision = VIRTIO_PCI_ABI_VERSION;
    k->class_id = PCI_CLASS_OTHERS;
    dc->reset = virtio_pci_reset;
    dc->props = virtio_balloon_properties;
}

static TypeInfo virtio_balloon_info = {
    .name = "virtio-balloon-pci",
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VirtIOPCIProxy),
    .class_init = virtio_balloon_class_init,
};
1066
1067 static int virtio_scsi_init_pci(PCIDevice *pci_dev)
1068 {
1069 VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
1070 VirtIODevice *vdev;
1071
1072 vdev = virtio_scsi_init(&pci_dev->qdev, &proxy->scsi);
1073 if (!vdev) {
1074 return -EINVAL;
1075 }
1076
1077 vdev->nvectors = proxy->nvectors;
1078 virtio_init_pci(proxy, vdev);
1079
1080 /* make the actual value visible */
1081 proxy->nvectors = vdev->nvectors;
1082 return 0;
1083 }
1084
1085 static int virtio_scsi_exit_pci(PCIDevice *pci_dev)
1086 {
1087 VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
1088
1089 virtio_scsi_exit(proxy->vdev);
1090 return virtio_exit_pci(pci_dev);
1091 }
1092
/* qdev properties for "virtio-scsi-pci". */
static Property virtio_scsi_properties[] = {
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
    DEFINE_VIRTIO_SCSI_PROPERTIES(VirtIOPCIProxy, host_features, scsi),
    DEFINE_PROP_END_OF_LIST(),
};

/* QOM class init for "virtio-scsi-pci". */
static void virtio_scsi_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->init = virtio_scsi_init_pci;
    k->exit = virtio_scsi_exit_pci;
    k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    k->device_id = PCI_DEVICE_ID_VIRTIO_SCSI;
    k->revision = 0x00;
    k->class_id = PCI_CLASS_STORAGE_SCSI;
    dc->reset = virtio_pci_reset;
    dc->props = virtio_scsi_properties;
}

static TypeInfo virtio_scsi_info = {
    .name = "virtio-scsi-pci",
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VirtIOPCIProxy),
    .class_init = virtio_scsi_class_init,
};
1120
/* Register all virtio PCI device models with the QOM type system. */
static void virtio_pci_register_types(void)
{
    type_register_static(&virtio_blk_info);
    type_register_static(&virtio_net_info);
    type_register_static(&virtio_serial_info);
    type_register_static(&virtio_balloon_info);
    type_register_static(&virtio_scsi_info);
}

type_init(virtio_pci_register_types)