1 /*
2 * Virtio PCI Bindings
3 *
4 * Copyright IBM, Corp. 2007
5 * Copyright (c) 2009 CodeSourcery
6 *
7 * Authors:
8 * Anthony Liguori <aliguori@us.ibm.com>
9 * Paul Brook <paul@codesourcery.com>
10 *
11 * This work is licensed under the terms of the GNU GPL, version 2. See
12 * the COPYING file in the top-level directory.
13 *
14 * Contributions after 2012-01-13 are licensed under the terms of the
15 * GNU GPL, version 2 or (at your option) any later version.
16 */
17
18 #include <inttypes.h>
19
20 #include "virtio.h"
21 #include "virtio-blk.h"
22 #include "virtio-net.h"
23 #include "virtio-serial.h"
24 #include "virtio-scsi.h"
25 #include "pci.h"
26 #include "qemu-error.h"
27 #include "msi.h"
28 #include "msix.h"
29 #include "net.h"
30 #include "loader.h"
31 #include "kvm.h"
32 #include "blockdev.h"
33 #include "virtio-pci.h"
34 #include "range.h"
35
36 /* from Linux's linux/virtio_pci.h */
37
38 /* A 32-bit r/o bitmask of the features supported by the host */
39 #define VIRTIO_PCI_HOST_FEATURES 0
40
41 /* A 32-bit r/w bitmask of features activated by the guest */
42 #define VIRTIO_PCI_GUEST_FEATURES 4
43
44 /* A 32-bit r/w PFN for the currently selected queue */
45 #define VIRTIO_PCI_QUEUE_PFN 8
46
47 /* A 16-bit r/o queue size for the currently selected queue */
48 #define VIRTIO_PCI_QUEUE_NUM 12
49
50 /* A 16-bit r/w queue selector */
51 #define VIRTIO_PCI_QUEUE_SEL 14
52
53 /* A 16-bit r/w queue notifier */
54 #define VIRTIO_PCI_QUEUE_NOTIFY 16
55
56 /* An 8-bit device status register. */
57 #define VIRTIO_PCI_STATUS 18
58
59 /* An 8-bit r/o interrupt status register. Reading the value will return the
60 * current contents of the ISR and will also clear it. This is effectively
61 * a read-and-acknowledge. */
62 #define VIRTIO_PCI_ISR 19
63
64 /* MSI-X registers: only enabled if MSI-X is enabled. */
65 /* A 16-bit vector for configuration changes. */
66 #define VIRTIO_MSI_CONFIG_VECTOR 20
67 /* A 16-bit vector for selected queue notifications. */
68 #define VIRTIO_MSI_QUEUE_VECTOR 22
69
70 /* Config space size */
71 #define VIRTIO_PCI_CONFIG_NOMSI 20
72 #define VIRTIO_PCI_CONFIG_MSI 24
73 #define VIRTIO_PCI_REGION_SIZE(dev) (msix_present(dev) ? \
74 VIRTIO_PCI_CONFIG_MSI : \
75 VIRTIO_PCI_CONFIG_NOMSI)
76
77 /* The remaining space is defined by each driver as the per-driver
78 * configuration space */
79 #define VIRTIO_PCI_CONFIG(dev) (msix_enabled(dev) ? \
80 VIRTIO_PCI_CONFIG_MSI : \
81 VIRTIO_PCI_CONFIG_NOMSI)
82
83 /* How many bits to shift physical queue address written to QUEUE_PFN.
84 * 12 is historical, and due to x86 page size. */
85 #define VIRTIO_PCI_QUEUE_ADDR_SHIFT 12
86
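/* A legacy virtio guest driver programs this register block through BAR 0
 * (an I/O port window).  As a rough illustration of the layout above (the
 * ioreadN/iowriteN accessors and 'iobase' are placeholders for whatever the
 * guest actually uses, not part of this file):
 *
 *     iowrite16(n, iobase + VIRTIO_PCI_QUEUE_SEL);        select queue n
 *     num = ioread16(iobase + VIRTIO_PCI_QUEUE_NUM);      ring size (r/o)
 *     iowrite32(ring_pa >> VIRTIO_PCI_QUEUE_ADDR_SHIFT,
 *               iobase + VIRTIO_PCI_QUEUE_PFN);           publish the ring
 *     iowrite16(n, iobase + VIRTIO_PCI_QUEUE_NOTIFY);     kick queue n
 *     isr = ioread8(iobase + VIRTIO_PCI_ISR);             read-and-ack ISR
 *
 * Writing 0 to VIRTIO_PCI_QUEUE_PFN or to VIRTIO_PCI_STATUS resets the
 * device (see virtio_ioport_write below).
 */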
87 /* Flags track per-device state like workarounds for quirks in older guests. */
88 #define VIRTIO_PCI_FLAG_BUS_MASTER_BUG (1 << 0)
89
90 /* QEMU doesn't strictly need write barriers since everything runs in
91 * lock-step. We'll leave the calls to wmb() in though to make it obvious for
92 * KVM or if kqemu gets SMP support.
93 */
94 #define wmb() do { } while (0)
95
96 /* HACK for virtio to determine if it's running a big endian guest */
97 bool virtio_is_big_endian(void);
98
99 /* virtio device */
100
101 static void virtio_pci_notify(void *opaque, uint16_t vector)
102 {
103 VirtIOPCIProxy *proxy = opaque;
104 if (msix_enabled(&proxy->pci_dev))
105 msix_notify(&proxy->pci_dev, vector);
106 else
107 qemu_set_irq(proxy->pci_dev.irq[0], proxy->vdev->isr & 1);
108 }
109
110 static void virtio_pci_save_config(void * opaque, QEMUFile *f)
111 {
112 VirtIOPCIProxy *proxy = opaque;
113 pci_device_save(&proxy->pci_dev, f);
114 msix_save(&proxy->pci_dev, f);
115 if (msix_present(&proxy->pci_dev))
116 qemu_put_be16(f, proxy->vdev->config_vector);
117 }
118
119 static void virtio_pci_save_queue(void * opaque, int n, QEMUFile *f)
120 {
121 VirtIOPCIProxy *proxy = opaque;
122 if (msix_present(&proxy->pci_dev))
123 qemu_put_be16(f, virtio_queue_vector(proxy->vdev, n));
124 }
125
126 static int virtio_pci_load_config(void * opaque, QEMUFile *f)
127 {
128 VirtIOPCIProxy *proxy = opaque;
129 int ret;
130 ret = pci_device_load(&proxy->pci_dev, f);
131 if (ret) {
132 return ret;
133 }
134 msix_load(&proxy->pci_dev, f);
135 if (msix_present(&proxy->pci_dev)) {
136 qemu_get_be16s(f, &proxy->vdev->config_vector);
137 } else {
138 proxy->vdev->config_vector = VIRTIO_NO_VECTOR;
139 }
140 if (proxy->vdev->config_vector != VIRTIO_NO_VECTOR) {
141 return msix_vector_use(&proxy->pci_dev, proxy->vdev->config_vector);
142 }
143 return 0;
144 }
145
146 static int virtio_pci_load_queue(void * opaque, int n, QEMUFile *f)
147 {
148 VirtIOPCIProxy *proxy = opaque;
149 uint16_t vector;
150 if (msix_present(&proxy->pci_dev)) {
151 qemu_get_be16s(f, &vector);
152 } else {
153 vector = VIRTIO_NO_VECTOR;
154 }
155 virtio_queue_set_vector(proxy->vdev, n, vector);
156 if (vector != VIRTIO_NO_VECTOR) {
157 return msix_vector_use(&proxy->pci_dev, vector);
158 }
159 return 0;
160 }
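/* Migration layout used by the helpers above: the config section carries the
 * PCI device state, the MSI-X state and, when MSI-X is present, the config
 * change vector; each queue section carries that queue's MSI-X vector.  On
 * load the vectors are re-marked as in use via msix_vector_use() so the
 * destination's MSI-X bookkeeping matches the source's.
 */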
161
162 static int virtio_pci_set_host_notifier_internal(VirtIOPCIProxy *proxy,
163 int n, bool assign)
164 {
165 VirtQueue *vq = virtio_get_queue(proxy->vdev, n);
166 EventNotifier *notifier = virtio_queue_get_host_notifier(vq);
167 int r = 0;
168
169 if (assign) {
170 r = event_notifier_init(notifier, 1);
171 if (r < 0) {
172 error_report("%s: unable to init event notifier: %d",
173 __func__, r);
174 return r;
175 }
176 memory_region_add_eventfd(&proxy->bar, VIRTIO_PCI_QUEUE_NOTIFY, 2,
177 true, n, event_notifier_get_fd(notifier));
178 } else {
179 memory_region_del_eventfd(&proxy->bar, VIRTIO_PCI_QUEUE_NOTIFY, 2,
180 true, n, event_notifier_get_fd(notifier));
181 /* Handle the race condition where the guest kicked and we deassigned
182 * before we got around to handling the kick.
183 */
184 if (event_notifier_test_and_clear(notifier)) {
185 virtio_queue_notify_vq(vq);
186 }
187
188 event_notifier_cleanup(notifier);
189 }
190 return r;
191 }
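/* memory_region_add_eventfd() above arranges for a 2-byte guest write of the
 * value n at offset VIRTIO_PCI_QUEUE_NOTIFY in BAR 0 to signal the queue's
 * eventfd directly (with KVM this becomes an ioeventfd, so the kick never
 * reaches the userspace I/O handler).  On deassign, the notifier is checked
 * one last time so a kick that raced with the removal is not lost.
 */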
192
193 static void virtio_pci_host_notifier_read(void *opaque)
194 {
195 VirtQueue *vq = opaque;
196 EventNotifier *n = virtio_queue_get_host_notifier(vq);
197 if (event_notifier_test_and_clear(n)) {
198 virtio_queue_notify_vq(vq);
199 }
200 }
201
202 static void virtio_pci_set_host_notifier_fd_handler(VirtIOPCIProxy *proxy,
203 int n, bool assign)
204 {
205 VirtQueue *vq = virtio_get_queue(proxy->vdev, n);
206 EventNotifier *notifier = virtio_queue_get_host_notifier(vq);
207 if (assign) {
208 qemu_set_fd_handler(event_notifier_get_fd(notifier),
209 virtio_pci_host_notifier_read, NULL, vq);
210 } else {
211 qemu_set_fd_handler(event_notifier_get_fd(notifier),
212 NULL, NULL, NULL);
213 }
214 }
215
216 static void virtio_pci_start_ioeventfd(VirtIOPCIProxy *proxy)
217 {
218 int n, r;
219
220 if (!(proxy->flags & VIRTIO_PCI_FLAG_USE_IOEVENTFD) ||
221 proxy->ioeventfd_disabled ||
222 proxy->ioeventfd_started) {
223 return;
224 }
225
226 for (n = 0; n < VIRTIO_PCI_QUEUE_MAX; n++) {
227 if (!virtio_queue_get_num(proxy->vdev, n)) {
228 continue;
229 }
230
231 r = virtio_pci_set_host_notifier_internal(proxy, n, true);
232 if (r < 0) {
233 goto assign_error;
234 }
235
236 virtio_pci_set_host_notifier_fd_handler(proxy, n, true);
237 }
238 proxy->ioeventfd_started = true;
239 return;
240
241 assign_error:
242 while (--n >= 0) {
243 if (!virtio_queue_get_num(proxy->vdev, n)) {
244 continue;
245 }
246
247 virtio_pci_set_host_notifier_fd_handler(proxy, n, false);
248 r = virtio_pci_set_host_notifier_internal(proxy, n, false);
249 assert(r >= 0);
250 }
251 proxy->ioeventfd_started = false;
252 error_report("%s: failed; falling back to userspace (slower)", __func__);
253 }
254
255 static void virtio_pci_stop_ioeventfd(VirtIOPCIProxy *proxy)
256 {
257 int r;
258 int n;
259
260 if (!proxy->ioeventfd_started) {
261 return;
262 }
263
264 for (n = 0; n < VIRTIO_PCI_QUEUE_MAX; n++) {
265 if (!virtio_queue_get_num(proxy->vdev, n)) {
266 continue;
267 }
268
269 virtio_pci_set_host_notifier_fd_handler(proxy, n, false);
270 r = virtio_pci_set_host_notifier_internal(proxy, n, false);
271 assert(r >= 0);
272 }
273 proxy->ioeventfd_started = false;
274 }
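/* ioeventfd lifecycle: virtio_pci_start_ioeventfd() runs when the guest sets
 * DRIVER_OK (see virtio_ioport_write) and when the VM resumes (see
 * virtio_pci_vmstate_change); it is stopped again on a status change away
 * from DRIVER_OK, on reset, on vmstop, or when a backend takes over the host
 * notifier via virtio_pci_set_host_notifier().
 */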
275
276 void virtio_pci_reset(DeviceState *d)
277 {
278 VirtIOPCIProxy *proxy = container_of(d, VirtIOPCIProxy, pci_dev.qdev);
279 virtio_pci_stop_ioeventfd(proxy);
280 virtio_reset(proxy->vdev);
281 proxy->flags &= ~VIRTIO_PCI_FLAG_BUS_MASTER_BUG;
282 }
283
284 static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val)
285 {
286 VirtIOPCIProxy *proxy = opaque;
287 VirtIODevice *vdev = proxy->vdev;
288 target_phys_addr_t pa;
289
290 switch (addr) {
291 case VIRTIO_PCI_GUEST_FEATURES:
292 /* Guest does not negotiate properly? We have to assume nothing. */
293 if (val & (1 << VIRTIO_F_BAD_FEATURE)) {
294 val = vdev->bad_features ? vdev->bad_features(vdev) : 0;
295 }
296 virtio_set_features(vdev, val);
297 break;
298 case VIRTIO_PCI_QUEUE_PFN:
299 pa = (target_phys_addr_t)val << VIRTIO_PCI_QUEUE_ADDR_SHIFT;
300 if (pa == 0) {
301 virtio_pci_stop_ioeventfd(proxy);
302 virtio_reset(proxy->vdev);
303 msix_unuse_all_vectors(&proxy->pci_dev);
304 }
305 else
306 virtio_queue_set_addr(vdev, vdev->queue_sel, pa);
307 break;
308 case VIRTIO_PCI_QUEUE_SEL:
309 if (val < VIRTIO_PCI_QUEUE_MAX)
310 vdev->queue_sel = val;
311 break;
312 case VIRTIO_PCI_QUEUE_NOTIFY:
313 if (val < VIRTIO_PCI_QUEUE_MAX) {
314 virtio_queue_notify(vdev, val);
315 }
316 break;
317 case VIRTIO_PCI_STATUS:
318 if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) {
319 virtio_pci_stop_ioeventfd(proxy);
320 }
321
322 virtio_set_status(vdev, val & 0xFF);
323
324 if (val & VIRTIO_CONFIG_S_DRIVER_OK) {
325 virtio_pci_start_ioeventfd(proxy);
326 }
327
328 if (vdev->status == 0) {
329 virtio_reset(proxy->vdev);
330 msix_unuse_all_vectors(&proxy->pci_dev);
331 }
332
333 /* Linux before 2.6.34 sets the device to DRIVER_OK without enabling
334 the PCI bus master bit. In that case we have to disable some
335 safety checks. */
336 if ((val & VIRTIO_CONFIG_S_DRIVER_OK) &&
337 !(proxy->pci_dev.config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
338 proxy->flags |= VIRTIO_PCI_FLAG_BUS_MASTER_BUG;
339 }
340 break;
341 case VIRTIO_MSI_CONFIG_VECTOR:
342 msix_vector_unuse(&proxy->pci_dev, vdev->config_vector);
343 /* Make it possible for the guest to discover that an error took place. */
344 if (msix_vector_use(&proxy->pci_dev, val) < 0)
345 val = VIRTIO_NO_VECTOR;
346 vdev->config_vector = val;
347 break;
348 case VIRTIO_MSI_QUEUE_VECTOR:
349 msix_vector_unuse(&proxy->pci_dev,
350 virtio_queue_vector(vdev, vdev->queue_sel));
351 /* Make it possible for the guest to discover that an error took place. */
352 if (msix_vector_use(&proxy->pci_dev, val) < 0)
353 val = VIRTIO_NO_VECTOR;
354 virtio_queue_set_vector(vdev, vdev->queue_sel, val);
355 break;
356 default:
357 error_report("%s: unexpected address 0x%x value 0x%x",
358 __func__, addr, val);
359 break;
360 }
361 }
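/* Note that both guest teardown paths, writing 0 to VIRTIO_PCI_QUEUE_PFN and
 * writing 0 to the status register, are treated above as a full device
 * reset: they go through virtio_reset() and release all MSI-X vectors, so a
 * rebooting or unloading driver leaves the proxy in a clean state.
 */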
362
363 static uint32_t virtio_ioport_read(VirtIOPCIProxy *proxy, uint32_t addr)
364 {
365 VirtIODevice *vdev = proxy->vdev;
366 uint32_t ret = 0xFFFFFFFF;
367
368 switch (addr) {
369 case VIRTIO_PCI_HOST_FEATURES:
370 ret = proxy->host_features;
371 break;
372 case VIRTIO_PCI_GUEST_FEATURES:
373 ret = vdev->guest_features;
374 break;
375 case VIRTIO_PCI_QUEUE_PFN:
376 ret = virtio_queue_get_addr(vdev, vdev->queue_sel)
377 >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
378 break;
379 case VIRTIO_PCI_QUEUE_NUM:
380 ret = virtio_queue_get_num(vdev, vdev->queue_sel);
381 break;
382 case VIRTIO_PCI_QUEUE_SEL:
383 ret = vdev->queue_sel;
384 break;
385 case VIRTIO_PCI_STATUS:
386 ret = vdev->status;
387 break;
388 case VIRTIO_PCI_ISR:
389 /* reading from the ISR also clears it. */
390 ret = vdev->isr;
391 vdev->isr = 0;
392 qemu_set_irq(proxy->pci_dev.irq[0], 0);
393 break;
394 case VIRTIO_MSI_CONFIG_VECTOR:
395 ret = vdev->config_vector;
396 break;
397 case VIRTIO_MSI_QUEUE_VECTOR:
398 ret = virtio_queue_vector(vdev, vdev->queue_sel);
399 break;
400 default:
401 break;
402 }
403
404 return ret;
405 }
406
407 static uint32_t virtio_pci_config_readb(void *opaque, uint32_t addr)
408 {
409 VirtIOPCIProxy *proxy = opaque;
410 uint32_t config = VIRTIO_PCI_CONFIG(&proxy->pci_dev);
411 if (addr < config)
412 return virtio_ioport_read(proxy, addr);
413 addr -= config;
414 return virtio_config_readb(proxy->vdev, addr);
415 }
416
417 static uint32_t virtio_pci_config_readw(void *opaque, uint32_t addr)
418 {
419 VirtIOPCIProxy *proxy = opaque;
420 uint32_t config = VIRTIO_PCI_CONFIG(&proxy->pci_dev);
421 uint16_t val;
422 if (addr < config)
423 return virtio_ioport_read(proxy, addr);
424 addr -= config;
425 val = virtio_config_readw(proxy->vdev, addr);
426 if (virtio_is_big_endian()) {
427 /*
428 * virtio is odd, ioports are LE but config space is target native
429 * endian. However, in qemu, all PIO is LE, so we need to re-swap
430 * on BE targets
431 */
432 val = bswap16(val);
433 }
434 return val;
435 }
436
437 static uint32_t virtio_pci_config_readl(void *opaque, uint32_t addr)
438 {
439 VirtIOPCIProxy *proxy = opaque;
440 uint32_t config = VIRTIO_PCI_CONFIG(&proxy->pci_dev);
441 uint32_t val;
442 if (addr < config)
443 return virtio_ioport_read(proxy, addr);
444 addr -= config;
445 val = virtio_config_readl(proxy->vdev, addr);
446 if (virtio_is_big_endian()) {
447 val = bswap32(val);
448 }
449 return val;
450 }
451
452 static void virtio_pci_config_writeb(void *opaque, uint32_t addr, uint32_t val)
453 {
454 VirtIOPCIProxy *proxy = opaque;
455 uint32_t config = VIRTIO_PCI_CONFIG(&proxy->pci_dev);
456 if (addr < config) {
457 virtio_ioport_write(proxy, addr, val);
458 return;
459 }
460 addr -= config;
461 virtio_config_writeb(proxy->vdev, addr, val);
462 }
463
464 static void virtio_pci_config_writew(void *opaque, uint32_t addr, uint32_t val)
465 {
466 VirtIOPCIProxy *proxy = opaque;
467 uint32_t config = VIRTIO_PCI_CONFIG(&proxy->pci_dev);
468 if (addr < config) {
469 virtio_ioport_write(proxy, addr, val);
470 return;
471 }
472 addr -= config;
473 if (virtio_is_big_endian()) {
474 val = bswap16(val);
475 }
476 virtio_config_writew(proxy->vdev, addr, val);
477 }
478
479 static void virtio_pci_config_writel(void *opaque, uint32_t addr, uint32_t val)
480 {
481 VirtIOPCIProxy *proxy = opaque;
482 uint32_t config = VIRTIO_PCI_CONFIG(&proxy->pci_dev);
483 if (addr < config) {
484 virtio_ioport_write(proxy, addr, val);
485 return;
486 }
487 addr -= config;
488 if (virtio_is_big_endian()) {
489 val = bswap32(val);
490 }
491 virtio_config_writel(proxy->vdev, addr, val);
492 }
493
494 static const MemoryRegionPortio virtio_portio[] = {
495 { 0, 0x10000, 1, .write = virtio_pci_config_writeb, },
496 { 0, 0x10000, 2, .write = virtio_pci_config_writew, },
497 { 0, 0x10000, 4, .write = virtio_pci_config_writel, },
498 { 0, 0x10000, 1, .read = virtio_pci_config_readb, },
499 { 0, 0x10000, 2, .read = virtio_pci_config_readw, },
500 { 0, 0x10000, 4, .read = virtio_pci_config_readl, },
501 PORTIO_END_OF_LIST()
502 };
503
504 static const MemoryRegionOps virtio_pci_config_ops = {
505 .old_portio = virtio_portio,
506 .endianness = DEVICE_LITTLE_ENDIAN,
507 };
508
509 static void virtio_write_config(PCIDevice *pci_dev, uint32_t address,
510 uint32_t val, int len)
511 {
512 VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
513
514 pci_default_write_config(pci_dev, address, val, len);
515
516 if (range_covers_byte(address, len, PCI_COMMAND) &&
517 !(pci_dev->config[PCI_COMMAND] & PCI_COMMAND_MASTER) &&
518 !(proxy->flags & VIRTIO_PCI_FLAG_BUS_MASTER_BUG)) {
519 virtio_pci_stop_ioeventfd(proxy);
520 virtio_set_status(proxy->vdev,
521 proxy->vdev->status & ~VIRTIO_CONFIG_S_DRIVER_OK);
522 }
523 }
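/* Clearing PCI_COMMAND_MASTER is treated as the guest revoking the device's
 * permission to DMA: DRIVER_OK is dropped and ioeventfd is stopped.  The
 * exception is the VIRTIO_PCI_FLAG_BUS_MASTER_BUG workaround set elsewhere
 * for old Linux guests that reach DRIVER_OK without ever enabling bus master.
 */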
524
525 static unsigned virtio_pci_get_features(void *opaque)
526 {
527 VirtIOPCIProxy *proxy = opaque;
528 return proxy->host_features;
529 }
530
531 static void virtio_pci_guest_notifier_read(void *opaque)
532 {
533 VirtQueue *vq = opaque;
534 EventNotifier *n = virtio_queue_get_guest_notifier(vq);
535 if (event_notifier_test_and_clear(n)) {
536 virtio_irq(vq);
537 }
538 }
539
540 static int kvm_virtio_pci_vq_vector_use(VirtIOPCIProxy *proxy,
541 unsigned int queue_no,
542 unsigned int vector,
543 MSIMessage msg)
544 {
545 VirtQueue *vq = virtio_get_queue(proxy->vdev, queue_no);
546 VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
547 int fd, ret;
548
549 fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vq));
550
551 if (irqfd->users == 0) {
552 ret = kvm_irqchip_add_msi_route(kvm_state, msg);
553 if (ret < 0) {
554 return ret;
555 }
556 irqfd->virq = ret;
557 }
558 irqfd->users++;
559
560 ret = kvm_irqchip_add_irqfd(kvm_state, fd, irqfd->virq);
561 if (ret < 0) {
562 if (--irqfd->users == 0) {
563 kvm_irqchip_release_virq(kvm_state, irqfd->virq);
564 }
565 return ret;
566 }
567
568 qemu_set_fd_handler(fd, NULL, NULL, NULL);
569
570 return 0;
571 }
572
573 static void kvm_virtio_pci_vq_vector_release(VirtIOPCIProxy *proxy,
574 unsigned int queue_no,
575 unsigned int vector)
576 {
577 VirtQueue *vq = virtio_get_queue(proxy->vdev, queue_no);
578 VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
579 int fd, ret;
580
581 fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vq));
582
583 ret = kvm_irqchip_remove_irqfd(kvm_state, fd, irqfd->virq);
584 assert(ret == 0);
585
586 if (--irqfd->users == 0) {
587 kvm_irqchip_release_virq(kvm_state, irqfd->virq);
588 }
589
590 qemu_set_fd_handler(fd, virtio_pci_guest_notifier_read, NULL, vq);
591 }
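/* Queues that share an MSI-X vector share a single KVM MSI route:
 * irqfd->users counts how many queues currently reference the route, the
 * route is allocated on first use and released when the count drops back to
 * zero.  While an irqfd binding exists the guest notifier is handled entirely
 * in the kernel; once it is removed, the userspace fd handler
 * (virtio_pci_guest_notifier_read) is reinstalled.
 */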
592
593 static int kvm_virtio_pci_vector_use(PCIDevice *dev, unsigned vector,
594 MSIMessage msg)
595 {
596 VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
597 VirtIODevice *vdev = proxy->vdev;
598 int ret, queue_no;
599
600 for (queue_no = 0; queue_no < VIRTIO_PCI_QUEUE_MAX; queue_no++) {
601 if (!virtio_queue_get_num(vdev, queue_no)) {
602 break;
603 }
604 if (virtio_queue_vector(vdev, queue_no) != vector) {
605 continue;
606 }
607 ret = kvm_virtio_pci_vq_vector_use(proxy, queue_no, vector, msg);
608 if (ret < 0) {
609 goto undo;
610 }
611 }
612 return 0;
613
614 undo:
615 while (--queue_no >= 0) {
616 if (virtio_queue_vector(vdev, queue_no) != vector) {
617 continue;
618 }
619 kvm_virtio_pci_vq_vector_release(proxy, queue_no, vector);
620 }
621 return ret;
622 }
623
624 static void kvm_virtio_pci_vector_release(PCIDevice *dev, unsigned vector)
625 {
626 VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
627 VirtIODevice *vdev = proxy->vdev;
628 int queue_no;
629
630 for (queue_no = 0; queue_no < VIRTIO_PCI_QUEUE_MAX; queue_no++) {
631 if (!virtio_queue_get_num(vdev, queue_no)) {
632 break;
633 }
634 if (virtio_queue_vector(vdev, queue_no) != vector) {
635 continue;
636 }
637 kvm_virtio_pci_vq_vector_release(proxy, queue_no, vector);
638 }
639 }
640
641 static int virtio_pci_set_guest_notifier(void *opaque, int n, bool assign)
642 {
643 VirtIOPCIProxy *proxy = opaque;
644 VirtQueue *vq = virtio_get_queue(proxy->vdev, n);
645 EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);
646
647 if (assign) {
648 int r = event_notifier_init(notifier, 0);
649 if (r < 0) {
650 return r;
651 }
652 qemu_set_fd_handler(event_notifier_get_fd(notifier),
653 virtio_pci_guest_notifier_read, NULL, vq);
654 } else {
655 qemu_set_fd_handler(event_notifier_get_fd(notifier),
656 NULL, NULL, NULL);
657 /* Test and clear notifier before closing it,
658 * in case poll callback didn't have time to run. */
659 virtio_pci_guest_notifier_read(vq);
660 event_notifier_cleanup(notifier);
661 }
662
663 return 0;
664 }
665
666 static bool virtio_pci_query_guest_notifiers(void *opaque)
667 {
668 VirtIOPCIProxy *proxy = opaque;
669 return msix_enabled(&proxy->pci_dev);
670 }
671
672 static int virtio_pci_set_guest_notifiers(void *opaque, bool assign)
673 {
674 VirtIOPCIProxy *proxy = opaque;
675 VirtIODevice *vdev = proxy->vdev;
676 int r, n;
677
678 /* Must unset vector notifier while guest notifier is still assigned */
679 if (kvm_irqchip_in_kernel() && !assign) {
680 msix_unset_vector_notifiers(&proxy->pci_dev);
681 g_free(proxy->vector_irqfd);
682 proxy->vector_irqfd = NULL;
683 }
684
685 for (n = 0; n < VIRTIO_PCI_QUEUE_MAX; n++) {
686 if (!virtio_queue_get_num(vdev, n)) {
687 break;
688 }
689
690 r = virtio_pci_set_guest_notifier(opaque, n, assign);
691 if (r < 0) {
692 goto assign_error;
693 }
694 }
695
696 /* Must set vector notifier after guest notifier has been assigned */
697 if (kvm_irqchip_in_kernel() && assign) {
698 proxy->vector_irqfd =
699 g_malloc0(sizeof(*proxy->vector_irqfd) *
700 msix_nr_vectors_allocated(&proxy->pci_dev));
701 r = msix_set_vector_notifiers(&proxy->pci_dev,
702 kvm_virtio_pci_vector_use,
703 kvm_virtio_pci_vector_release);
704 if (r < 0) {
705 goto assign_error;
706 }
707 }
708
709 return 0;
710
711 assign_error:
712 /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
713 assert(assign);
714 while (--n >= 0) {
715 virtio_pci_set_guest_notifier(opaque, n, !assign);
716 }
717 return r;
718 }
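/* Ordering matters here: the MSI-X vector notifiers bind the per-queue guest
 * notifier eventfds to KVM irqfds, so they must be unset while the guest
 * notifiers still exist and set only after the guest notifiers have been
 * assigned, which is why the two branches above bracket the per-queue loop.
 */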
719
720 static int virtio_pci_set_host_notifier(void *opaque, int n, bool assign)
721 {
722 VirtIOPCIProxy *proxy = opaque;
723
724 /* Stop using ioeventfd for virtqueue kick if the device starts using host
725 * notifiers. This makes it easy to avoid stepping on each other's toes.
726 */
727 proxy->ioeventfd_disabled = assign;
728 if (assign) {
729 virtio_pci_stop_ioeventfd(proxy);
730 }
731 /* We don't need to start the ioeventfd here: the backend currently only
732 * stops on a status change away from DRIVER_OK, on reset, on vmstop and
733 * the like. If we do add code to start here, we would need to check the
734 * vmstate, device state, etc. */
735 return virtio_pci_set_host_notifier_internal(proxy, n, assign);
736 }
737
738 static void virtio_pci_vmstate_change(void *opaque, bool running)
739 {
740 VirtIOPCIProxy *proxy = opaque;
741
742 if (running) {
743 /* Detect a buggy guest OS: one that has reached DRIVER_OK while
744 bus master is still disabled. */
745 if ((proxy->vdev->status & VIRTIO_CONFIG_S_DRIVER_OK) &&
746 !(proxy->pci_dev.config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
747 proxy->flags |= VIRTIO_PCI_FLAG_BUS_MASTER_BUG;
748 }
749 virtio_pci_start_ioeventfd(proxy);
750 } else {
751 virtio_pci_stop_ioeventfd(proxy);
752 }
753 }
754
755 static const VirtIOBindings virtio_pci_bindings = {
756 .notify = virtio_pci_notify,
757 .save_config = virtio_pci_save_config,
758 .load_config = virtio_pci_load_config,
759 .save_queue = virtio_pci_save_queue,
760 .load_queue = virtio_pci_load_queue,
761 .get_features = virtio_pci_get_features,
762 .query_guest_notifiers = virtio_pci_query_guest_notifiers,
763 .set_host_notifier = virtio_pci_set_host_notifier,
764 .set_guest_notifiers = virtio_pci_set_guest_notifiers,
765 .vmstate_change = virtio_pci_vmstate_change,
766 };
767
768 void virtio_init_pci(VirtIOPCIProxy *proxy, VirtIODevice *vdev)
769 {
770 uint8_t *config;
771 uint32_t size;
772
773 proxy->vdev = vdev;
774
775 config = proxy->pci_dev.config;
776
777 if (proxy->class_code) {
778 pci_config_set_class(config, proxy->class_code);
779 }
780 pci_set_word(config + PCI_SUBSYSTEM_VENDOR_ID,
781 pci_get_word(config + PCI_VENDOR_ID));
782 pci_set_word(config + PCI_SUBSYSTEM_ID, vdev->device_id);
783 config[PCI_INTERRUPT_PIN] = 1;
784
785 memory_region_init(&proxy->msix_bar, "virtio-msix", 4096);
786 if (vdev->nvectors && !msix_init(&proxy->pci_dev, vdev->nvectors,
787 &proxy->msix_bar, 1, 0)) {
788 pci_register_bar(&proxy->pci_dev, 1, PCI_BASE_ADDRESS_SPACE_MEMORY,
789 &proxy->msix_bar);
790 } else
791 vdev->nvectors = 0;
792
793 proxy->pci_dev.config_write = virtio_write_config;
794
795 size = VIRTIO_PCI_REGION_SIZE(&proxy->pci_dev) + vdev->config_len;
796 if (size & (size-1))
797 size = 1 << qemu_fls(size);
798
799 memory_region_init_io(&proxy->bar, &virtio_pci_config_ops, proxy,
800 "virtio-pci", size);
801 pci_register_bar(&proxy->pci_dev, 0, PCI_BASE_ADDRESS_SPACE_IO,
802 &proxy->bar);
803
804 if (!kvm_has_many_ioeventfds()) {
805 proxy->flags &= ~VIRTIO_PCI_FLAG_USE_IOEVENTFD;
806 }
807
808 virtio_bind_device(vdev, &virtio_pci_bindings, proxy);
809 proxy->host_features |= 0x1 << VIRTIO_F_NOTIFY_ON_EMPTY;
810 proxy->host_features |= 0x1 << VIRTIO_F_BAD_FEATURE;
811 proxy->host_features = vdev->get_features(vdev, proxy->host_features);
812 }
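/* Resulting BAR layout: BAR 1 is a 4 KiB MMIO region holding the MSI-X table
 * (only registered when the device asked for vectors and msix_init()
 * succeeded), and BAR 0 is an I/O port region containing the common header
 * followed by the device-specific config, rounded up to a power of two as
 * required for a PCI BAR.
 */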
813
814 static int virtio_blk_init_pci(PCIDevice *pci_dev)
815 {
816 VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
817 VirtIODevice *vdev;
818
819 if (proxy->class_code != PCI_CLASS_STORAGE_SCSI &&
820 proxy->class_code != PCI_CLASS_STORAGE_OTHER)
821 proxy->class_code = PCI_CLASS_STORAGE_SCSI;
822
823 vdev = virtio_blk_init(&pci_dev->qdev, &proxy->blk);
824 if (!vdev) {
825 return -1;
826 }
827 vdev->nvectors = proxy->nvectors;
828 virtio_init_pci(proxy, vdev);
829 /* make the actual value visible */
830 proxy->nvectors = vdev->nvectors;
831 return 0;
832 }
833
834 static int virtio_exit_pci(PCIDevice *pci_dev)
835 {
836 VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
837 int r;
838
839 memory_region_destroy(&proxy->bar);
840 r = msix_uninit(pci_dev, &proxy->msix_bar);
841 memory_region_destroy(&proxy->msix_bar);
842 return r;
843 }
844
845 static int virtio_blk_exit_pci(PCIDevice *pci_dev)
846 {
847 VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
848
849 virtio_pci_stop_ioeventfd(proxy);
850 virtio_blk_exit(proxy->vdev);
851 return virtio_exit_pci(pci_dev);
852 }
853
854 static int virtio_serial_init_pci(PCIDevice *pci_dev)
855 {
856 VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
857 VirtIODevice *vdev;
858
859 if (proxy->class_code != PCI_CLASS_COMMUNICATION_OTHER &&
860 proxy->class_code != PCI_CLASS_DISPLAY_OTHER && /* qemu 0.10 */
861 proxy->class_code != PCI_CLASS_OTHERS) /* qemu-kvm */
862 proxy->class_code = PCI_CLASS_COMMUNICATION_OTHER;
863
864 vdev = virtio_serial_init(&pci_dev->qdev, &proxy->serial);
865 if (!vdev) {
866 return -1;
867 }
868 vdev->nvectors = proxy->nvectors == DEV_NVECTORS_UNSPECIFIED
869 ? proxy->serial.max_virtserial_ports + 1
870 : proxy->nvectors;
871 virtio_init_pci(proxy, vdev);
872 proxy->nvectors = vdev->nvectors;
873 return 0;
874 }
875
876 static int virtio_serial_exit_pci(PCIDevice *pci_dev)
877 {
878 VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
879
880 virtio_pci_stop_ioeventfd(proxy);
881 virtio_serial_exit(proxy->vdev);
882 return virtio_exit_pci(pci_dev);
883 }
884
885 static int virtio_net_init_pci(PCIDevice *pci_dev)
886 {
887 VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
888 VirtIODevice *vdev;
889
890 vdev = virtio_net_init(&pci_dev->qdev, &proxy->nic, &proxy->net);
891
892 vdev->nvectors = proxy->nvectors;
893 virtio_init_pci(proxy, vdev);
894
895 /* make the actual value visible */
896 proxy->nvectors = vdev->nvectors;
897 return 0;
898 }
899
900 static int virtio_net_exit_pci(PCIDevice *pci_dev)
901 {
902 VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
903
904 virtio_pci_stop_ioeventfd(proxy);
905 virtio_net_exit(proxy->vdev);
906 return virtio_exit_pci(pci_dev);
907 }
908
909 static int virtio_balloon_init_pci(PCIDevice *pci_dev)
910 {
911 VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
912 VirtIODevice *vdev;
913
914 if (proxy->class_code != PCI_CLASS_OTHERS &&
915 proxy->class_code != PCI_CLASS_MEMORY_RAM) { /* qemu < 1.1 */
916 proxy->class_code = PCI_CLASS_OTHERS;
917 }
918
919 vdev = virtio_balloon_init(&pci_dev->qdev);
920 if (!vdev) {
921 return -1;
922 }
923 virtio_init_pci(proxy, vdev);
924 return 0;
925 }
926
927 static int virtio_balloon_exit_pci(PCIDevice *pci_dev)
928 {
929 VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
930
931 virtio_pci_stop_ioeventfd(proxy);
932 virtio_balloon_exit(proxy->vdev);
933 return virtio_exit_pci(pci_dev);
934 }
935
936 static Property virtio_blk_properties[] = {
937 DEFINE_PROP_HEX32("class", VirtIOPCIProxy, class_code, 0),
938 DEFINE_BLOCK_PROPERTIES(VirtIOPCIProxy, blk.conf),
939 DEFINE_PROP_STRING("serial", VirtIOPCIProxy, blk.serial),
940 #ifdef __linux__
941 DEFINE_PROP_BIT("scsi", VirtIOPCIProxy, blk.scsi, 0, true),
942 #endif
943 DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags, VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
944 DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
945 DEFINE_VIRTIO_BLK_FEATURES(VirtIOPCIProxy, host_features),
946 DEFINE_PROP_END_OF_LIST(),
947 };
948
949 static void virtio_blk_class_init(ObjectClass *klass, void *data)
950 {
951 DeviceClass *dc = DEVICE_CLASS(klass);
952 PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
953
954 k->init = virtio_blk_init_pci;
955 k->exit = virtio_blk_exit_pci;
956 k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
957 k->device_id = PCI_DEVICE_ID_VIRTIO_BLOCK;
958 k->revision = VIRTIO_PCI_ABI_VERSION;
959 k->class_id = PCI_CLASS_STORAGE_SCSI;
960 dc->reset = virtio_pci_reset;
961 dc->props = virtio_blk_properties;
962 }
963
964 static TypeInfo virtio_blk_info = {
965 .name = "virtio-blk-pci",
966 .parent = TYPE_PCI_DEVICE,
967 .instance_size = sizeof(VirtIOPCIProxy),
968 .class_init = virtio_blk_class_init,
969 };
970
971 static Property virtio_net_properties[] = {
972 DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags, VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, false),
973 DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 3),
974 DEFINE_VIRTIO_NET_FEATURES(VirtIOPCIProxy, host_features),
975 DEFINE_NIC_PROPERTIES(VirtIOPCIProxy, nic),
976 DEFINE_PROP_UINT32("x-txtimer", VirtIOPCIProxy, net.txtimer, TX_TIMER_INTERVAL),
977 DEFINE_PROP_INT32("x-txburst", VirtIOPCIProxy, net.txburst, TX_BURST),
978 DEFINE_PROP_STRING("tx", VirtIOPCIProxy, net.tx),
979 DEFINE_PROP_END_OF_LIST(),
980 };
981
982 static void virtio_net_class_init(ObjectClass *klass, void *data)
983 {
984 DeviceClass *dc = DEVICE_CLASS(klass);
985 PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
986
987 k->init = virtio_net_init_pci;
988 k->exit = virtio_net_exit_pci;
989 k->romfile = "pxe-virtio.rom";
990 k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
991 k->device_id = PCI_DEVICE_ID_VIRTIO_NET;
992 k->revision = VIRTIO_PCI_ABI_VERSION;
993 k->class_id = PCI_CLASS_NETWORK_ETHERNET;
994 dc->reset = virtio_pci_reset;
995 dc->props = virtio_net_properties;
996 }
997
998 static TypeInfo virtio_net_info = {
999 .name = "virtio-net-pci",
1000 .parent = TYPE_PCI_DEVICE,
1001 .instance_size = sizeof(VirtIOPCIProxy),
1002 .class_init = virtio_net_class_init,
1003 };
1004
1005 static Property virtio_serial_properties[] = {
1006 DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags, VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
1007 DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, DEV_NVECTORS_UNSPECIFIED),
1008 DEFINE_PROP_HEX32("class", VirtIOPCIProxy, class_code, 0),
1009 DEFINE_VIRTIO_COMMON_FEATURES(VirtIOPCIProxy, host_features),
1010 DEFINE_PROP_UINT32("max_ports", VirtIOPCIProxy, serial.max_virtserial_ports, 31),
1011 DEFINE_PROP_END_OF_LIST(),
1012 };
1013
1014 static void virtio_serial_class_init(ObjectClass *klass, void *data)
1015 {
1016 DeviceClass *dc = DEVICE_CLASS(klass);
1017 PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
1018
1019 k->init = virtio_serial_init_pci;
1020 k->exit = virtio_serial_exit_pci;
1021 k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
1022 k->device_id = PCI_DEVICE_ID_VIRTIO_CONSOLE;
1023 k->revision = VIRTIO_PCI_ABI_VERSION;
1024 k->class_id = PCI_CLASS_COMMUNICATION_OTHER;
1025 dc->reset = virtio_pci_reset;
1026 dc->props = virtio_serial_properties;
1027 }
1028
1029 static TypeInfo virtio_serial_info = {
1030 .name = "virtio-serial-pci",
1031 .parent = TYPE_PCI_DEVICE,
1032 .instance_size = sizeof(VirtIOPCIProxy),
1033 .class_init = virtio_serial_class_init,
1034 };
1035
1036 static Property virtio_balloon_properties[] = {
1037 DEFINE_VIRTIO_COMMON_FEATURES(VirtIOPCIProxy, host_features),
1038 DEFINE_PROP_HEX32("class", VirtIOPCIProxy, class_code, 0),
1039 DEFINE_PROP_END_OF_LIST(),
1040 };
1041
1042 static void virtio_balloon_class_init(ObjectClass *klass, void *data)
1043 {
1044 DeviceClass *dc = DEVICE_CLASS(klass);
1045 PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
1046
1047 k->init = virtio_balloon_init_pci;
1048 k->exit = virtio_balloon_exit_pci;
1049 k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
1050 k->device_id = PCI_DEVICE_ID_VIRTIO_BALLOON;
1051 k->revision = VIRTIO_PCI_ABI_VERSION;
1052 k->class_id = PCI_CLASS_OTHERS;
1053 dc->reset = virtio_pci_reset;
1054 dc->props = virtio_balloon_properties;
1055 }
1056
1057 static TypeInfo virtio_balloon_info = {
1058 .name = "virtio-balloon-pci",
1059 .parent = TYPE_PCI_DEVICE,
1060 .instance_size = sizeof(VirtIOPCIProxy),
1061 .class_init = virtio_balloon_class_init,
1062 };
1063
1064 static int virtio_scsi_init_pci(PCIDevice *pci_dev)
1065 {
1066 VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
1067 VirtIODevice *vdev;
1068
1069 vdev = virtio_scsi_init(&pci_dev->qdev, &proxy->scsi);
1070 if (!vdev) {
1071 return -EINVAL;
1072 }
1073
1074 vdev->nvectors = proxy->nvectors;
1075 virtio_init_pci(proxy, vdev);
1076
1077 /* make the actual value visible */
1078 proxy->nvectors = vdev->nvectors;
1079 return 0;
1080 }
1081
1082 static int virtio_scsi_exit_pci(PCIDevice *pci_dev)
1083 {
1084 VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
1085
1086 virtio_scsi_exit(proxy->vdev);
1087 return virtio_exit_pci(pci_dev);
1088 }
1089
1090 static Property virtio_scsi_properties[] = {
1091 DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
1092 DEFINE_VIRTIO_SCSI_PROPERTIES(VirtIOPCIProxy, host_features, scsi),
1093 DEFINE_PROP_END_OF_LIST(),
1094 };
1095
1096 static void virtio_scsi_class_init(ObjectClass *klass, void *data)
1097 {
1098 DeviceClass *dc = DEVICE_CLASS(klass);
1099 PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
1100
1101 k->init = virtio_scsi_init_pci;
1102 k->exit = virtio_scsi_exit_pci;
1103 k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
1104 k->device_id = PCI_DEVICE_ID_VIRTIO_SCSI;
1105 k->revision = 0x00;
1106 k->class_id = PCI_CLASS_STORAGE_SCSI;
1107 dc->reset = virtio_pci_reset;
1108 dc->props = virtio_scsi_properties;
1109 }
1110
1111 static TypeInfo virtio_scsi_info = {
1112 .name = "virtio-scsi-pci",
1113 .parent = TYPE_PCI_DEVICE,
1114 .instance_size = sizeof(VirtIOPCIProxy),
1115 .class_init = virtio_scsi_class_init,
1116 };
1117
1118 static void virtio_pci_register_types(void)
1119 {
1120 type_register_static(&virtio_blk_info);
1121 type_register_static(&virtio_net_info);
1122 type_register_static(&virtio_serial_info);
1123 type_register_static(&virtio_balloon_info);
1124 type_register_static(&virtio_scsi_info);
1125 }
1126
1127 type_init(virtio_pci_register_types)