[qemu.git] / hw / virtio / virtio-pci.c
1 /*
2 * Virtio PCI Bindings
3 *
4 * Copyright IBM, Corp. 2007
5 * Copyright (c) 2009 CodeSourcery
6 *
7 * Authors:
8 * Anthony Liguori <aliguori@us.ibm.com>
9 * Paul Brook <paul@codesourcery.com>
10 *
11 * This work is licensed under the terms of the GNU GPL, version 2. See
12 * the COPYING file in the top-level directory.
13 *
14 * Contributions after 2012-01-13 are licensed under the terms of the
15 * GNU GPL, version 2 or (at your option) any later version.
16 */
17
18 #include <inttypes.h>
19
20 #include "hw/virtio/virtio.h"
21 #include "hw/virtio/virtio-blk.h"
22 #include "hw/virtio/virtio-net.h"
23 #include "hw/virtio/virtio-serial.h"
24 #include "hw/virtio/virtio-scsi.h"
25 #include "hw/virtio/virtio-balloon.h"
26 #include "hw/pci/pci.h"
27 #include "qemu/error-report.h"
28 #include "hw/pci/msi.h"
29 #include "hw/pci/msix.h"
30 #include "hw/loader.h"
31 #include "sysemu/kvm.h"
32 #include "sysemu/blockdev.h"
33 #include "virtio-pci.h"
34 #include "qemu/range.h"
35 #include "hw/virtio/virtio-bus.h"
36 #include "qapi/visitor.h"
37
38 /* from Linux's linux/virtio_pci.h */
39
40 /* A 32-bit r/o bitmask of the features supported by the host */
41 #define VIRTIO_PCI_HOST_FEATURES 0
42
43 /* A 32-bit r/w bitmask of features activated by the guest */
44 #define VIRTIO_PCI_GUEST_FEATURES 4
45
46 /* A 32-bit r/w PFN for the currently selected queue */
47 #define VIRTIO_PCI_QUEUE_PFN 8
48
49 /* A 16-bit r/o queue size for the currently selected queue */
50 #define VIRTIO_PCI_QUEUE_NUM 12
51
52 /* A 16-bit r/w queue selector */
53 #define VIRTIO_PCI_QUEUE_SEL 14
54
55 /* A 16-bit r/w queue notifier */
56 #define VIRTIO_PCI_QUEUE_NOTIFY 16
57
58 /* An 8-bit device status register. */
59 #define VIRTIO_PCI_STATUS 18
60
61 /* An 8-bit r/o interrupt status register. Reading the value will return the
62 * current contents of the ISR and will also clear it. This is effectively
63 * a read-and-acknowledge. */
64 #define VIRTIO_PCI_ISR 19
65
66 /* MSI-X registers: only enabled if MSI-X is enabled. */
67 /* A 16-bit vector for configuration changes. */
68 #define VIRTIO_MSI_CONFIG_VECTOR 20
69 /* A 16-bit vector for selected queue notifications. */
70 #define VIRTIO_MSI_QUEUE_VECTOR 22
71
72 /* Config space size */
73 #define VIRTIO_PCI_CONFIG_NOMSI 20
74 #define VIRTIO_PCI_CONFIG_MSI 24
75 #define VIRTIO_PCI_REGION_SIZE(dev) (msix_present(dev) ? \
76 VIRTIO_PCI_CONFIG_MSI : \
77 VIRTIO_PCI_CONFIG_NOMSI)
78
79 /* The remaining space is defined by each driver as the per-driver
80 * configuration space */
81 #define VIRTIO_PCI_CONFIG(dev) (msix_enabled(dev) ? \
82 VIRTIO_PCI_CONFIG_MSI : \
83 VIRTIO_PCI_CONFIG_NOMSI)
84
85 /* How many bits to shift physical queue address written to QUEUE_PFN.
86 * 12 is historical, and due to x86 page size. */
87 #define VIRTIO_PCI_QUEUE_ADDR_SHIFT 12
88
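/*
 * For illustration only: a minimal sketch, under the assumption of x86
 * port I/O, of how a legacy guest driver programs a queue through the
 * register layout above.  The outw()/inw()/outl() helpers and the 'ioaddr'
 * BAR base are assumed guest-side names, not anything defined in this file.
 */
#if 0
static void example_guest_setup_queue(unsigned long ioaddr, uint16_t index,
                                      uint64_t ring_pa)
{
    outw(index, ioaddr + VIRTIO_PCI_QUEUE_SEL);        /* select the queue */
    if (inw(ioaddr + VIRTIO_PCI_QUEUE_NUM) == 0) {     /* queue not provided */
        return;
    }
    /* Publish the ring's page frame number; virtio_ioport_write() shifts it
     * back up by VIRTIO_PCI_QUEUE_ADDR_SHIFT to recover the address. */
    outl(ring_pa >> VIRTIO_PCI_QUEUE_ADDR_SHIFT, ioaddr + VIRTIO_PCI_QUEUE_PFN);
}
#endif
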
89 /* Flags track per-device state like workarounds for quirks in older guests. */
90 #define VIRTIO_PCI_FLAG_BUS_MASTER_BUG (1 << 0)
91
92 /* QEMU doesn't strictly need write barriers since everything runs in
93 * lock-step. We'll leave the calls to wmb() in though to make it obvious for
94 * KVM or if kqemu gets SMP support.
95 */
96 #define wmb() do { } while (0)
97
98 /* HACK for virtio to determine if it's running a big endian guest */
99 bool virtio_is_big_endian(void);
100
101 static void virtio_pci_bus_new(VirtioBusState *bus, VirtIOPCIProxy *dev);
102
103 /* virtio device */
104 /* DeviceState to VirtIOPCIProxy. For use off data-path. TODO: use QOM. */
105 static inline VirtIOPCIProxy *to_virtio_pci_proxy(DeviceState *d)
106 {
107 return container_of(d, VirtIOPCIProxy, pci_dev.qdev);
108 }
109
110 /* DeviceState to VirtIOPCIProxy. Note: used on datapath,
111 * be careful and test performance if you change this.
112 */
113 static inline VirtIOPCIProxy *to_virtio_pci_proxy_fast(DeviceState *d)
114 {
115 return container_of(d, VirtIOPCIProxy, pci_dev.qdev);
116 }
117
118 static void virtio_pci_notify(DeviceState *d, uint16_t vector)
119 {
120 VirtIOPCIProxy *proxy = to_virtio_pci_proxy_fast(d);
121 if (msix_enabled(&proxy->pci_dev))
122 msix_notify(&proxy->pci_dev, vector);
123 else
124 qemu_set_irq(proxy->pci_dev.irq[0], proxy->vdev->isr & 1);
125 }
126
127 static void virtio_pci_save_config(DeviceState *d, QEMUFile *f)
128 {
129 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
130 pci_device_save(&proxy->pci_dev, f);
131 msix_save(&proxy->pci_dev, f);
132 if (msix_present(&proxy->pci_dev))
133 qemu_put_be16(f, proxy->vdev->config_vector);
134 }
135
136 static void virtio_pci_save_queue(DeviceState *d, int n, QEMUFile *f)
137 {
138 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
139 if (msix_present(&proxy->pci_dev))
140 qemu_put_be16(f, virtio_queue_vector(proxy->vdev, n));
141 }
142
143 static int virtio_pci_load_config(DeviceState *d, QEMUFile *f)
144 {
145 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
146 int ret;
147 ret = pci_device_load(&proxy->pci_dev, f);
148 if (ret) {
149 return ret;
150 }
151 msix_unuse_all_vectors(&proxy->pci_dev);
152 msix_load(&proxy->pci_dev, f);
153 if (msix_present(&proxy->pci_dev)) {
154 qemu_get_be16s(f, &proxy->vdev->config_vector);
155 } else {
156 proxy->vdev->config_vector = VIRTIO_NO_VECTOR;
157 }
158 if (proxy->vdev->config_vector != VIRTIO_NO_VECTOR) {
159 return msix_vector_use(&proxy->pci_dev, proxy->vdev->config_vector);
160 }
161 return 0;
162 }
163
164 static int virtio_pci_load_queue(DeviceState *d, int n, QEMUFile *f)
165 {
166 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
167 uint16_t vector;
168 if (msix_present(&proxy->pci_dev)) {
169 qemu_get_be16s(f, &vector);
170 } else {
171 vector = VIRTIO_NO_VECTOR;
172 }
173 virtio_queue_set_vector(proxy->vdev, n, vector);
174 if (vector != VIRTIO_NO_VECTOR) {
175 return msix_vector_use(&proxy->pci_dev, vector);
176 }
177 return 0;
178 }
179
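/*
 * Host notifier (ioeventfd) wiring: memory_region_add_eventfd() below asks
 * the memory API (and KVM, when it is in use) to signal the queue's host
 * notifier eventfd whenever the guest writes the 16-bit queue index 'n' to
 * VIRTIO_PCI_QUEUE_NOTIFY in the BAR.  The kick is then handled from the
 * event loop instead of trapping into virtio_ioport_write() on the vCPU
 * thread.
 */
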
180 static int virtio_pci_set_host_notifier_internal(VirtIOPCIProxy *proxy,
181 int n, bool assign, bool set_handler)
182 {
183 VirtQueue *vq = virtio_get_queue(proxy->vdev, n);
184 EventNotifier *notifier = virtio_queue_get_host_notifier(vq);
185 int r = 0;
186
187 if (assign) {
188 r = event_notifier_init(notifier, 1);
189 if (r < 0) {
190 error_report("%s: unable to init event notifier: %d",
191 __func__, r);
192 return r;
193 }
194 virtio_queue_set_host_notifier_fd_handler(vq, true, set_handler);
195 memory_region_add_eventfd(&proxy->bar, VIRTIO_PCI_QUEUE_NOTIFY, 2,
196 true, n, notifier);
197 } else {
198 memory_region_del_eventfd(&proxy->bar, VIRTIO_PCI_QUEUE_NOTIFY, 2,
199 true, n, notifier);
200 virtio_queue_set_host_notifier_fd_handler(vq, false, false);
201 event_notifier_cleanup(notifier);
202 }
203 return r;
204 }
205
206 static void virtio_pci_start_ioeventfd(VirtIOPCIProxy *proxy)
207 {
208 int n, r;
209
210 if (!(proxy->flags & VIRTIO_PCI_FLAG_USE_IOEVENTFD) ||
211 proxy->ioeventfd_disabled ||
212 proxy->ioeventfd_started) {
213 return;
214 }
215
216 for (n = 0; n < VIRTIO_PCI_QUEUE_MAX; n++) {
217 if (!virtio_queue_get_num(proxy->vdev, n)) {
218 continue;
219 }
220
221 r = virtio_pci_set_host_notifier_internal(proxy, n, true, true);
222 if (r < 0) {
223 goto assign_error;
224 }
225 }
226 proxy->ioeventfd_started = true;
227 return;
228
229 assign_error:
230 while (--n >= 0) {
231 if (!virtio_queue_get_num(proxy->vdev, n)) {
232 continue;
233 }
234
235 r = virtio_pci_set_host_notifier_internal(proxy, n, false, false);
236 assert(r >= 0);
237 }
238 proxy->ioeventfd_started = false;
239     error_report("%s: failed. Falling back to userspace (slower).", __func__);
240 }
241
242 static void virtio_pci_stop_ioeventfd(VirtIOPCIProxy *proxy)
243 {
244 int r;
245 int n;
246
247 if (!proxy->ioeventfd_started) {
248 return;
249 }
250
251 for (n = 0; n < VIRTIO_PCI_QUEUE_MAX; n++) {
252 if (!virtio_queue_get_num(proxy->vdev, n)) {
253 continue;
254 }
255
256 r = virtio_pci_set_host_notifier_internal(proxy, n, false, false);
257 assert(r >= 0);
258 }
259 proxy->ioeventfd_started = false;
260 }
261
262 static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val)
263 {
264 VirtIOPCIProxy *proxy = opaque;
265 VirtIODevice *vdev = proxy->vdev;
266 hwaddr pa;
267
268 switch (addr) {
269 case VIRTIO_PCI_GUEST_FEATURES:
270 /* Guest does not negotiate properly? We have to assume nothing. */
271 if (val & (1 << VIRTIO_F_BAD_FEATURE)) {
272 val = virtio_bus_get_vdev_bad_features(&proxy->bus);
273 }
274 virtio_set_features(vdev, val);
275 break;
276 case VIRTIO_PCI_QUEUE_PFN:
277 pa = (hwaddr)val << VIRTIO_PCI_QUEUE_ADDR_SHIFT;
278 if (pa == 0) {
279 virtio_pci_stop_ioeventfd(proxy);
280 virtio_reset(proxy->vdev);
281 msix_unuse_all_vectors(&proxy->pci_dev);
282 }
283 else
284 virtio_queue_set_addr(vdev, vdev->queue_sel, pa);
285 break;
286 case VIRTIO_PCI_QUEUE_SEL:
287 if (val < VIRTIO_PCI_QUEUE_MAX)
288 vdev->queue_sel = val;
289 break;
290 case VIRTIO_PCI_QUEUE_NOTIFY:
291 if (val < VIRTIO_PCI_QUEUE_MAX) {
292 virtio_queue_notify(vdev, val);
293 }
294 break;
295 case VIRTIO_PCI_STATUS:
296 if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) {
297 virtio_pci_stop_ioeventfd(proxy);
298 }
299
300 virtio_set_status(vdev, val & 0xFF);
301
302 if (val & VIRTIO_CONFIG_S_DRIVER_OK) {
303 virtio_pci_start_ioeventfd(proxy);
304 }
305
306 if (vdev->status == 0) {
307 virtio_reset(proxy->vdev);
308 msix_unuse_all_vectors(&proxy->pci_dev);
309 }
310
311 /* Linux before 2.6.34 sets the device as OK without enabling
312 the PCI device bus master bit. In this case we need to disable
313 some safety checks. */
314 if ((val & VIRTIO_CONFIG_S_DRIVER_OK) &&
315 !(proxy->pci_dev.config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
316 proxy->flags |= VIRTIO_PCI_FLAG_BUS_MASTER_BUG;
317 }
318 break;
319 case VIRTIO_MSI_CONFIG_VECTOR:
320 msix_vector_unuse(&proxy->pci_dev, vdev->config_vector);
321         /* Make it possible for the guest to discover that an error took place. */
322 if (msix_vector_use(&proxy->pci_dev, val) < 0)
323 val = VIRTIO_NO_VECTOR;
324 vdev->config_vector = val;
325 break;
326 case VIRTIO_MSI_QUEUE_VECTOR:
327 msix_vector_unuse(&proxy->pci_dev,
328 virtio_queue_vector(vdev, vdev->queue_sel));
329         /* Make it possible for the guest to discover that an error took place. */
330 if (msix_vector_use(&proxy->pci_dev, val) < 0)
331 val = VIRTIO_NO_VECTOR;
332 virtio_queue_set_vector(vdev, vdev->queue_sel, val);
333 break;
334 default:
335 error_report("%s: unexpected address 0x%x value 0x%x",
336 __func__, addr, val);
337 break;
338 }
339 }
340
341 static uint32_t virtio_ioport_read(VirtIOPCIProxy *proxy, uint32_t addr)
342 {
343 VirtIODevice *vdev = proxy->vdev;
344 uint32_t ret = 0xFFFFFFFF;
345
346 switch (addr) {
347 case VIRTIO_PCI_HOST_FEATURES:
348 ret = proxy->host_features;
349 break;
350 case VIRTIO_PCI_GUEST_FEATURES:
351 ret = vdev->guest_features;
352 break;
353 case VIRTIO_PCI_QUEUE_PFN:
354 ret = virtio_queue_get_addr(vdev, vdev->queue_sel)
355 >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
356 break;
357 case VIRTIO_PCI_QUEUE_NUM:
358 ret = virtio_queue_get_num(vdev, vdev->queue_sel);
359 break;
360 case VIRTIO_PCI_QUEUE_SEL:
361 ret = vdev->queue_sel;
362 break;
363 case VIRTIO_PCI_STATUS:
364 ret = vdev->status;
365 break;
366 case VIRTIO_PCI_ISR:
367 /* reading from the ISR also clears it. */
368 ret = vdev->isr;
369 vdev->isr = 0;
370 qemu_set_irq(proxy->pci_dev.irq[0], 0);
371 break;
372 case VIRTIO_MSI_CONFIG_VECTOR:
373 ret = vdev->config_vector;
374 break;
375 case VIRTIO_MSI_QUEUE_VECTOR:
376 ret = virtio_queue_vector(vdev, vdev->queue_sel);
377 break;
378 default:
379 break;
380 }
381
382 return ret;
383 }
384
385 static uint64_t virtio_pci_config_read(void *opaque, hwaddr addr,
386 unsigned size)
387 {
388 VirtIOPCIProxy *proxy = opaque;
389 uint32_t config = VIRTIO_PCI_CONFIG(&proxy->pci_dev);
390 uint64_t val = 0;
391 if (addr < config) {
392 return virtio_ioport_read(proxy, addr);
393 }
394 addr -= config;
395
396 switch (size) {
397 case 1:
398 val = virtio_config_readb(proxy->vdev, addr);
399 break;
400 case 2:
401 val = virtio_config_readw(proxy->vdev, addr);
402 if (virtio_is_big_endian()) {
403 val = bswap16(val);
404 }
405 break;
406 case 4:
407 val = virtio_config_readl(proxy->vdev, addr);
408 if (virtio_is_big_endian()) {
409 val = bswap32(val);
410 }
411 break;
412 }
413 return val;
414 }
415
416 static void virtio_pci_config_write(void *opaque, hwaddr addr,
417 uint64_t val, unsigned size)
418 {
419 VirtIOPCIProxy *proxy = opaque;
420 uint32_t config = VIRTIO_PCI_CONFIG(&proxy->pci_dev);
421 if (addr < config) {
422 virtio_ioport_write(proxy, addr, val);
423 return;
424 }
425 addr -= config;
426 /*
427 * Virtio-PCI is odd. Ioports are LE but config space is target native
428 * endian.
429 */
430 switch (size) {
431 case 1:
432 virtio_config_writeb(proxy->vdev, addr, val);
433 break;
434 case 2:
435 if (virtio_is_big_endian()) {
436 val = bswap16(val);
437 }
438 virtio_config_writew(proxy->vdev, addr, val);
439 break;
440 case 4:
441 if (virtio_is_big_endian()) {
442 val = bswap32(val);
443 }
444 virtio_config_writel(proxy->vdev, addr, val);
445 break;
446 }
447 }
448
449 static const MemoryRegionOps virtio_pci_config_ops = {
450 .read = virtio_pci_config_read,
451 .write = virtio_pci_config_write,
452 .impl = {
453 .min_access_size = 1,
454 .max_access_size = 4,
455 },
456 .endianness = DEVICE_LITTLE_ENDIAN,
457 };
458
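/*
 * BAR layout handled by the ops above: the first VIRTIO_PCI_CONFIG(dev)
 * bytes are the legacy common registers, which are always little-endian;
 * everything past that offset is the device-specific config space, which
 * the legacy interface defines in the guest's native byte order.  That is
 * why the 2- and 4-byte accesses above byte-swap on big-endian targets.
 */
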
459 static void virtio_write_config(PCIDevice *pci_dev, uint32_t address,
460 uint32_t val, int len)
461 {
462 VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
463
464 pci_default_write_config(pci_dev, address, val, len);
465
466 if (range_covers_byte(address, len, PCI_COMMAND) &&
467 !(pci_dev->config[PCI_COMMAND] & PCI_COMMAND_MASTER) &&
468 !(proxy->flags & VIRTIO_PCI_FLAG_BUS_MASTER_BUG)) {
469 virtio_pci_stop_ioeventfd(proxy);
470 virtio_set_status(proxy->vdev,
471 proxy->vdev->status & ~VIRTIO_CONFIG_S_DRIVER_OK);
472 }
473 }
474
475 static unsigned virtio_pci_get_features(DeviceState *d)
476 {
477 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
478 return proxy->host_features;
479 }
480
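/*
 * MSI-X vector <-> KVM irqfd plumbing: each vector in use gets a KVM MSI
 * route (kvm_irqchip_add_msi_route), and the queue's guest notifier eventfd
 * is attached to that route as an irqfd, so KVM injects the interrupt
 * directly when the notifier is signalled, without a round trip through
 * QEMU.  'users' reference-counts vectors that are shared by several queues.
 */
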
481 static int kvm_virtio_pci_vq_vector_use(VirtIOPCIProxy *proxy,
482 unsigned int queue_no,
483 unsigned int vector,
484 MSIMessage msg)
485 {
486 VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
487 int ret;
488
489 if (irqfd->users == 0) {
490 ret = kvm_irqchip_add_msi_route(kvm_state, msg);
491 if (ret < 0) {
492 return ret;
493 }
494 irqfd->virq = ret;
495 }
496 irqfd->users++;
497 return 0;
498 }
499
500 static void kvm_virtio_pci_vq_vector_release(VirtIOPCIProxy *proxy,
501 unsigned int vector)
502 {
503 VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
504 if (--irqfd->users == 0) {
505 kvm_irqchip_release_virq(kvm_state, irqfd->virq);
506 }
507 }
508
509 static int kvm_virtio_pci_irqfd_use(VirtIOPCIProxy *proxy,
510 unsigned int queue_no,
511 unsigned int vector)
512 {
513 VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
514 VirtQueue *vq = virtio_get_queue(proxy->vdev, queue_no);
515 EventNotifier *n = virtio_queue_get_guest_notifier(vq);
516 int ret;
517 ret = kvm_irqchip_add_irqfd_notifier(kvm_state, n, irqfd->virq);
518 return ret;
519 }
520
521 static void kvm_virtio_pci_irqfd_release(VirtIOPCIProxy *proxy,
522 unsigned int queue_no,
523 unsigned int vector)
524 {
525 VirtQueue *vq = virtio_get_queue(proxy->vdev, queue_no);
526 EventNotifier *n = virtio_queue_get_guest_notifier(vq);
527 VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
528 int ret;
529
530 ret = kvm_irqchip_remove_irqfd_notifier(kvm_state, n, irqfd->virq);
531 assert(ret == 0);
532 }
533
534 static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
535 {
536 PCIDevice *dev = &proxy->pci_dev;
537 VirtIODevice *vdev = proxy->vdev;
538 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
539 unsigned int vector;
540 int ret, queue_no;
541 MSIMessage msg;
542
543 for (queue_no = 0; queue_no < nvqs; queue_no++) {
544 if (!virtio_queue_get_num(vdev, queue_no)) {
545 break;
546 }
547 vector = virtio_queue_vector(vdev, queue_no);
548 if (vector >= msix_nr_vectors_allocated(dev)) {
549 continue;
550 }
551 msg = msix_get_message(dev, vector);
552 ret = kvm_virtio_pci_vq_vector_use(proxy, queue_no, vector, msg);
553 if (ret < 0) {
554 goto undo;
555 }
556 /* If guest supports masking, set up irqfd now.
557 * Otherwise, delay until unmasked in the frontend.
558 */
559 if (k->guest_notifier_mask) {
560 ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
561 if (ret < 0) {
562 kvm_virtio_pci_vq_vector_release(proxy, vector);
563 goto undo;
564 }
565 }
566 }
567 return 0;
568
569 undo:
570 while (--queue_no >= 0) {
571 vector = virtio_queue_vector(vdev, queue_no);
572 if (vector >= msix_nr_vectors_allocated(dev)) {
573 continue;
574 }
575 if (k->guest_notifier_mask) {
576 kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
577 }
578 kvm_virtio_pci_vq_vector_release(proxy, vector);
579 }
580 return ret;
581 }
582
583 static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
584 {
585 PCIDevice *dev = &proxy->pci_dev;
586 VirtIODevice *vdev = proxy->vdev;
587 unsigned int vector;
588 int queue_no;
589 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
590
591 for (queue_no = 0; queue_no < nvqs; queue_no++) {
592 if (!virtio_queue_get_num(vdev, queue_no)) {
593 break;
594 }
595 vector = virtio_queue_vector(vdev, queue_no);
596 if (vector >= msix_nr_vectors_allocated(dev)) {
597 continue;
598 }
599 /* If guest supports masking, clean up irqfd now.
600 * Otherwise, it was cleaned when masked in the frontend.
601 */
602 if (k->guest_notifier_mask) {
603 kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
604 }
605 kvm_virtio_pci_vq_vector_release(proxy, vector);
606 }
607 }
608
609 static int virtio_pci_vq_vector_unmask(VirtIOPCIProxy *proxy,
610 unsigned int queue_no,
611 unsigned int vector,
612 MSIMessage msg)
613 {
614 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(proxy->vdev);
615 VirtQueue *vq = virtio_get_queue(proxy->vdev, queue_no);
616 EventNotifier *n = virtio_queue_get_guest_notifier(vq);
617 VirtIOIRQFD *irqfd;
618 int ret = 0;
619
620 if (proxy->vector_irqfd) {
621 irqfd = &proxy->vector_irqfd[vector];
622 if (irqfd->msg.data != msg.data || irqfd->msg.address != msg.address) {
623 ret = kvm_irqchip_update_msi_route(kvm_state, irqfd->virq, msg);
624 if (ret < 0) {
625 return ret;
626 }
627 }
628 }
629
630     /* If guest supports masking, irqfd is already set up; unmask it.
631 * Otherwise, set it up now.
632 */
633 if (k->guest_notifier_mask) {
634 k->guest_notifier_mask(proxy->vdev, queue_no, false);
635 /* Test after unmasking to avoid losing events. */
636 if (k->guest_notifier_pending &&
637 k->guest_notifier_pending(proxy->vdev, queue_no)) {
638 event_notifier_set(n);
639 }
640 } else {
641 ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
642 }
643 return ret;
644 }
645
646 static void virtio_pci_vq_vector_mask(VirtIOPCIProxy *proxy,
647 unsigned int queue_no,
648 unsigned int vector)
649 {
650 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(proxy->vdev);
651
652 /* If guest supports masking, keep irqfd but mask it.
653 * Otherwise, clean it up now.
654 */
655 if (k->guest_notifier_mask) {
656 k->guest_notifier_mask(proxy->vdev, queue_no, true);
657 } else {
658 kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
659 }
660 }
661
662 static int virtio_pci_vector_unmask(PCIDevice *dev, unsigned vector,
663 MSIMessage msg)
664 {
665 VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
666 VirtIODevice *vdev = proxy->vdev;
667 int ret, queue_no;
668
669 for (queue_no = 0; queue_no < proxy->nvqs_with_notifiers; queue_no++) {
670 if (!virtio_queue_get_num(vdev, queue_no)) {
671 break;
672 }
673 if (virtio_queue_vector(vdev, queue_no) != vector) {
674 continue;
675 }
676 ret = virtio_pci_vq_vector_unmask(proxy, queue_no, vector, msg);
677 if (ret < 0) {
678 goto undo;
679 }
680 }
681 return 0;
682
683 undo:
684 while (--queue_no >= 0) {
685 if (virtio_queue_vector(vdev, queue_no) != vector) {
686 continue;
687 }
688 virtio_pci_vq_vector_mask(proxy, queue_no, vector);
689 }
690 return ret;
691 }
692
693 static void virtio_pci_vector_mask(PCIDevice *dev, unsigned vector)
694 {
695 VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
696 VirtIODevice *vdev = proxy->vdev;
697 int queue_no;
698
699 for (queue_no = 0; queue_no < proxy->nvqs_with_notifiers; queue_no++) {
700 if (!virtio_queue_get_num(vdev, queue_no)) {
701 break;
702 }
703 if (virtio_queue_vector(vdev, queue_no) != vector) {
704 continue;
705 }
706 virtio_pci_vq_vector_mask(proxy, queue_no, vector);
707 }
708 }
709
710 static void virtio_pci_vector_poll(PCIDevice *dev,
711 unsigned int vector_start,
712 unsigned int vector_end)
713 {
714 VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
715 VirtIODevice *vdev = proxy->vdev;
716 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
717 int queue_no;
718 unsigned int vector;
719 EventNotifier *notifier;
720 VirtQueue *vq;
721
722 for (queue_no = 0; queue_no < proxy->nvqs_with_notifiers; queue_no++) {
723 if (!virtio_queue_get_num(vdev, queue_no)) {
724 break;
725 }
726 vector = virtio_queue_vector(vdev, queue_no);
727 if (vector < vector_start || vector >= vector_end ||
728 !msix_is_masked(dev, vector)) {
729 continue;
730 }
731 vq = virtio_get_queue(vdev, queue_no);
732 notifier = virtio_queue_get_guest_notifier(vq);
733 if (k->guest_notifier_pending) {
734 if (k->guest_notifier_pending(vdev, queue_no)) {
735 msix_set_pending(dev, vector);
736 }
737 } else if (event_notifier_test_and_clear(notifier)) {
738 msix_set_pending(dev, vector);
739 }
740 }
741 }
742
743 static int virtio_pci_set_guest_notifier(DeviceState *d, int n, bool assign,
744 bool with_irqfd)
745 {
746 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
747 VirtQueue *vq = virtio_get_queue(proxy->vdev, n);
748 EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);
749
750 if (assign) {
751 int r = event_notifier_init(notifier, 0);
752 if (r < 0) {
753 return r;
754 }
755 virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
756 } else {
757 virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
758 event_notifier_cleanup(notifier);
759 }
760
761 return 0;
762 }
763
764 static bool virtio_pci_query_guest_notifiers(DeviceState *d)
765 {
766 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
767 return msix_enabled(&proxy->pci_dev);
768 }
769
770 static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign)
771 {
772 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
773 VirtIODevice *vdev = proxy->vdev;
774 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
775 int r, n;
776 bool with_irqfd = msix_enabled(&proxy->pci_dev) &&
777 kvm_msi_via_irqfd_enabled();
778
779 nvqs = MIN(nvqs, VIRTIO_PCI_QUEUE_MAX);
780
781 /* When deassigning, pass a consistent nvqs value
782 * to avoid leaking notifiers.
783 */
784 assert(assign || nvqs == proxy->nvqs_with_notifiers);
785
786 proxy->nvqs_with_notifiers = nvqs;
787
788 /* Must unset vector notifier while guest notifier is still assigned */
789 if ((proxy->vector_irqfd || k->guest_notifier_mask) && !assign) {
790 msix_unset_vector_notifiers(&proxy->pci_dev);
791 if (proxy->vector_irqfd) {
792 kvm_virtio_pci_vector_release(proxy, nvqs);
793 g_free(proxy->vector_irqfd);
794 proxy->vector_irqfd = NULL;
795 }
796 }
797
798 for (n = 0; n < nvqs; n++) {
799 if (!virtio_queue_get_num(vdev, n)) {
800 break;
801 }
802
803 r = virtio_pci_set_guest_notifier(d, n, assign,
804 kvm_msi_via_irqfd_enabled());
805 if (r < 0) {
806 goto assign_error;
807 }
808 }
809
810 /* Must set vector notifier after guest notifier has been assigned */
811 if ((with_irqfd || k->guest_notifier_mask) && assign) {
812 if (with_irqfd) {
813 proxy->vector_irqfd =
814 g_malloc0(sizeof(*proxy->vector_irqfd) *
815 msix_nr_vectors_allocated(&proxy->pci_dev));
816 r = kvm_virtio_pci_vector_use(proxy, nvqs);
817 if (r < 0) {
818 goto assign_error;
819 }
820 }
821 r = msix_set_vector_notifiers(&proxy->pci_dev,
822 virtio_pci_vector_unmask,
823 virtio_pci_vector_mask,
824 virtio_pci_vector_poll);
825 if (r < 0) {
826 goto notifiers_error;
827 }
828 }
829
830 return 0;
831
832 notifiers_error:
833 if (with_irqfd) {
834 assert(assign);
835 kvm_virtio_pci_vector_release(proxy, nvqs);
836 }
837
838 assign_error:
839 /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
840 assert(assign);
841 while (--n >= 0) {
842 virtio_pci_set_guest_notifier(d, n, !assign, with_irqfd);
843 }
844 return r;
845 }
846
847 static int virtio_pci_set_host_notifier(DeviceState *d, int n, bool assign)
848 {
849 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
850
851 /* Stop using ioeventfd for virtqueue kick if the device starts using host
852      * notifiers. This makes it easy to avoid stepping on each other's toes.
853 */
854 proxy->ioeventfd_disabled = assign;
855 if (assign) {
856 virtio_pci_stop_ioeventfd(proxy);
857 }
858     /* We don't need to start the ioeventfd here: the backend currently
859      * only stops on a status change away from OK, on reset, vmstop and
860      * such. If we do add code to start here, we need to check vmstate,
861      * device state, etc. */
862 return virtio_pci_set_host_notifier_internal(proxy, n, assign, false);
863 }
864
865 static void virtio_pci_vmstate_change(DeviceState *d, bool running)
866 {
867 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
868
869 if (running) {
870         /* If the guest has bus master disabled but the device is in the
871            ready state, then we have a buggy guest OS. */
872 if ((proxy->vdev->status & VIRTIO_CONFIG_S_DRIVER_OK) &&
873 !(proxy->pci_dev.config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
874 proxy->flags |= VIRTIO_PCI_FLAG_BUS_MASTER_BUG;
875 }
876 virtio_pci_start_ioeventfd(proxy);
877 } else {
878 virtio_pci_stop_ioeventfd(proxy);
879 }
880 }
881
882 static const VirtIOBindings virtio_pci_bindings = {
883 .notify = virtio_pci_notify,
884 .save_config = virtio_pci_save_config,
885 .load_config = virtio_pci_load_config,
886 .save_queue = virtio_pci_save_queue,
887 .load_queue = virtio_pci_load_queue,
888 .get_features = virtio_pci_get_features,
889 .query_guest_notifiers = virtio_pci_query_guest_notifiers,
890 .set_host_notifier = virtio_pci_set_host_notifier,
891 .set_guest_notifiers = virtio_pci_set_guest_notifiers,
892 .vmstate_change = virtio_pci_vmstate_change,
893 };
894
895 #ifdef CONFIG_VIRTFS
896 static int virtio_9p_init_pci(VirtIOPCIProxy *vpci_dev)
897 {
898 V9fsPCIState *dev = VIRTIO_9P_PCI(vpci_dev);
899 DeviceState *vdev = DEVICE(&dev->vdev);
900
901 qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
902 if (qdev_init(vdev) < 0) {
903 return -1;
904 }
905 return 0;
906 }
907
908 static Property virtio_9p_pci_properties[] = {
909 DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
910 VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
911 DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
912 DEFINE_VIRTIO_COMMON_FEATURES(VirtIOPCIProxy, host_features),
913 DEFINE_VIRTIO_9P_PROPERTIES(V9fsPCIState, vdev.fsconf),
914 DEFINE_PROP_END_OF_LIST(),
915 };
916
917 static void virtio_9p_pci_class_init(ObjectClass *klass, void *data)
918 {
919 DeviceClass *dc = DEVICE_CLASS(klass);
920 PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
921 VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
922
923 k->init = virtio_9p_init_pci;
924 pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
925 pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_9P;
926 pcidev_k->revision = VIRTIO_PCI_ABI_VERSION;
927 pcidev_k->class_id = 0x2;
928 dc->props = virtio_9p_pci_properties;
929 }
930
931 static void virtio_9p_pci_instance_init(Object *obj)
932 {
933 V9fsPCIState *dev = VIRTIO_9P_PCI(obj);
934 object_initialize(OBJECT(&dev->vdev), TYPE_VIRTIO_9P);
935 object_property_add_child(obj, "virtio-backend", OBJECT(&dev->vdev), NULL);
936 }
937
938 static const TypeInfo virtio_9p_pci_info = {
939 .name = TYPE_VIRTIO_9P_PCI,
940 .parent = TYPE_VIRTIO_PCI,
941 .instance_size = sizeof(V9fsPCIState),
942 .instance_init = virtio_9p_pci_instance_init,
943 .class_init = virtio_9p_pci_class_init,
944 };
945 #endif /* CONFIG_VIRTFS */
946
947 /*
948 * virtio-pci: This is the PCIDevice which has a virtio-pci-bus.
949 */
950
951 /* This is called by virtio-bus just after the device is plugged. */
952 static void virtio_pci_device_plugged(DeviceState *d)
953 {
954 VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
955 VirtioBusState *bus = &proxy->bus;
956 uint8_t *config;
957 uint32_t size;
958
959 proxy->vdev = bus->vdev;
960
961 config = proxy->pci_dev.config;
962 if (proxy->class_code) {
963 pci_config_set_class(config, proxy->class_code);
964 }
965 pci_set_word(config + PCI_SUBSYSTEM_VENDOR_ID,
966 pci_get_word(config + PCI_VENDOR_ID));
967 pci_set_word(config + PCI_SUBSYSTEM_ID, virtio_bus_get_vdev_id(bus));
968 config[PCI_INTERRUPT_PIN] = 1;
969
970 if (proxy->nvectors &&
971 msix_init_exclusive_bar(&proxy->pci_dev, proxy->nvectors, 1)) {
972 proxy->nvectors = 0;
973 }
974
975 proxy->pci_dev.config_write = virtio_write_config;
976
977 size = VIRTIO_PCI_REGION_SIZE(&proxy->pci_dev)
978 + virtio_bus_get_vdev_config_len(bus);
979 if (size & (size - 1)) {
980 size = 1 << qemu_fls(size);
981 }
982
983 memory_region_init_io(&proxy->bar, &virtio_pci_config_ops, proxy,
984 "virtio-pci", size);
985 pci_register_bar(&proxy->pci_dev, 0, PCI_BASE_ADDRESS_SPACE_IO,
986 &proxy->bar);
987
988 if (!kvm_has_many_ioeventfds()) {
989 proxy->flags &= ~VIRTIO_PCI_FLAG_USE_IOEVENTFD;
990 }
991
992 proxy->host_features |= 0x1 << VIRTIO_F_NOTIFY_ON_EMPTY;
993 proxy->host_features |= 0x1 << VIRTIO_F_BAD_FEATURE;
994 proxy->host_features = virtio_bus_get_vdev_features(bus,
995 proxy->host_features);
996 }
997
998 static int virtio_pci_init(PCIDevice *pci_dev)
999 {
1000 VirtIOPCIProxy *dev = VIRTIO_PCI(pci_dev);
1001 VirtioPCIClass *k = VIRTIO_PCI_GET_CLASS(pci_dev);
1002 virtio_pci_bus_new(&dev->bus, dev);
1003 if (k->init != NULL) {
1004 return k->init(dev);
1005 }
1006 return 0;
1007 }
1008
1009 static void virtio_pci_exit(PCIDevice *pci_dev)
1010 {
1011 VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
1012 virtio_pci_stop_ioeventfd(proxy);
1013 memory_region_destroy(&proxy->bar);
1014 msix_uninit_exclusive_bar(pci_dev);
1015 }
1016
1017 static void virtio_pci_reset(DeviceState *qdev)
1018 {
1019 VirtIOPCIProxy *proxy = VIRTIO_PCI(qdev);
1020 VirtioBusState *bus = VIRTIO_BUS(&proxy->bus);
1021 virtio_pci_stop_ioeventfd(proxy);
1022 virtio_bus_reset(bus);
1023 msix_unuse_all_vectors(&proxy->pci_dev);
1024 proxy->flags &= ~VIRTIO_PCI_FLAG_BUS_MASTER_BUG;
1025 }
1026
1027 static void virtio_pci_class_init(ObjectClass *klass, void *data)
1028 {
1029 DeviceClass *dc = DEVICE_CLASS(klass);
1030 PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
1031
1032 k->init = virtio_pci_init;
1033 k->exit = virtio_pci_exit;
1034 k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
1035 k->revision = VIRTIO_PCI_ABI_VERSION;
1036 k->class_id = PCI_CLASS_OTHERS;
1037 dc->reset = virtio_pci_reset;
1038 }
1039
1040 static const TypeInfo virtio_pci_info = {
1041 .name = TYPE_VIRTIO_PCI,
1042 .parent = TYPE_PCI_DEVICE,
1043 .instance_size = sizeof(VirtIOPCIProxy),
1044 .class_init = virtio_pci_class_init,
1045 .class_size = sizeof(VirtioPCIClass),
1046 .abstract = true,
1047 };
1048
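/*
 * The concrete virtio-*-pci types below all follow the same pattern: the
 * proxy embeds its backend virtio device, instance_init creates the backend
 * as a QOM child named "virtio-backend", and the class init hook parents
 * the backend onto the proxy's virtio-pci-bus and realizes it with
 * qdev_init().
 */
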
1049 /* virtio-blk-pci */
1050
1051 static Property virtio_blk_pci_properties[] = {
1052 DEFINE_PROP_HEX32("class", VirtIOPCIProxy, class_code, 0),
1053 DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
1054 VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
1055 DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
1056 #ifdef CONFIG_VIRTIO_BLK_DATA_PLANE
1057 DEFINE_PROP_BIT("x-data-plane", VirtIOBlkPCI, blk.data_plane, 0, false),
1058 #endif
1059 DEFINE_VIRTIO_BLK_FEATURES(VirtIOPCIProxy, host_features),
1060 DEFINE_VIRTIO_BLK_PROPERTIES(VirtIOBlkPCI, blk),
1061 DEFINE_PROP_END_OF_LIST(),
1062 };
1063
1064 static int virtio_blk_pci_init(VirtIOPCIProxy *vpci_dev)
1065 {
1066 VirtIOBlkPCI *dev = VIRTIO_BLK_PCI(vpci_dev);
1067 DeviceState *vdev = DEVICE(&dev->vdev);
1068 virtio_blk_set_conf(vdev, &(dev->blk));
1069 qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
1070 if (qdev_init(vdev) < 0) {
1071 return -1;
1072 }
1073 return 0;
1074 }
1075
1076 static void virtio_blk_pci_class_init(ObjectClass *klass, void *data)
1077 {
1078 DeviceClass *dc = DEVICE_CLASS(klass);
1079 VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
1080 PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
1081
1082 dc->props = virtio_blk_pci_properties;
1083 k->init = virtio_blk_pci_init;
1084 pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
1085 pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_BLOCK;
1086 pcidev_k->revision = VIRTIO_PCI_ABI_VERSION;
1087 pcidev_k->class_id = PCI_CLASS_STORAGE_SCSI;
1088 }
1089
1090 static void virtio_blk_pci_instance_init(Object *obj)
1091 {
1092 VirtIOBlkPCI *dev = VIRTIO_BLK_PCI(obj);
1093 object_initialize(OBJECT(&dev->vdev), TYPE_VIRTIO_BLK);
1094 object_property_add_child(obj, "virtio-backend", OBJECT(&dev->vdev), NULL);
1095 }
1096
1097 static const TypeInfo virtio_blk_pci_info = {
1098 .name = TYPE_VIRTIO_BLK_PCI,
1099 .parent = TYPE_VIRTIO_PCI,
1100 .instance_size = sizeof(VirtIOBlkPCI),
1101 .instance_init = virtio_blk_pci_instance_init,
1102 .class_init = virtio_blk_pci_class_init,
1103 };
1104
1105 /* virtio-scsi-pci */
1106
1107 static Property virtio_scsi_pci_properties[] = {
1108 DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
1109 VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
1110 DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors,
1111 DEV_NVECTORS_UNSPECIFIED),
1112 DEFINE_VIRTIO_SCSI_FEATURES(VirtIOPCIProxy, host_features),
1113 DEFINE_VIRTIO_SCSI_PROPERTIES(VirtIOSCSIPCI, vdev.parent_obj.conf),
1114 DEFINE_PROP_END_OF_LIST(),
1115 };
1116
1117 static int virtio_scsi_pci_init_pci(VirtIOPCIProxy *vpci_dev)
1118 {
1119 VirtIOSCSIPCI *dev = VIRTIO_SCSI_PCI(vpci_dev);
1120 DeviceState *vdev = DEVICE(&dev->vdev);
1121 VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(vdev);
1122
1123 if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) {
1124 vpci_dev->nvectors = vs->conf.num_queues + 3;
1125 }
1126
1127 qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
1128 if (qdev_init(vdev) < 0) {
1129 return -1;
1130 }
1131 return 0;
1132 }
1133
1134 static void virtio_scsi_pci_class_init(ObjectClass *klass, void *data)
1135 {
1136 DeviceClass *dc = DEVICE_CLASS(klass);
1137 VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
1138 PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
1139 k->init = virtio_scsi_pci_init_pci;
1140 dc->props = virtio_scsi_pci_properties;
1141 pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
1142 pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_SCSI;
1143 pcidev_k->revision = 0x00;
1144 pcidev_k->class_id = PCI_CLASS_STORAGE_SCSI;
1145 }
1146
1147 static void virtio_scsi_pci_instance_init(Object *obj)
1148 {
1149 VirtIOSCSIPCI *dev = VIRTIO_SCSI_PCI(obj);
1150 object_initialize(OBJECT(&dev->vdev), TYPE_VIRTIO_SCSI);
1151 object_property_add_child(obj, "virtio-backend", OBJECT(&dev->vdev), NULL);
1152 }
1153
1154 static const TypeInfo virtio_scsi_pci_info = {
1155 .name = TYPE_VIRTIO_SCSI_PCI,
1156 .parent = TYPE_VIRTIO_PCI,
1157 .instance_size = sizeof(VirtIOSCSIPCI),
1158 .instance_init = virtio_scsi_pci_instance_init,
1159 .class_init = virtio_scsi_pci_class_init,
1160 };
1161
1162 /* vhost-scsi-pci */
1163
1164 #ifdef CONFIG_VHOST_SCSI
1165 static Property vhost_scsi_pci_properties[] = {
1166 DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors,
1167 DEV_NVECTORS_UNSPECIFIED),
1168 DEFINE_VIRTIO_COMMON_FEATURES(VirtIOPCIProxy, host_features),
1169 DEFINE_VHOST_SCSI_PROPERTIES(VHostSCSIPCI, vdev.parent_obj.conf),
1170 DEFINE_PROP_END_OF_LIST(),
1171 };
1172
1173 static int vhost_scsi_pci_init_pci(VirtIOPCIProxy *vpci_dev)
1174 {
1175 VHostSCSIPCI *dev = VHOST_SCSI_PCI(vpci_dev);
1176 DeviceState *vdev = DEVICE(&dev->vdev);
1177 VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(vdev);
1178
1179 if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) {
1180 vpci_dev->nvectors = vs->conf.num_queues + 3;
1181 }
1182
1183 qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
1184 if (qdev_init(vdev) < 0) {
1185 return -1;
1186 }
1187 return 0;
1188 }
1189
1190 static void vhost_scsi_pci_class_init(ObjectClass *klass, void *data)
1191 {
1192 DeviceClass *dc = DEVICE_CLASS(klass);
1193 VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
1194 PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
1195 k->init = vhost_scsi_pci_init_pci;
1196 dc->props = vhost_scsi_pci_properties;
1197 pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
1198 pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_SCSI;
1199 pcidev_k->revision = 0x00;
1200 pcidev_k->class_id = PCI_CLASS_STORAGE_SCSI;
1201 }
1202
1203 static void vhost_scsi_pci_instance_init(Object *obj)
1204 {
1205 VHostSCSIPCI *dev = VHOST_SCSI_PCI(obj);
1206 object_initialize(OBJECT(&dev->vdev), TYPE_VHOST_SCSI);
1207 object_property_add_child(obj, "virtio-backend", OBJECT(&dev->vdev), NULL);
1208 }
1209
1210 static const TypeInfo vhost_scsi_pci_info = {
1211 .name = TYPE_VHOST_SCSI_PCI,
1212 .parent = TYPE_VIRTIO_PCI,
1213 .instance_size = sizeof(VHostSCSIPCI),
1214 .instance_init = vhost_scsi_pci_instance_init,
1215 .class_init = vhost_scsi_pci_class_init,
1216 };
1217 #endif
1218
1219 /* virtio-balloon-pci */
1220
1221 static void balloon_pci_stats_get_all(Object *obj, struct Visitor *v,
1222 void *opaque, const char *name,
1223 Error **errp)
1224 {
1225 VirtIOBalloonPCI *dev = opaque;
1226 object_property_get(OBJECT(&dev->vdev), v, "guest-stats", errp);
1227 }
1228
1229 static void balloon_pci_stats_get_poll_interval(Object *obj, struct Visitor *v,
1230 void *opaque, const char *name,
1231 Error **errp)
1232 {
1233 VirtIOBalloonPCI *dev = opaque;
1234 object_property_get(OBJECT(&dev->vdev), v, "guest-stats-polling-interval",
1235 errp);
1236 }
1237
1238 static void balloon_pci_stats_set_poll_interval(Object *obj, struct Visitor *v,
1239 void *opaque, const char *name,
1240 Error **errp)
1241 {
1242 VirtIOBalloonPCI *dev = opaque;
1243 object_property_set(OBJECT(&dev->vdev), v, "guest-stats-polling-interval",
1244 errp);
1245 }
1246
1247 static Property virtio_balloon_pci_properties[] = {
1248 DEFINE_VIRTIO_COMMON_FEATURES(VirtIOPCIProxy, host_features),
1249 DEFINE_PROP_HEX32("class", VirtIOPCIProxy, class_code, 0),
1250 DEFINE_PROP_END_OF_LIST(),
1251 };
1252
1253 static int virtio_balloon_pci_init(VirtIOPCIProxy *vpci_dev)
1254 {
1255 VirtIOBalloonPCI *dev = VIRTIO_BALLOON_PCI(vpci_dev);
1256 DeviceState *vdev = DEVICE(&dev->vdev);
1257
1258 if (vpci_dev->class_code != PCI_CLASS_OTHERS &&
1259 vpci_dev->class_code != PCI_CLASS_MEMORY_RAM) { /* qemu < 1.1 */
1260 vpci_dev->class_code = PCI_CLASS_OTHERS;
1261 }
1262
1263 qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
1264 if (qdev_init(vdev) < 0) {
1265 return -1;
1266 }
1267 return 0;
1268 }
1269
1270 static void virtio_balloon_pci_class_init(ObjectClass *klass, void *data)
1271 {
1272 DeviceClass *dc = DEVICE_CLASS(klass);
1273 VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
1274 PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
1275 k->init = virtio_balloon_pci_init;
1276 dc->props = virtio_balloon_pci_properties;
1277 pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
1278 pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_BALLOON;
1279 pcidev_k->revision = VIRTIO_PCI_ABI_VERSION;
1280 pcidev_k->class_id = PCI_CLASS_OTHERS;
1281 }
1282
1283 static void virtio_balloon_pci_instance_init(Object *obj)
1284 {
1285 VirtIOBalloonPCI *dev = VIRTIO_BALLOON_PCI(obj);
1286 object_initialize(OBJECT(&dev->vdev), TYPE_VIRTIO_BALLOON);
1287 object_property_add_child(obj, "virtio-backend", OBJECT(&dev->vdev), NULL);
1288
1289 object_property_add(obj, "guest-stats", "guest statistics",
1290 balloon_pci_stats_get_all, NULL, NULL, dev,
1291 NULL);
1292
1293 object_property_add(obj, "guest-stats-polling-interval", "int",
1294 balloon_pci_stats_get_poll_interval,
1295 balloon_pci_stats_set_poll_interval,
1296 NULL, dev, NULL);
1297 }
1298
1299 static const TypeInfo virtio_balloon_pci_info = {
1300 .name = TYPE_VIRTIO_BALLOON_PCI,
1301 .parent = TYPE_VIRTIO_PCI,
1302 .instance_size = sizeof(VirtIOBalloonPCI),
1303 .instance_init = virtio_balloon_pci_instance_init,
1304 .class_init = virtio_balloon_pci_class_init,
1305 };
1306
1307 /* virtio-serial-pci */
1308
1309 static int virtio_serial_pci_init(VirtIOPCIProxy *vpci_dev)
1310 {
1311 VirtIOSerialPCI *dev = VIRTIO_SERIAL_PCI(vpci_dev);
1312 DeviceState *vdev = DEVICE(&dev->vdev);
1313
1314 if (vpci_dev->class_code != PCI_CLASS_COMMUNICATION_OTHER &&
1315 vpci_dev->class_code != PCI_CLASS_DISPLAY_OTHER && /* qemu 0.10 */
1316 vpci_dev->class_code != PCI_CLASS_OTHERS) { /* qemu-kvm */
1317 vpci_dev->class_code = PCI_CLASS_COMMUNICATION_OTHER;
1318 }
1319
1320 /* backwards-compatibility with machines that were created with
1321 DEV_NVECTORS_UNSPECIFIED */
1322 if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) {
1323 vpci_dev->nvectors = dev->vdev.serial.max_virtserial_ports + 1;
1324 }
1325
1326 qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
1327 if (qdev_init(vdev) < 0) {
1328 return -1;
1329 }
1330 return 0;
1331 }
1332
1333 static Property virtio_serial_pci_properties[] = {
1334 DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
1335 VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
1336 DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
1337 DEFINE_PROP_HEX32("class", VirtIOPCIProxy, class_code, 0),
1338 DEFINE_VIRTIO_COMMON_FEATURES(VirtIOPCIProxy, host_features),
1339 DEFINE_VIRTIO_SERIAL_PROPERTIES(VirtIOSerialPCI, vdev.serial),
1340 DEFINE_PROP_END_OF_LIST(),
1341 };
1342
1343 static void virtio_serial_pci_class_init(ObjectClass *klass, void *data)
1344 {
1345 DeviceClass *dc = DEVICE_CLASS(klass);
1346 VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
1347 PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
1348 k->init = virtio_serial_pci_init;
1349 dc->props = virtio_serial_pci_properties;
1350 pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
1351 pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_CONSOLE;
1352 pcidev_k->revision = VIRTIO_PCI_ABI_VERSION;
1353 pcidev_k->class_id = PCI_CLASS_COMMUNICATION_OTHER;
1354 }
1355
1356 static void virtio_serial_pci_instance_init(Object *obj)
1357 {
1358 VirtIOSerialPCI *dev = VIRTIO_SERIAL_PCI(obj);
1359 object_initialize(OBJECT(&dev->vdev), TYPE_VIRTIO_SERIAL);
1360 object_property_add_child(obj, "virtio-backend", OBJECT(&dev->vdev), NULL);
1361 }
1362
1363 static const TypeInfo virtio_serial_pci_info = {
1364 .name = TYPE_VIRTIO_SERIAL_PCI,
1365 .parent = TYPE_VIRTIO_PCI,
1366 .instance_size = sizeof(VirtIOSerialPCI),
1367 .instance_init = virtio_serial_pci_instance_init,
1368 .class_init = virtio_serial_pci_class_init,
1369 };
1370
1371 /* virtio-net-pci */
1372
1373 static Property virtio_net_properties[] = {
1374 DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
1375 VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, false),
1376 DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 3),
1377 DEFINE_VIRTIO_NET_FEATURES(VirtIOPCIProxy, host_features),
1378 DEFINE_NIC_PROPERTIES(VirtIONetPCI, vdev.nic_conf),
1379 DEFINE_VIRTIO_NET_PROPERTIES(VirtIONetPCI, vdev.net_conf),
1380 DEFINE_PROP_END_OF_LIST(),
1381 };
1382
1383 static int virtio_net_pci_init(VirtIOPCIProxy *vpci_dev)
1384 {
1385 VirtIONetPCI *dev = VIRTIO_NET_PCI(vpci_dev);
1386 DeviceState *vdev = DEVICE(&dev->vdev);
1387
1388 virtio_net_set_config_size(&dev->vdev, vpci_dev->host_features);
1389 qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
1390 if (qdev_init(vdev) < 0) {
1391 return -1;
1392 }
1393 return 0;
1394 }
1395
1396 static void virtio_net_pci_class_init(ObjectClass *klass, void *data)
1397 {
1398 DeviceClass *dc = DEVICE_CLASS(klass);
1399 PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
1400 VirtioPCIClass *vpciklass = VIRTIO_PCI_CLASS(klass);
1401
1402 k->romfile = "efi-virtio.rom";
1403 k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
1404 k->device_id = PCI_DEVICE_ID_VIRTIO_NET;
1405 k->revision = VIRTIO_PCI_ABI_VERSION;
1406 k->class_id = PCI_CLASS_NETWORK_ETHERNET;
1407 dc->props = virtio_net_properties;
1408 vpciklass->init = virtio_net_pci_init;
1409 }
1410
1411 static void virtio_net_pci_instance_init(Object *obj)
1412 {
1413 VirtIONetPCI *dev = VIRTIO_NET_PCI(obj);
1414 object_initialize(OBJECT(&dev->vdev), TYPE_VIRTIO_NET);
1415 object_property_add_child(obj, "virtio-backend", OBJECT(&dev->vdev), NULL);
1416 }
1417
1418 static const TypeInfo virtio_net_pci_info = {
1419 .name = TYPE_VIRTIO_NET_PCI,
1420 .parent = TYPE_VIRTIO_PCI,
1421 .instance_size = sizeof(VirtIONetPCI),
1422 .instance_init = virtio_net_pci_instance_init,
1423 .class_init = virtio_net_pci_class_init,
1424 };
1425
1426 /* virtio-rng-pci */
1427
1428 static Property virtio_rng_pci_properties[] = {
1429 DEFINE_VIRTIO_COMMON_FEATURES(VirtIOPCIProxy, host_features),
1430 DEFINE_VIRTIO_RNG_PROPERTIES(VirtIORngPCI, vdev.conf),
1431 DEFINE_PROP_END_OF_LIST(),
1432 };
1433
1434 static int virtio_rng_pci_init(VirtIOPCIProxy *vpci_dev)
1435 {
1436 VirtIORngPCI *vrng = VIRTIO_RNG_PCI(vpci_dev);
1437 DeviceState *vdev = DEVICE(&vrng->vdev);
1438
1439 qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
1440 if (qdev_init(vdev) < 0) {
1441 return -1;
1442 }
1443
1444 object_property_set_link(OBJECT(vrng),
1445 OBJECT(vrng->vdev.conf.default_backend), "rng",
1446 NULL);
1447
1448 return 0;
1449 }
1450
1451 static void virtio_rng_pci_class_init(ObjectClass *klass, void *data)
1452 {
1453 DeviceClass *dc = DEVICE_CLASS(klass);
1454 VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
1455 PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
1456
1457 k->init = virtio_rng_pci_init;
1458 dc->props = virtio_rng_pci_properties;
1459
1460 pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
1461 pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_RNG;
1462 pcidev_k->revision = VIRTIO_PCI_ABI_VERSION;
1463 pcidev_k->class_id = PCI_CLASS_OTHERS;
1464 }
1465
1466 static void virtio_rng_initfn(Object *obj)
1467 {
1468 VirtIORngPCI *dev = VIRTIO_RNG_PCI(obj);
1469 object_initialize(OBJECT(&dev->vdev), TYPE_VIRTIO_RNG);
1470 object_property_add_child(obj, "virtio-backend", OBJECT(&dev->vdev), NULL);
1471 object_property_add_link(obj, "rng", TYPE_RNG_BACKEND,
1472 (Object **)&dev->vdev.conf.rng, NULL);
1473
1474 }
1475
1476 static const TypeInfo virtio_rng_pci_info = {
1477 .name = TYPE_VIRTIO_RNG_PCI,
1478 .parent = TYPE_VIRTIO_PCI,
1479 .instance_size = sizeof(VirtIORngPCI),
1480 .instance_init = virtio_rng_initfn,
1481 .class_init = virtio_rng_pci_class_init,
1482 };
1483
1484 /* virtio-pci-bus */
1485
1486 static void virtio_pci_bus_new(VirtioBusState *bus, VirtIOPCIProxy *dev)
1487 {
1488 DeviceState *qdev = DEVICE(dev);
1489 BusState *qbus;
1490 qbus_create_inplace((BusState *)bus, TYPE_VIRTIO_PCI_BUS, qdev, NULL);
1491 qbus = BUS(bus);
1492 qbus->allow_hotplug = 1;
1493 }
1494
1495 static void virtio_pci_bus_class_init(ObjectClass *klass, void *data)
1496 {
1497 BusClass *bus_class = BUS_CLASS(klass);
1498 VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);
1499 bus_class->max_dev = 1;
1500 k->notify = virtio_pci_notify;
1501 k->save_config = virtio_pci_save_config;
1502 k->load_config = virtio_pci_load_config;
1503 k->save_queue = virtio_pci_save_queue;
1504 k->load_queue = virtio_pci_load_queue;
1505 k->get_features = virtio_pci_get_features;
1506 k->query_guest_notifiers = virtio_pci_query_guest_notifiers;
1507 k->set_host_notifier = virtio_pci_set_host_notifier;
1508 k->set_guest_notifiers = virtio_pci_set_guest_notifiers;
1509 k->vmstate_change = virtio_pci_vmstate_change;
1510 k->device_plugged = virtio_pci_device_plugged;
1511 }
1512
1513 static const TypeInfo virtio_pci_bus_info = {
1514 .name = TYPE_VIRTIO_PCI_BUS,
1515 .parent = TYPE_VIRTIO_BUS,
1516 .instance_size = sizeof(VirtioPCIBusState),
1517 .class_init = virtio_pci_bus_class_init,
1518 };
1519
1520 static void virtio_pci_register_types(void)
1521 {
1522 type_register_static(&virtio_rng_pci_info);
1523 type_register_static(&virtio_pci_bus_info);
1524 type_register_static(&virtio_pci_info);
1525 #ifdef CONFIG_VIRTFS
1526 type_register_static(&virtio_9p_pci_info);
1527 #endif
1528 type_register_static(&virtio_blk_pci_info);
1529 type_register_static(&virtio_scsi_pci_info);
1530 type_register_static(&virtio_balloon_pci_info);
1531 type_register_static(&virtio_serial_pci_info);
1532 type_register_static(&virtio_net_pci_info);
1533 #ifdef CONFIG_VHOST_SCSI
1534 type_register_static(&vhost_scsi_pci_info);
1535 #endif
1536 }
1537
1538 type_init(virtio_pci_register_types)