hw/virtio/virtio-pci.c
1 /*
2 * Virtio PCI Bindings
3 *
4 * Copyright IBM, Corp. 2007
5 * Copyright (c) 2009 CodeSourcery
6 *
7 * Authors:
8 * Anthony Liguori <aliguori@us.ibm.com>
9 * Paul Brook <paul@codesourcery.com>
10 *
11 * This work is licensed under the terms of the GNU GPL, version 2. See
12 * the COPYING file in the top-level directory.
13 *
14 * Contributions after 2012-01-13 are licensed under the terms of the
15 * GNU GPL, version 2 or (at your option) any later version.
16 */
17
18 #include "qemu/osdep.h"
19
20 #include "standard-headers/linux/virtio_pci.h"
21 #include "hw/virtio/virtio.h"
22 #include "hw/virtio/virtio-blk.h"
23 #include "hw/virtio/virtio-net.h"
24 #include "hw/virtio/virtio-serial.h"
25 #include "hw/virtio/virtio-scsi.h"
26 #include "hw/virtio/virtio-balloon.h"
27 #include "hw/virtio/virtio-input.h"
28 #include "hw/pci/pci.h"
29 #include "qapi/error.h"
30 #include "qemu/error-report.h"
31 #include "hw/pci/msi.h"
32 #include "hw/pci/msix.h"
33 #include "hw/loader.h"
34 #include "sysemu/kvm.h"
35 #include "virtio-pci.h"
36 #include "qemu/range.h"
37 #include "hw/virtio/virtio-bus.h"
38 #include "qapi/visitor.h"
39
40 #define VIRTIO_PCI_REGION_SIZE(dev) VIRTIO_PCI_CONFIG_OFF(msix_present(dev))
41
42 #undef VIRTIO_PCI_CONFIG
43
44 /* The remaining space is defined by each driver as the per-driver
45 * configuration space */
46 #define VIRTIO_PCI_CONFIG_SIZE(dev) VIRTIO_PCI_CONFIG_OFF(msix_enabled(dev))
47
48 static void virtio_pci_bus_new(VirtioBusState *bus, size_t bus_size,
49 VirtIOPCIProxy *dev);
50 static void virtio_pci_reset(DeviceState *qdev);
51
52 /* virtio device */
53 /* DeviceState to VirtIOPCIProxy. For use off data-path. TODO: use QOM. */
54 static inline VirtIOPCIProxy *to_virtio_pci_proxy(DeviceState *d)
55 {
56 return container_of(d, VirtIOPCIProxy, pci_dev.qdev);
57 }
58
59 /* DeviceState to VirtIOPCIProxy. Note: used on datapath,
60 * be careful and test performance if you change this.
61 */
62 static inline VirtIOPCIProxy *to_virtio_pci_proxy_fast(DeviceState *d)
63 {
64 return container_of(d, VirtIOPCIProxy, pci_dev.qdev);
65 }
66
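/* Raise the guest-facing interrupt for a virtqueue or config change: through
 * the MSI-X vector when MSI-X is enabled, otherwise by driving the legacy
 * INTx line from bit 0 of the device ISR.
 */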
67 static void virtio_pci_notify(DeviceState *d, uint16_t vector)
68 {
69 VirtIOPCIProxy *proxy = to_virtio_pci_proxy_fast(d);
70
71 if (msix_enabled(&proxy->pci_dev))
72 msix_notify(&proxy->pci_dev, vector);
73 else {
74 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
75 pci_set_irq(&proxy->pci_dev, atomic_read(&vdev->isr) & 1);
76 }
77 }
78
79 static void virtio_pci_save_config(DeviceState *d, QEMUFile *f)
80 {
81 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
82 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
83
84 pci_device_save(&proxy->pci_dev, f);
85 msix_save(&proxy->pci_dev, f);
86 if (msix_present(&proxy->pci_dev))
87 qemu_put_be16(f, vdev->config_vector);
88 }
89
90 static const VMStateDescription vmstate_virtio_pci_modern_queue_state = {
91 .name = "virtio_pci/modern_queue_state",
92 .version_id = 1,
93 .minimum_version_id = 1,
94 .fields = (VMStateField[]) {
95 VMSTATE_UINT16(num, VirtIOPCIQueue),
96 VMSTATE_UNUSED(1), /* enabled was stored as be16 */
97 VMSTATE_BOOL(enabled, VirtIOPCIQueue),
98 VMSTATE_UINT32_ARRAY(desc, VirtIOPCIQueue, 2),
99 VMSTATE_UINT32_ARRAY(avail, VirtIOPCIQueue, 2),
100 VMSTATE_UINT32_ARRAY(used, VirtIOPCIQueue, 2),
101 VMSTATE_END_OF_LIST()
102 }
103 };
104
105 static bool virtio_pci_modern_state_needed(void *opaque)
106 {
107 VirtIOPCIProxy *proxy = opaque;
108
109 return virtio_pci_modern(proxy);
110 }
111
112 static const VMStateDescription vmstate_virtio_pci_modern_state_sub = {
113 .name = "virtio_pci/modern_state",
114 .version_id = 1,
115 .minimum_version_id = 1,
116 .needed = &virtio_pci_modern_state_needed,
117 .fields = (VMStateField[]) {
118 VMSTATE_UINT32(dfselect, VirtIOPCIProxy),
119 VMSTATE_UINT32(gfselect, VirtIOPCIProxy),
120 VMSTATE_UINT32_ARRAY(guest_features, VirtIOPCIProxy, 2),
121 VMSTATE_STRUCT_ARRAY(vqs, VirtIOPCIProxy, VIRTIO_QUEUE_MAX, 0,
122 vmstate_virtio_pci_modern_queue_state,
123 VirtIOPCIQueue),
124 VMSTATE_END_OF_LIST()
125 }
126 };
127
128 static const VMStateDescription vmstate_virtio_pci = {
129 .name = "virtio_pci",
130 .version_id = 1,
131 .minimum_version_id = 1,
132 .minimum_version_id_old = 1,
133 .fields = (VMStateField[]) {
134 VMSTATE_END_OF_LIST()
135 },
136 .subsections = (const VMStateDescription*[]) {
137 &vmstate_virtio_pci_modern_state_sub,
138 NULL
139 }
140 };
141
142 static bool virtio_pci_has_extra_state(DeviceState *d)
143 {
144 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
145
146 return proxy->flags & VIRTIO_PCI_FLAG_MIGRATE_EXTRA;
147 }
148
149 static void virtio_pci_save_extra_state(DeviceState *d, QEMUFile *f)
150 {
151 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
152
153 vmstate_save_state(f, &vmstate_virtio_pci, proxy, NULL);
154 }
155
156 static int virtio_pci_load_extra_state(DeviceState *d, QEMUFile *f)
157 {
158 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
159
160 return vmstate_load_state(f, &vmstate_virtio_pci, proxy, 1);
161 }
162
163 static void virtio_pci_save_queue(DeviceState *d, int n, QEMUFile *f)
164 {
165 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
166 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
167
168 if (msix_present(&proxy->pci_dev))
169 qemu_put_be16(f, virtio_queue_vector(vdev, n));
170 }
171
172 static int virtio_pci_load_config(DeviceState *d, QEMUFile *f)
173 {
174 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
175 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
176
177 int ret;
178 ret = pci_device_load(&proxy->pci_dev, f);
179 if (ret) {
180 return ret;
181 }
182 msix_unuse_all_vectors(&proxy->pci_dev);
183 msix_load(&proxy->pci_dev, f);
184 if (msix_present(&proxy->pci_dev)) {
185 qemu_get_be16s(f, &vdev->config_vector);
186 } else {
187 vdev->config_vector = VIRTIO_NO_VECTOR;
188 }
189 if (vdev->config_vector != VIRTIO_NO_VECTOR) {
190 return msix_vector_use(&proxy->pci_dev, vdev->config_vector);
191 }
192 return 0;
193 }
194
195 static int virtio_pci_load_queue(DeviceState *d, int n, QEMUFile *f)
196 {
197 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
198 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
199
200 uint16_t vector;
201 if (msix_present(&proxy->pci_dev)) {
202 qemu_get_be16s(f, &vector);
203 } else {
204 vector = VIRTIO_NO_VECTOR;
205 }
206 virtio_queue_set_vector(vdev, n, vector);
207 if (vector != VIRTIO_NO_VECTOR) {
208 return msix_vector_use(&proxy->pci_dev, vector);
209 }
210
211 return 0;
212 }
213
214 static bool virtio_pci_ioeventfd_enabled(DeviceState *d)
215 {
216 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
217
218 return (proxy->flags & VIRTIO_PCI_FLAG_USE_IOEVENTFD) != 0;
219 }
220
221 #define QEMU_VIRTIO_PCI_QUEUE_MEM_MULT 0x1000
222
223 static inline int virtio_pci_queue_mem_mult(struct VirtIOPCIProxy *proxy)
224 {
225 return (proxy->flags & VIRTIO_PCI_FLAG_PAGE_PER_VQ) ?
226 QEMU_VIRTIO_PCI_QUEUE_MEM_MULT : 4;
227 }
228
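/* Attach or detach the queue's host notifier eventfd to the active notify
 * region(s): the modern notify BAR (any access width when KVM supports
 * any-length ioeventfds, 2-byte writes otherwise), the optional modern PIO
 * notify register, and/or the legacy QUEUE_NOTIFY I/O port (matching the
 * queue index written by the guest).
 */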
229 static int virtio_pci_ioeventfd_assign(DeviceState *d, EventNotifier *notifier,
230 int n, bool assign)
231 {
232 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
233 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
234 VirtQueue *vq = virtio_get_queue(vdev, n);
235 bool legacy = virtio_pci_legacy(proxy);
236 bool modern = virtio_pci_modern(proxy);
237 bool fast_mmio = kvm_ioeventfd_any_length_enabled();
238 bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;
239 MemoryRegion *modern_mr = &proxy->notify.mr;
240 MemoryRegion *modern_notify_mr = &proxy->notify_pio.mr;
241 MemoryRegion *legacy_mr = &proxy->bar;
242 hwaddr modern_addr = virtio_pci_queue_mem_mult(proxy) *
243 virtio_get_queue_index(vq);
244 hwaddr legacy_addr = VIRTIO_PCI_QUEUE_NOTIFY;
245
246 if (assign) {
247 if (modern) {
248 if (fast_mmio) {
249 memory_region_add_eventfd(modern_mr, modern_addr, 0,
250 false, n, notifier);
251 } else {
252 memory_region_add_eventfd(modern_mr, modern_addr, 2,
253 false, n, notifier);
254 }
255 if (modern_pio) {
256 memory_region_add_eventfd(modern_notify_mr, 0, 2,
257 true, n, notifier);
258 }
259 }
260 if (legacy) {
261 memory_region_add_eventfd(legacy_mr, legacy_addr, 2,
262 true, n, notifier);
263 }
264 } else {
265 if (modern) {
266 if (fast_mmio) {
267 memory_region_del_eventfd(modern_mr, modern_addr, 0,
268 false, n, notifier);
269 } else {
270 memory_region_del_eventfd(modern_mr, modern_addr, 2,
271 false, n, notifier);
272 }
273 if (modern_pio) {
274 memory_region_del_eventfd(modern_notify_mr, 0, 2,
275 true, n, notifier);
276 }
277 }
278 if (legacy) {
279 memory_region_del_eventfd(legacy_mr, legacy_addr, 2,
280 true, n, notifier);
281 }
282 }
283 return 0;
284 }
285
286 static void virtio_pci_start_ioeventfd(VirtIOPCIProxy *proxy)
287 {
288 virtio_bus_start_ioeventfd(&proxy->bus);
289 }
290
291 static void virtio_pci_stop_ioeventfd(VirtIOPCIProxy *proxy)
292 {
293 virtio_bus_stop_ioeventfd(&proxy->bus);
294 }
295
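/* Writes to the legacy (virtio 0.9) I/O port register block: feature
 * negotiation, queue PFN/selection/notification, device status and MSI-X
 * vector assignment.
 */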
296 static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val)
297 {
298 VirtIOPCIProxy *proxy = opaque;
299 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
300 hwaddr pa;
301
302 switch (addr) {
303 case VIRTIO_PCI_GUEST_FEATURES:
304 /* Guest does not negotiate properly? We have to assume nothing. */
305 if (val & (1 << VIRTIO_F_BAD_FEATURE)) {
306 val = virtio_bus_get_vdev_bad_features(&proxy->bus);
307 }
308 virtio_set_features(vdev, val);
309 break;
310 case VIRTIO_PCI_QUEUE_PFN:
311 pa = (hwaddr)val << VIRTIO_PCI_QUEUE_ADDR_SHIFT;
312 if (pa == 0) {
313 virtio_pci_reset(DEVICE(proxy));
314 }
315 else
316 virtio_queue_set_addr(vdev, vdev->queue_sel, pa);
317 break;
318 case VIRTIO_PCI_QUEUE_SEL:
319 if (val < VIRTIO_QUEUE_MAX)
320 vdev->queue_sel = val;
321 break;
322 case VIRTIO_PCI_QUEUE_NOTIFY:
323 if (val < VIRTIO_QUEUE_MAX) {
324 virtio_queue_notify(vdev, val);
325 }
326 break;
327 case VIRTIO_PCI_STATUS:
328 if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) {
329 virtio_pci_stop_ioeventfd(proxy);
330 }
331
332 virtio_set_status(vdev, val & 0xFF);
333
334 if (val & VIRTIO_CONFIG_S_DRIVER_OK) {
335 virtio_pci_start_ioeventfd(proxy);
336 }
337
338 if (vdev->status == 0) {
339 virtio_pci_reset(DEVICE(proxy));
340 }
341
342 /* Linux before 2.6.34 drives the device without enabling
343 the PCI device bus master bit. Enable it automatically
344 for the guest. This is a PCI spec violation but so is
345 initiating DMA with bus master bit clear. */
346 if (val == (VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER)) {
347 pci_default_write_config(&proxy->pci_dev, PCI_COMMAND,
348 proxy->pci_dev.config[PCI_COMMAND] |
349 PCI_COMMAND_MASTER, 1);
350 }
351 break;
352 case VIRTIO_MSI_CONFIG_VECTOR:
353 msix_vector_unuse(&proxy->pci_dev, vdev->config_vector);
354 /* Make it possible for the guest to discover that an error took place. */
355 if (msix_vector_use(&proxy->pci_dev, val) < 0)
356 val = VIRTIO_NO_VECTOR;
357 vdev->config_vector = val;
358 break;
359 case VIRTIO_MSI_QUEUE_VECTOR:
360 msix_vector_unuse(&proxy->pci_dev,
361 virtio_queue_vector(vdev, vdev->queue_sel));
362 /* Make it possible for the guest to discover that an error took place. */
363 if (msix_vector_use(&proxy->pci_dev, val) < 0)
364 val = VIRTIO_NO_VECTOR;
365 virtio_queue_set_vector(vdev, vdev->queue_sel, val);
366 break;
367 default:
368 error_report("%s: unexpected address 0x%x value 0x%x",
369 __func__, addr, val);
370 break;
371 }
372 }
373
374 static uint32_t virtio_ioport_read(VirtIOPCIProxy *proxy, uint32_t addr)
375 {
376 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
377 uint32_t ret = 0xFFFFFFFF;
378
379 switch (addr) {
380 case VIRTIO_PCI_HOST_FEATURES:
381 ret = vdev->host_features;
382 break;
383 case VIRTIO_PCI_GUEST_FEATURES:
384 ret = vdev->guest_features;
385 break;
386 case VIRTIO_PCI_QUEUE_PFN:
387 ret = virtio_queue_get_addr(vdev, vdev->queue_sel)
388 >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
389 break;
390 case VIRTIO_PCI_QUEUE_NUM:
391 ret = virtio_queue_get_num(vdev, vdev->queue_sel);
392 break;
393 case VIRTIO_PCI_QUEUE_SEL:
394 ret = vdev->queue_sel;
395 break;
396 case VIRTIO_PCI_STATUS:
397 ret = vdev->status;
398 break;
399 case VIRTIO_PCI_ISR:
400 /* reading from the ISR also clears it. */
401 ret = atomic_xchg(&vdev->isr, 0);
402 pci_irq_deassert(&proxy->pci_dev);
403 break;
404 case VIRTIO_MSI_CONFIG_VECTOR:
405 ret = vdev->config_vector;
406 break;
407 case VIRTIO_MSI_QUEUE_VECTOR:
408 ret = virtio_queue_vector(vdev, vdev->queue_sel);
409 break;
410 default:
411 break;
412 }
413
414 return ret;
415 }
416
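/* Accesses to the legacy BAR: offsets below VIRTIO_PCI_CONFIG_SIZE hit the
 * register block handled above, the remainder is the device-specific config
 * space, which legacy devices expose in target-native byte order (hence the
 * swaps when the device is big-endian).
 */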
417 static uint64_t virtio_pci_config_read(void *opaque, hwaddr addr,
418 unsigned size)
419 {
420 VirtIOPCIProxy *proxy = opaque;
421 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
422 uint32_t config = VIRTIO_PCI_CONFIG_SIZE(&proxy->pci_dev);
423 uint64_t val = 0;
424 if (addr < config) {
425 return virtio_ioport_read(proxy, addr);
426 }
427 addr -= config;
428
429 switch (size) {
430 case 1:
431 val = virtio_config_readb(vdev, addr);
432 break;
433 case 2:
434 val = virtio_config_readw(vdev, addr);
435 if (virtio_is_big_endian(vdev)) {
436 val = bswap16(val);
437 }
438 break;
439 case 4:
440 val = virtio_config_readl(vdev, addr);
441 if (virtio_is_big_endian(vdev)) {
442 val = bswap32(val);
443 }
444 break;
445 }
446 return val;
447 }
448
449 static void virtio_pci_config_write(void *opaque, hwaddr addr,
450 uint64_t val, unsigned size)
451 {
452 VirtIOPCIProxy *proxy = opaque;
453 uint32_t config = VIRTIO_PCI_CONFIG_SIZE(&proxy->pci_dev);
454 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
455 if (addr < config) {
456 virtio_ioport_write(proxy, addr, val);
457 return;
458 }
459 addr -= config;
460 /*
461 * Virtio-PCI is odd. Ioports are LE but config space is target native
462 * endian.
463 */
464 switch (size) {
465 case 1:
466 virtio_config_writeb(vdev, addr, val);
467 break;
468 case 2:
469 if (virtio_is_big_endian(vdev)) {
470 val = bswap16(val);
471 }
472 virtio_config_writew(vdev, addr, val);
473 break;
474 case 4:
475 if (virtio_is_big_endian(vdev)) {
476 val = bswap32(val);
477 }
478 virtio_config_writel(vdev, addr, val);
479 break;
480 }
481 }
482
483 static const MemoryRegionOps virtio_pci_config_ops = {
484 .read = virtio_pci_config_read,
485 .write = virtio_pci_config_write,
486 .impl = {
487 .min_access_size = 1,
488 .max_access_size = 4,
489 },
490 .endianness = DEVICE_LITTLE_ENDIAN,
491 };
492
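/* Translate an offset inside the modern BAR into the VirtIOPCIRegion that
 * contains it, rewriting *off to be region-relative. Returns NULL if the
 * access does not fall entirely within one region.
 */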
493 static MemoryRegion *virtio_address_space_lookup(VirtIOPCIProxy *proxy,
494 hwaddr *off, int len)
495 {
496 int i;
497 VirtIOPCIRegion *reg;
498
499 for (i = 0; i < ARRAY_SIZE(proxy->regs); ++i) {
500 reg = &proxy->regs[i];
501 if (*off >= reg->offset &&
502 *off + len <= reg->offset + reg->size) {
503 *off -= reg->offset;
504 return &reg->mr;
505 }
506 }
507
508 return NULL;
509 }
510
511 /* Below are generic functions to do memcpy from/to an address space,
512 * without byteswaps, with input validation.
513 *
514 * As regular address_space_* APIs all do some kind of byteswap at least for
515 * some host/target combinations, we are forced to explicitly convert to a
516 * known-endianness integer value.
517 * It doesn't really matter which endian format to go through, so the code
518 * below selects the endian that causes the least amount of work on the given
519 * host.
520 *
521 * Note: host pointer must be aligned.
522 */
523 static
524 void virtio_address_space_write(VirtIOPCIProxy *proxy, hwaddr addr,
525 const uint8_t *buf, int len)
526 {
527 uint64_t val;
528 MemoryRegion *mr;
529
530 /* address_space_* APIs assume an aligned address.
531 * As address is under guest control, handle illegal values.
532 */
533 addr &= ~(len - 1);
534
535 mr = virtio_address_space_lookup(proxy, &addr, len);
536 if (!mr) {
537 return;
538 }
539
540 /* Make sure caller aligned buf properly */
541 assert(!(((uintptr_t)buf) & (len - 1)));
542
543 switch (len) {
544 case 1:
545 val = pci_get_byte(buf);
546 break;
547 case 2:
548 val = cpu_to_le16(pci_get_word(buf));
549 break;
550 case 4:
551 val = cpu_to_le32(pci_get_long(buf));
552 break;
553 default:
554 /* As length is under guest control, handle illegal values. */
555 return;
556 }
557 memory_region_dispatch_write(mr, addr, val, len, MEMTXATTRS_UNSPECIFIED);
558 }
559
560 static void
561 virtio_address_space_read(VirtIOPCIProxy *proxy, hwaddr addr,
562 uint8_t *buf, int len)
563 {
564 uint64_t val;
565 MemoryRegion *mr;
566
567 /* address_space_* APIs assume an aligned address.
568 * As address is under guest control, handle illegal values.
569 */
570 addr &= ~(len - 1);
571
572 mr = virtio_address_space_lookup(proxy, &addr, len);
573 if (!mr) {
574 return;
575 }
576
577 /* Make sure caller aligned buf properly */
578 assert(!(((uintptr_t)buf) & (len - 1)));
579
580 memory_region_dispatch_read(mr, addr, &val, len, MEMTXATTRS_UNSPECIFIED);
581 switch (len) {
582 case 1:
583 pci_set_byte(buf, val);
584 break;
585 case 2:
586 pci_set_word(buf, le16_to_cpu(val));
587 break;
588 case 4:
589 pci_set_long(buf, le32_to_cpu(val));
590 break;
591 default:
592 /* As length is under guest control, handle illegal values. */
593 break;
594 }
595 }
596
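/* PCI config space access hooks. Clearing the bus master bit stops ioeventfd
 * and drops DRIVER_OK, since the device may no longer perform DMA. Accesses
 * to the pci_cfg_data window of the VIRTIO_PCI_CAP_PCI_CFG capability are
 * forwarded into the modern BAR via virtio_address_space_write/read.
 */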
597 static void virtio_write_config(PCIDevice *pci_dev, uint32_t address,
598 uint32_t val, int len)
599 {
600 VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
601 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
602 struct virtio_pci_cfg_cap *cfg;
603
604 pci_default_write_config(pci_dev, address, val, len);
605
606 if (range_covers_byte(address, len, PCI_COMMAND) &&
607 !(pci_dev->config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
608 virtio_pci_stop_ioeventfd(proxy);
609 virtio_set_status(vdev, vdev->status & ~VIRTIO_CONFIG_S_DRIVER_OK);
610 }
611
612 if (proxy->config_cap &&
613 ranges_overlap(address, len, proxy->config_cap + offsetof(struct virtio_pci_cfg_cap,
614 pci_cfg_data),
615 sizeof cfg->pci_cfg_data)) {
616 uint32_t off;
617 uint32_t len;
618
619 cfg = (void *)(proxy->pci_dev.config + proxy->config_cap);
620 off = le32_to_cpu(cfg->cap.offset);
621 len = le32_to_cpu(cfg->cap.length);
622
623 if (len == 1 || len == 2 || len == 4) {
624 assert(len <= sizeof cfg->pci_cfg_data);
625 virtio_address_space_write(proxy, off, cfg->pci_cfg_data, len);
626 }
627 }
628 }
629
630 static uint32_t virtio_read_config(PCIDevice *pci_dev,
631 uint32_t address, int len)
632 {
633 VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
634 struct virtio_pci_cfg_cap *cfg;
635
636 if (proxy->config_cap &&
637 ranges_overlap(address, len, proxy->config_cap + offsetof(struct virtio_pci_cfg_cap,
638 pci_cfg_data),
639 sizeof cfg->pci_cfg_data)) {
640 uint32_t off;
641 uint32_t len;
642
643 cfg = (void *)(proxy->pci_dev.config + proxy->config_cap);
644 off = le32_to_cpu(cfg->cap.offset);
645 len = le32_to_cpu(cfg->cap.length);
646
647 if (len == 1 || len == 2 || len == 4) {
648 assert(len <= sizeof cfg->pci_cfg_data);
649 virtio_address_space_read(proxy, off, cfg->pci_cfg_data, len);
650 }
651 }
652
653 return pci_default_read_config(pci_dev, address, len);
654 }
655
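/* Helpers for routing MSI-X vectors through KVM. Each vector gets a
 * reference-counted MSI route (vq_vector_use/release); irqfd_use/release
 * bind or unbind a queue's guest notifier eventfd to that route so the
 * interrupt can be injected without going through userspace.
 */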
656 static int kvm_virtio_pci_vq_vector_use(VirtIOPCIProxy *proxy,
657 unsigned int queue_no,
658 unsigned int vector)
659 {
660 VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
661 int ret;
662
663 if (irqfd->users == 0) {
664 ret = kvm_irqchip_add_msi_route(kvm_state, vector, &proxy->pci_dev);
665 if (ret < 0) {
666 return ret;
667 }
668 irqfd->virq = ret;
669 }
670 irqfd->users++;
671 return 0;
672 }
673
674 static void kvm_virtio_pci_vq_vector_release(VirtIOPCIProxy *proxy,
675 unsigned int vector)
676 {
677 VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
678 if (--irqfd->users == 0) {
679 kvm_irqchip_release_virq(kvm_state, irqfd->virq);
680 }
681 }
682
683 static int kvm_virtio_pci_irqfd_use(VirtIOPCIProxy *proxy,
684 unsigned int queue_no,
685 unsigned int vector)
686 {
687 VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
688 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
689 VirtQueue *vq = virtio_get_queue(vdev, queue_no);
690 EventNotifier *n = virtio_queue_get_guest_notifier(vq);
691 return kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL, irqfd->virq);
692 }
693
694 static void kvm_virtio_pci_irqfd_release(VirtIOPCIProxy *proxy,
695 unsigned int queue_no,
696 unsigned int vector)
697 {
698 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
699 VirtQueue *vq = virtio_get_queue(vdev, queue_no);
700 EventNotifier *n = virtio_queue_get_guest_notifier(vq);
701 VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
702 int ret;
703
704 ret = kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, n, irqfd->virq);
705 assert(ret == 0);
706 }
707
708 static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
709 {
710 PCIDevice *dev = &proxy->pci_dev;
711 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
712 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
713 unsigned int vector;
714 int ret, queue_no;
715
716 for (queue_no = 0; queue_no < nvqs; queue_no++) {
717 if (!virtio_queue_get_num(vdev, queue_no)) {
718 break;
719 }
720 vector = virtio_queue_vector(vdev, queue_no);
721 if (vector >= msix_nr_vectors_allocated(dev)) {
722 continue;
723 }
724 ret = kvm_virtio_pci_vq_vector_use(proxy, queue_no, vector);
725 if (ret < 0) {
726 goto undo;
727 }
728 /* If guest supports masking, set up irqfd now.
729 * Otherwise, delay until unmasked in the frontend.
730 */
731 if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
732 ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
733 if (ret < 0) {
734 kvm_virtio_pci_vq_vector_release(proxy, vector);
735 goto undo;
736 }
737 }
738 }
739 return 0;
740
741 undo:
742 while (--queue_no >= 0) {
743 vector = virtio_queue_vector(vdev, queue_no);
744 if (vector >= msix_nr_vectors_allocated(dev)) {
745 continue;
746 }
747 if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
748 kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
749 }
750 kvm_virtio_pci_vq_vector_release(proxy, vector);
751 }
752 return ret;
753 }
754
755 static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
756 {
757 PCIDevice *dev = &proxy->pci_dev;
758 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
759 unsigned int vector;
760 int queue_no;
761 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
762
763 for (queue_no = 0; queue_no < nvqs; queue_no++) {
764 if (!virtio_queue_get_num(vdev, queue_no)) {
765 break;
766 }
767 vector = virtio_queue_vector(vdev, queue_no);
768 if (vector >= msix_nr_vectors_allocated(dev)) {
769 continue;
770 }
771 /* If guest supports masking, clean up irqfd now.
772 * Otherwise, it was cleaned when masked in the frontend.
773 */
774 if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
775 kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
776 }
777 kvm_virtio_pci_vq_vector_release(proxy, vector);
778 }
779 }
780
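/* Per-queue MSI-X unmask handler: refresh the KVM MSI route if the message
 * changed, then either unmask via the device's guest_notifier_mask hook
 * (re-checking for pending events) or attach the irqfd now.
 */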
781 static int virtio_pci_vq_vector_unmask(VirtIOPCIProxy *proxy,
782 unsigned int queue_no,
783 unsigned int vector,
784 MSIMessage msg)
785 {
786 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
787 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
788 VirtQueue *vq = virtio_get_queue(vdev, queue_no);
789 EventNotifier *n = virtio_queue_get_guest_notifier(vq);
790 VirtIOIRQFD *irqfd;
791 int ret = 0;
792
793 if (proxy->vector_irqfd) {
794 irqfd = &proxy->vector_irqfd[vector];
795 if (irqfd->msg.data != msg.data || irqfd->msg.address != msg.address) {
796 ret = kvm_irqchip_update_msi_route(kvm_state, irqfd->virq, msg,
797 &proxy->pci_dev);
798 if (ret < 0) {
799 return ret;
800 }
801 kvm_irqchip_commit_routes(kvm_state);
802 }
803 }
804
805 /* If guest supports masking, irqfd is already setup, unmask it.
806 * Otherwise, set it up now.
807 */
808 if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
809 k->guest_notifier_mask(vdev, queue_no, false);
810 /* Test after unmasking to avoid losing events. */
811 if (k->guest_notifier_pending &&
812 k->guest_notifier_pending(vdev, queue_no)) {
813 event_notifier_set(n);
814 }
815 } else {
816 ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
817 }
818 return ret;
819 }
820
821 static void virtio_pci_vq_vector_mask(VirtIOPCIProxy *proxy,
822 unsigned int queue_no,
823 unsigned int vector)
824 {
825 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
826 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
827
828 /* If guest supports masking, keep irqfd but mask it.
829 * Otherwise, clean it up now.
830 */
831 if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
832 k->guest_notifier_mask(vdev, queue_no, true);
833 } else {
834 kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
835 }
836 }
837
838 static int virtio_pci_vector_unmask(PCIDevice *dev, unsigned vector,
839 MSIMessage msg)
840 {
841 VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
842 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
843 VirtQueue *vq = virtio_vector_first_queue(vdev, vector);
844 int ret, index, unmasked = 0;
845
846 while (vq) {
847 index = virtio_get_queue_index(vq);
848 if (!virtio_queue_get_num(vdev, index)) {
849 break;
850 }
851 if (index < proxy->nvqs_with_notifiers) {
852 ret = virtio_pci_vq_vector_unmask(proxy, index, vector, msg);
853 if (ret < 0) {
854 goto undo;
855 }
856 ++unmasked;
857 }
858 vq = virtio_vector_next_queue(vq);
859 }
860
861 return 0;
862
863 undo:
864 vq = virtio_vector_first_queue(vdev, vector);
865 while (vq && unmasked >= 0) {
866 index = virtio_get_queue_index(vq);
867 if (index < proxy->nvqs_with_notifiers) {
868 virtio_pci_vq_vector_mask(proxy, index, vector);
869 --unmasked;
870 }
871 vq = virtio_vector_next_queue(vq);
872 }
873 return ret;
874 }
875
876 static void virtio_pci_vector_mask(PCIDevice *dev, unsigned vector)
877 {
878 VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
879 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
880 VirtQueue *vq = virtio_vector_first_queue(vdev, vector);
881 int index;
882
883 while (vq) {
884 index = virtio_get_queue_index(vq);
885 if (!virtio_queue_get_num(vdev, index)) {
886 break;
887 }
888 if (index < proxy->nvqs_with_notifiers) {
889 virtio_pci_vq_vector_mask(proxy, index, vector);
890 }
891 vq = virtio_vector_next_queue(vq);
892 }
893 }
894
895 static void virtio_pci_vector_poll(PCIDevice *dev,
896 unsigned int vector_start,
897 unsigned int vector_end)
898 {
899 VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
900 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
901 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
902 int queue_no;
903 unsigned int vector;
904 EventNotifier *notifier;
905 VirtQueue *vq;
906
907 for (queue_no = 0; queue_no < proxy->nvqs_with_notifiers; queue_no++) {
908 if (!virtio_queue_get_num(vdev, queue_no)) {
909 break;
910 }
911 vector = virtio_queue_vector(vdev, queue_no);
912 if (vector < vector_start || vector >= vector_end ||
913 !msix_is_masked(dev, vector)) {
914 continue;
915 }
916 vq = virtio_get_queue(vdev, queue_no);
917 notifier = virtio_queue_get_guest_notifier(vq);
918 if (k->guest_notifier_pending) {
919 if (k->guest_notifier_pending(vdev, queue_no)) {
920 msix_set_pending(dev, vector);
921 }
922 } else if (event_notifier_test_and_clear(notifier)) {
923 msix_set_pending(dev, vector);
924 }
925 }
926 }
927
928 static int virtio_pci_set_guest_notifier(DeviceState *d, int n, bool assign,
929 bool with_irqfd)
930 {
931 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
932 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
933 VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
934 VirtQueue *vq = virtio_get_queue(vdev, n);
935 EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);
936
937 if (assign) {
938 int r = event_notifier_init(notifier, 0);
939 if (r < 0) {
940 return r;
941 }
942 virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
943 } else {
944 virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
945 event_notifier_cleanup(notifier);
946 }
947
948 if (!msix_enabled(&proxy->pci_dev) &&
949 vdev->use_guest_notifier_mask &&
950 vdc->guest_notifier_mask) {
951 vdc->guest_notifier_mask(vdev, n, !assign);
952 }
953
954 return 0;
955 }
956
957 static bool virtio_pci_query_guest_notifiers(DeviceState *d)
958 {
959 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
960 return msix_enabled(&proxy->pci_dev);
961 }
962
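/* Assign or deassign guest notifiers for the first nvqs queues. With MSI-X
 * and KVM irqfd support, vectors are wired through KVM
 * (kvm_virtio_pci_vector_use) and the MSI-X mask/unmask/poll notifiers are
 * installed; on failure everything set up so far is rolled back.
 */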
963 static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign)
964 {
965 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
966 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
967 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
968 int r, n;
969 bool with_irqfd = msix_enabled(&proxy->pci_dev) &&
970 kvm_msi_via_irqfd_enabled();
971
972 nvqs = MIN(nvqs, VIRTIO_QUEUE_MAX);
973
974 /* When deassigning, pass a consistent nvqs value
975 * to avoid leaking notifiers.
976 */
977 assert(assign || nvqs == proxy->nvqs_with_notifiers);
978
979 proxy->nvqs_with_notifiers = nvqs;
980
981 /* Must unset vector notifier while guest notifier is still assigned */
982 if ((proxy->vector_irqfd || k->guest_notifier_mask) && !assign) {
983 msix_unset_vector_notifiers(&proxy->pci_dev);
984 if (proxy->vector_irqfd) {
985 kvm_virtio_pci_vector_release(proxy, nvqs);
986 g_free(proxy->vector_irqfd);
987 proxy->vector_irqfd = NULL;
988 }
989 }
990
991 for (n = 0; n < nvqs; n++) {
992 if (!virtio_queue_get_num(vdev, n)) {
993 break;
994 }
995
996 r = virtio_pci_set_guest_notifier(d, n, assign, with_irqfd);
997 if (r < 0) {
998 goto assign_error;
999 }
1000 }
1001
1002 /* Must set vector notifier after guest notifier has been assigned */
1003 if ((with_irqfd || k->guest_notifier_mask) && assign) {
1004 if (with_irqfd) {
1005 proxy->vector_irqfd =
1006 g_malloc0(sizeof(*proxy->vector_irqfd) *
1007 msix_nr_vectors_allocated(&proxy->pci_dev));
1008 r = kvm_virtio_pci_vector_use(proxy, nvqs);
1009 if (r < 0) {
1010 goto assign_error;
1011 }
1012 }
1013 r = msix_set_vector_notifiers(&proxy->pci_dev,
1014 virtio_pci_vector_unmask,
1015 virtio_pci_vector_mask,
1016 virtio_pci_vector_poll);
1017 if (r < 0) {
1018 goto notifiers_error;
1019 }
1020 }
1021
1022 return 0;
1023
1024 notifiers_error:
1025 if (with_irqfd) {
1026 assert(assign);
1027 kvm_virtio_pci_vector_release(proxy, nvqs);
1028 }
1029
1030 assign_error:
1031 /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
1032 assert(assign);
1033 while (--n >= 0) {
1034 virtio_pci_set_guest_notifier(d, n, !assign, with_irqfd);
1035 }
1036 return r;
1037 }
1038
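/* Map the memory region passed by the caller over the queue's slot in the
 * modern notify BAR (above the default notify handler), or remove it again.
 * Only allowed for modern devices and when the region size matches the
 * per-queue notify stride.
 */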
1039 static int virtio_pci_set_host_notifier_mr(DeviceState *d, int n,
1040 MemoryRegion *mr, bool assign)
1041 {
1042 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
1043 int offset;
1044
1045 if (n >= VIRTIO_QUEUE_MAX || !virtio_pci_modern(proxy) ||
1046 virtio_pci_queue_mem_mult(proxy) != memory_region_size(mr)) {
1047 return -1;
1048 }
1049
1050 if (assign) {
1051 offset = virtio_pci_queue_mem_mult(proxy) * n;
1052 memory_region_add_subregion_overlap(&proxy->notify.mr, offset, mr, 1);
1053 } else {
1054 memory_region_del_subregion(&proxy->notify.mr, mr);
1055 }
1056
1057 return 0;
1058 }
1059
1060 static void virtio_pci_vmstate_change(DeviceState *d, bool running)
1061 {
1062 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
1063 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
1064
1065 if (running) {
1066 /* Old QEMU versions did not set bus master enable on status write.
1067 * Detect DRIVER set and enable it.
1068 */
1069 if ((proxy->flags & VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION) &&
1070 (vdev->status & VIRTIO_CONFIG_S_DRIVER) &&
1071 !(proxy->pci_dev.config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
1072 pci_default_write_config(&proxy->pci_dev, PCI_COMMAND,
1073 proxy->pci_dev.config[PCI_COMMAND] |
1074 PCI_COMMAND_MASTER, 1);
1075 }
1076 virtio_pci_start_ioeventfd(proxy);
1077 } else {
1078 virtio_pci_stop_ioeventfd(proxy);
1079 }
1080 }
1081
1082 #ifdef CONFIG_VIRTFS
1083 static void virtio_9p_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
1084 {
1085 V9fsPCIState *dev = VIRTIO_9P_PCI(vpci_dev);
1086 DeviceState *vdev = DEVICE(&dev->vdev);
1087
1088 qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
1089 object_property_set_bool(OBJECT(vdev), true, "realized", errp);
1090 }
1091
1092 static Property virtio_9p_pci_properties[] = {
1093 DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
1094 VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
1095 DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
1096 DEFINE_PROP_END_OF_LIST(),
1097 };
1098
1099 static void virtio_9p_pci_class_init(ObjectClass *klass, void *data)
1100 {
1101 DeviceClass *dc = DEVICE_CLASS(klass);
1102 PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
1103 VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
1104
1105 k->realize = virtio_9p_pci_realize;
1106 pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
1107 pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_9P;
1108 pcidev_k->revision = VIRTIO_PCI_ABI_VERSION;
1109 pcidev_k->class_id = 0x2;
1110 set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
1111 dc->props = virtio_9p_pci_properties;
1112 }
1113
1114 static void virtio_9p_pci_instance_init(Object *obj)
1115 {
1116 V9fsPCIState *dev = VIRTIO_9P_PCI(obj);
1117
1118 virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
1119 TYPE_VIRTIO_9P);
1120 }
1121
1122 static const VirtioPCIDeviceTypeInfo virtio_9p_pci_info = {
1123 .generic_name = TYPE_VIRTIO_9P_PCI,
1124 .instance_size = sizeof(V9fsPCIState),
1125 .instance_init = virtio_9p_pci_instance_init,
1126 .class_init = virtio_9p_pci_class_init,
1127 };
1128 #endif /* CONFIG_VIRTFS */
1129
1130 /*
1131 * virtio-pci: This is the PCIDevice which has a virtio-pci-bus.
1132 */
1133
1134 static int virtio_pci_query_nvectors(DeviceState *d)
1135 {
1136 VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
1137
1138 return proxy->nvectors;
1139 }
1140
1141 static AddressSpace *virtio_pci_get_dma_as(DeviceState *d)
1142 {
1143 VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
1144 PCIDevice *dev = &proxy->pci_dev;
1145
1146 return pci_get_address_space(dev);
1147 }
1148
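/* Append a vendor-specific PCI capability describing one modern region.
 * pci_add_capability() fills in the generic header; the rest of the
 * caller-provided virtio_pci_cap is copied in behind it. Returns the config
 * space offset of the new capability.
 */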
1149 static int virtio_pci_add_mem_cap(VirtIOPCIProxy *proxy,
1150 struct virtio_pci_cap *cap)
1151 {
1152 PCIDevice *dev = &proxy->pci_dev;
1153 int offset;
1154
1155 offset = pci_add_capability(dev, PCI_CAP_ID_VNDR, 0,
1156 cap->cap_len, &error_abort);
1157
1158 assert(cap->cap_len >= sizeof *cap);
1159 memcpy(dev->config + offset + PCI_CAP_FLAGS, &cap->cap_len,
1160 cap->cap_len - PCI_CAP_FLAGS);
1161
1162 return offset;
1163 }
1164
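/* Registers of the modern common configuration structure
 * (VIRTIO_PCI_CAP_COMMON_CFG). Feature/queue selectors and ring addresses
 * are staged in the proxy; the staged addresses are only pushed into the
 * virtio core when the driver writes Q_ENABLE (see virtio_pci_common_write).
 */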
1165 static uint64_t virtio_pci_common_read(void *opaque, hwaddr addr,
1166 unsigned size)
1167 {
1168 VirtIOPCIProxy *proxy = opaque;
1169 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
1170 uint32_t val = 0;
1171 int i;
1172
1173 switch (addr) {
1174 case VIRTIO_PCI_COMMON_DFSELECT:
1175 val = proxy->dfselect;
1176 break;
1177 case VIRTIO_PCI_COMMON_DF:
1178 if (proxy->dfselect <= 1) {
1179 VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
1180
1181 val = (vdev->host_features & ~vdc->legacy_features) >>
1182 (32 * proxy->dfselect);
1183 }
1184 break;
1185 case VIRTIO_PCI_COMMON_GFSELECT:
1186 val = proxy->gfselect;
1187 break;
1188 case VIRTIO_PCI_COMMON_GF:
1189 if (proxy->gfselect < ARRAY_SIZE(proxy->guest_features)) {
1190 val = proxy->guest_features[proxy->gfselect];
1191 }
1192 break;
1193 case VIRTIO_PCI_COMMON_MSIX:
1194 val = vdev->config_vector;
1195 break;
1196 case VIRTIO_PCI_COMMON_NUMQ:
1197 for (i = 0; i < VIRTIO_QUEUE_MAX; ++i) {
1198 if (virtio_queue_get_num(vdev, i)) {
1199 val = i + 1;
1200 }
1201 }
1202 break;
1203 case VIRTIO_PCI_COMMON_STATUS:
1204 val = vdev->status;
1205 break;
1206 case VIRTIO_PCI_COMMON_CFGGENERATION:
1207 val = vdev->generation;
1208 break;
1209 case VIRTIO_PCI_COMMON_Q_SELECT:
1210 val = vdev->queue_sel;
1211 break;
1212 case VIRTIO_PCI_COMMON_Q_SIZE:
1213 val = virtio_queue_get_num(vdev, vdev->queue_sel);
1214 break;
1215 case VIRTIO_PCI_COMMON_Q_MSIX:
1216 val = virtio_queue_vector(vdev, vdev->queue_sel);
1217 break;
1218 case VIRTIO_PCI_COMMON_Q_ENABLE:
1219 val = proxy->vqs[vdev->queue_sel].enabled;
1220 break;
1221 case VIRTIO_PCI_COMMON_Q_NOFF:
1222 /* Simply map queues in order */
1223 val = vdev->queue_sel;
1224 break;
1225 case VIRTIO_PCI_COMMON_Q_DESCLO:
1226 val = proxy->vqs[vdev->queue_sel].desc[0];
1227 break;
1228 case VIRTIO_PCI_COMMON_Q_DESCHI:
1229 val = proxy->vqs[vdev->queue_sel].desc[1];
1230 break;
1231 case VIRTIO_PCI_COMMON_Q_AVAILLO:
1232 val = proxy->vqs[vdev->queue_sel].avail[0];
1233 break;
1234 case VIRTIO_PCI_COMMON_Q_AVAILHI:
1235 val = proxy->vqs[vdev->queue_sel].avail[1];
1236 break;
1237 case VIRTIO_PCI_COMMON_Q_USEDLO:
1238 val = proxy->vqs[vdev->queue_sel].used[0];
1239 break;
1240 case VIRTIO_PCI_COMMON_Q_USEDHI:
1241 val = proxy->vqs[vdev->queue_sel].used[1];
1242 break;
1243 default:
1244 val = 0;
1245 }
1246
1247 return val;
1248 }
1249
1250 static void virtio_pci_common_write(void *opaque, hwaddr addr,
1251 uint64_t val, unsigned size)
1252 {
1253 VirtIOPCIProxy *proxy = opaque;
1254 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
1255
1256 switch (addr) {
1257 case VIRTIO_PCI_COMMON_DFSELECT:
1258 proxy->dfselect = val;
1259 break;
1260 case VIRTIO_PCI_COMMON_GFSELECT:
1261 proxy->gfselect = val;
1262 break;
1263 case VIRTIO_PCI_COMMON_GF:
1264 if (proxy->gfselect < ARRAY_SIZE(proxy->guest_features)) {
1265 proxy->guest_features[proxy->gfselect] = val;
1266 virtio_set_features(vdev,
1267 (((uint64_t)proxy->guest_features[1]) << 32) |
1268 proxy->guest_features[0]);
1269 }
1270 break;
1271 case VIRTIO_PCI_COMMON_MSIX:
1272 msix_vector_unuse(&proxy->pci_dev, vdev->config_vector);
1273 /* Make it possible for the guest to discover that an error took place. */
1274 if (msix_vector_use(&proxy->pci_dev, val) < 0) {
1275 val = VIRTIO_NO_VECTOR;
1276 }
1277 vdev->config_vector = val;
1278 break;
1279 case VIRTIO_PCI_COMMON_STATUS:
1280 if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) {
1281 virtio_pci_stop_ioeventfd(proxy);
1282 }
1283
1284 virtio_set_status(vdev, val & 0xFF);
1285
1286 if (val & VIRTIO_CONFIG_S_DRIVER_OK) {
1287 virtio_pci_start_ioeventfd(proxy);
1288 }
1289
1290 if (vdev->status == 0) {
1291 virtio_pci_reset(DEVICE(proxy));
1292 }
1293
1294 break;
1295 case VIRTIO_PCI_COMMON_Q_SELECT:
1296 if (val < VIRTIO_QUEUE_MAX) {
1297 vdev->queue_sel = val;
1298 }
1299 break;
1300 case VIRTIO_PCI_COMMON_Q_SIZE:
1301 proxy->vqs[vdev->queue_sel].num = val;
1302 break;
1303 case VIRTIO_PCI_COMMON_Q_MSIX:
1304 msix_vector_unuse(&proxy->pci_dev,
1305 virtio_queue_vector(vdev, vdev->queue_sel));
1306 /* Make it possible for the guest to discover that an error took place. */
1307 if (msix_vector_use(&proxy->pci_dev, val) < 0) {
1308 val = VIRTIO_NO_VECTOR;
1309 }
1310 virtio_queue_set_vector(vdev, vdev->queue_sel, val);
1311 break;
1312 case VIRTIO_PCI_COMMON_Q_ENABLE:
1313 virtio_queue_set_num(vdev, vdev->queue_sel,
1314 proxy->vqs[vdev->queue_sel].num);
1315 virtio_queue_set_rings(vdev, vdev->queue_sel,
1316 ((uint64_t)proxy->vqs[vdev->queue_sel].desc[1]) << 32 |
1317 proxy->vqs[vdev->queue_sel].desc[0],
1318 ((uint64_t)proxy->vqs[vdev->queue_sel].avail[1]) << 32 |
1319 proxy->vqs[vdev->queue_sel].avail[0],
1320 ((uint64_t)proxy->vqs[vdev->queue_sel].used[1]) << 32 |
1321 proxy->vqs[vdev->queue_sel].used[0]);
1322 proxy->vqs[vdev->queue_sel].enabled = 1;
1323 break;
1324 case VIRTIO_PCI_COMMON_Q_DESCLO:
1325 proxy->vqs[vdev->queue_sel].desc[0] = val;
1326 break;
1327 case VIRTIO_PCI_COMMON_Q_DESCHI:
1328 proxy->vqs[vdev->queue_sel].desc[1] = val;
1329 break;
1330 case VIRTIO_PCI_COMMON_Q_AVAILLO:
1331 proxy->vqs[vdev->queue_sel].avail[0] = val;
1332 break;
1333 case VIRTIO_PCI_COMMON_Q_AVAILHI:
1334 proxy->vqs[vdev->queue_sel].avail[1] = val;
1335 break;
1336 case VIRTIO_PCI_COMMON_Q_USEDLO:
1337 proxy->vqs[vdev->queue_sel].used[0] = val;
1338 break;
1339 case VIRTIO_PCI_COMMON_Q_USEDHI:
1340 proxy->vqs[vdev->queue_sel].used[1] = val;
1341 break;
1342 default:
1343 break;
1344 }
1345 }
1346
1347
1348 static uint64_t virtio_pci_notify_read(void *opaque, hwaddr addr,
1349 unsigned size)
1350 {
1351 return 0;
1352 }
1353
1354 static void virtio_pci_notify_write(void *opaque, hwaddr addr,
1355 uint64_t val, unsigned size)
1356 {
1357 VirtIODevice *vdev = opaque;
1358 VirtIOPCIProxy *proxy = VIRTIO_PCI(DEVICE(vdev)->parent_bus->parent);
1359 unsigned queue = addr / virtio_pci_queue_mem_mult(proxy);
1360
1361 if (queue < VIRTIO_QUEUE_MAX) {
1362 virtio_queue_notify(vdev, queue);
1363 }
1364 }
1365
1366 static void virtio_pci_notify_write_pio(void *opaque, hwaddr addr,
1367 uint64_t val, unsigned size)
1368 {
1369 VirtIODevice *vdev = opaque;
1370 unsigned queue = val;
1371
1372 if (queue < VIRTIO_QUEUE_MAX) {
1373 virtio_queue_notify(vdev, queue);
1374 }
1375 }
1376
1377 static uint64_t virtio_pci_isr_read(void *opaque, hwaddr addr,
1378 unsigned size)
1379 {
1380 VirtIOPCIProxy *proxy = opaque;
1381 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
1382 uint64_t val = atomic_xchg(&vdev->isr, 0);
1383 pci_irq_deassert(&proxy->pci_dev);
1384
1385 return val;
1386 }
1387
1388 static void virtio_pci_isr_write(void *opaque, hwaddr addr,
1389 uint64_t val, unsigned size)
1390 {
1391 }
1392
1393 static uint64_t virtio_pci_device_read(void *opaque, hwaddr addr,
1394 unsigned size)
1395 {
1396 VirtIODevice *vdev = opaque;
1397 uint64_t val = 0;
1398
1399 switch (size) {
1400 case 1:
1401 val = virtio_config_modern_readb(vdev, addr);
1402 break;
1403 case 2:
1404 val = virtio_config_modern_readw(vdev, addr);
1405 break;
1406 case 4:
1407 val = virtio_config_modern_readl(vdev, addr);
1408 break;
1409 }
1410 return val;
1411 }
1412
1413 static void virtio_pci_device_write(void *opaque, hwaddr addr,
1414 uint64_t val, unsigned size)
1415 {
1416 VirtIODevice *vdev = opaque;
1417 switch (size) {
1418 case 1:
1419 virtio_config_modern_writeb(vdev, addr, val);
1420 break;
1421 case 2:
1422 virtio_config_modern_writew(vdev, addr, val);
1423 break;
1424 case 4:
1425 virtio_config_modern_writel(vdev, addr, val);
1426 break;
1427 }
1428 }
1429
1430 static void virtio_pci_modern_regions_init(VirtIOPCIProxy *proxy)
1431 {
1432 static const MemoryRegionOps common_ops = {
1433 .read = virtio_pci_common_read,
1434 .write = virtio_pci_common_write,
1435 .impl = {
1436 .min_access_size = 1,
1437 .max_access_size = 4,
1438 },
1439 .endianness = DEVICE_LITTLE_ENDIAN,
1440 };
1441 static const MemoryRegionOps isr_ops = {
1442 .read = virtio_pci_isr_read,
1443 .write = virtio_pci_isr_write,
1444 .impl = {
1445 .min_access_size = 1,
1446 .max_access_size = 4,
1447 },
1448 .endianness = DEVICE_LITTLE_ENDIAN,
1449 };
1450 static const MemoryRegionOps device_ops = {
1451 .read = virtio_pci_device_read,
1452 .write = virtio_pci_device_write,
1453 .impl = {
1454 .min_access_size = 1,
1455 .max_access_size = 4,
1456 },
1457 .endianness = DEVICE_LITTLE_ENDIAN,
1458 };
1459 static const MemoryRegionOps notify_ops = {
1460 .read = virtio_pci_notify_read,
1461 .write = virtio_pci_notify_write,
1462 .impl = {
1463 .min_access_size = 1,
1464 .max_access_size = 4,
1465 },
1466 .endianness = DEVICE_LITTLE_ENDIAN,
1467 };
1468 static const MemoryRegionOps notify_pio_ops = {
1469 .read = virtio_pci_notify_read,
1470 .write = virtio_pci_notify_write_pio,
1471 .impl = {
1472 .min_access_size = 1,
1473 .max_access_size = 4,
1474 },
1475 .endianness = DEVICE_LITTLE_ENDIAN,
1476 };
1477
1478
1479 memory_region_init_io(&proxy->common.mr, OBJECT(proxy),
1480 &common_ops,
1481 proxy,
1482 "virtio-pci-common",
1483 proxy->common.size);
1484
1485 memory_region_init_io(&proxy->isr.mr, OBJECT(proxy),
1486 &isr_ops,
1487 proxy,
1488 "virtio-pci-isr",
1489 proxy->isr.size);
1490
1491 memory_region_init_io(&proxy->device.mr, OBJECT(proxy),
1492 &device_ops,
1493 virtio_bus_get_device(&proxy->bus),
1494 "virtio-pci-device",
1495 proxy->device.size);
1496
1497 memory_region_init_io(&proxy->notify.mr, OBJECT(proxy),
1498 &notify_ops,
1499 virtio_bus_get_device(&proxy->bus),
1500 "virtio-pci-notify",
1501 proxy->notify.size);
1502
1503 memory_region_init_io(&proxy->notify_pio.mr, OBJECT(proxy),
1504 &notify_pio_ops,
1505 virtio_bus_get_device(&proxy->bus),
1506 "virtio-pci-notify-pio",
1507 proxy->notify_pio.size);
1508 }
1509
1510 static void virtio_pci_modern_region_map(VirtIOPCIProxy *proxy,
1511 VirtIOPCIRegion *region,
1512 struct virtio_pci_cap *cap,
1513 MemoryRegion *mr,
1514 uint8_t bar)
1515 {
1516 memory_region_add_subregion(mr, region->offset, &region->mr);
1517
1518 cap->cfg_type = region->type;
1519 cap->bar = bar;
1520 cap->offset = cpu_to_le32(region->offset);
1521 cap->length = cpu_to_le32(region->size);
1522 virtio_pci_add_mem_cap(proxy, cap);
1523
1524 }
1525
1526 static void virtio_pci_modern_mem_region_map(VirtIOPCIProxy *proxy,
1527 VirtIOPCIRegion *region,
1528 struct virtio_pci_cap *cap)
1529 {
1530 virtio_pci_modern_region_map(proxy, region, cap,
1531 &proxy->modern_bar, proxy->modern_mem_bar_idx);
1532 }
1533
1534 static void virtio_pci_modern_io_region_map(VirtIOPCIProxy *proxy,
1535 VirtIOPCIRegion *region,
1536 struct virtio_pci_cap *cap)
1537 {
1538 virtio_pci_modern_region_map(proxy, region, cap,
1539 &proxy->io_bar, proxy->modern_io_bar_idx);
1540 }
1541
1542 static void virtio_pci_modern_mem_region_unmap(VirtIOPCIProxy *proxy,
1543 VirtIOPCIRegion *region)
1544 {
1545 memory_region_del_subregion(&proxy->modern_bar,
1546 &region->mr);
1547 }
1548
1549 static void virtio_pci_modern_io_region_unmap(VirtIOPCIProxy *proxy,
1550 VirtIOPCIRegion *region)
1551 {
1552 memory_region_del_subregion(&proxy->io_bar,
1553 &region->mr);
1554 }
1555
1556 static void virtio_pci_pre_plugged(DeviceState *d, Error **errp)
1557 {
1558 VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
1559 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
1560
1561 if (virtio_pci_modern(proxy)) {
1562 virtio_add_feature(&vdev->host_features, VIRTIO_F_VERSION_1);
1563 }
1564
1565 virtio_add_feature(&vdev->host_features, VIRTIO_F_BAD_FEATURE);
1566 }
1567
1568 /* This is called by virtio-bus just after the device is plugged. */
1569 static void virtio_pci_device_plugged(DeviceState *d, Error **errp)
1570 {
1571 VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
1572 VirtioBusState *bus = &proxy->bus;
1573 bool legacy = virtio_pci_legacy(proxy);
1574 bool modern;
1575 bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;
1576 uint8_t *config;
1577 uint32_t size;
1578 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
1579
1580 /*
1581 * Virtio capabilities present without
1582 * VIRTIO_F_VERSION_1 confuse guests
1583 */
1584 if (!proxy->ignore_backend_features &&
1585 !virtio_has_feature(vdev->host_features, VIRTIO_F_VERSION_1)) {
1586 virtio_pci_disable_modern(proxy);
1587
1588 if (!legacy) {
1589 error_setg(errp, "Device doesn't support modern mode, and legacy"
1590 " mode is disabled");
1591 error_append_hint(errp, "Set disable-legacy to off\n");
1592
1593 return;
1594 }
1595 }
1596
1597 modern = virtio_pci_modern(proxy);
1598
1599 config = proxy->pci_dev.config;
1600 if (proxy->class_code) {
1601 pci_config_set_class(config, proxy->class_code);
1602 }
1603
1604 if (legacy) {
1605 if (virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM)) {
1606 error_setg(errp, "VIRTIO_F_IOMMU_PLATFORM is not supported by"
1607 " legacy or transitional devices");
1608 return;
1609 }
1610 /*
1611 * Legacy and transitional devices use specific subsystem IDs.
1612 * Note that the subsystem vendor ID (config + PCI_SUBSYSTEM_VENDOR_ID)
1613 * is set to PCI_SUBVENDOR_ID_REDHAT_QUMRANET by default.
1614 */
1615 pci_set_word(config + PCI_SUBSYSTEM_ID, virtio_bus_get_vdev_id(bus));
1616 } else {
1617 /* pure virtio-1.0 */
1618 pci_set_word(config + PCI_VENDOR_ID,
1619 PCI_VENDOR_ID_REDHAT_QUMRANET);
1620 pci_set_word(config + PCI_DEVICE_ID,
1621 0x1040 + virtio_bus_get_vdev_id(bus));
1622 pci_config_set_revision(config, 1);
1623 }
1624 config[PCI_INTERRUPT_PIN] = 1;
1625
1626
1627 if (modern) {
1628 struct virtio_pci_cap cap = {
1629 .cap_len = sizeof cap,
1630 };
1631 struct virtio_pci_notify_cap notify = {
1632 .cap.cap_len = sizeof notify,
1633 .notify_off_multiplier =
1634 cpu_to_le32(virtio_pci_queue_mem_mult(proxy)),
1635 };
1636 struct virtio_pci_cfg_cap cfg = {
1637 .cap.cap_len = sizeof cfg,
1638 .cap.cfg_type = VIRTIO_PCI_CAP_PCI_CFG,
1639 };
1640 struct virtio_pci_notify_cap notify_pio = {
1641 .cap.cap_len = sizeof notify,
1642 .notify_off_multiplier = cpu_to_le32(0x0),
1643 };
1644
1645 struct virtio_pci_cfg_cap *cfg_mask;
1646
1647 virtio_pci_modern_regions_init(proxy);
1648
1649 virtio_pci_modern_mem_region_map(proxy, &proxy->common, &cap);
1650 virtio_pci_modern_mem_region_map(proxy, &proxy->isr, &cap);
1651 virtio_pci_modern_mem_region_map(proxy, &proxy->device, &cap);
1652 virtio_pci_modern_mem_region_map(proxy, &proxy->notify, &notify.cap);
1653
1654 if (modern_pio) {
1655 memory_region_init(&proxy->io_bar, OBJECT(proxy),
1656 "virtio-pci-io", 0x4);
1657
1658 pci_register_bar(&proxy->pci_dev, proxy->modern_io_bar_idx,
1659 PCI_BASE_ADDRESS_SPACE_IO, &proxy->io_bar);
1660
1661 virtio_pci_modern_io_region_map(proxy, &proxy->notify_pio,
1662 &notify_pio.cap);
1663 }
1664
1665 pci_register_bar(&proxy->pci_dev, proxy->modern_mem_bar_idx,
1666 PCI_BASE_ADDRESS_SPACE_MEMORY |
1667 PCI_BASE_ADDRESS_MEM_PREFETCH |
1668 PCI_BASE_ADDRESS_MEM_TYPE_64,
1669 &proxy->modern_bar);
1670
1671 proxy->config_cap = virtio_pci_add_mem_cap(proxy, &cfg.cap);
1672 cfg_mask = (void *)(proxy->pci_dev.wmask + proxy->config_cap);
1673 pci_set_byte(&cfg_mask->cap.bar, ~0x0);
1674 pci_set_long((uint8_t *)&cfg_mask->cap.offset, ~0x0);
1675 pci_set_long((uint8_t *)&cfg_mask->cap.length, ~0x0);
1676 pci_set_long(cfg_mask->pci_cfg_data, ~0x0);
1677 }
1678
1679 if (proxy->nvectors) {
1680 int err = msix_init_exclusive_bar(&proxy->pci_dev, proxy->nvectors,
1681 proxy->msix_bar_idx, NULL);
1682 if (err) {
1683 /* Notice when a system that supports MSI-X can't initialize it */
1684 if (err != -ENOTSUP) {
1685 warn_report("unable to init msix vectors to %" PRIu32,
1686 proxy->nvectors);
1687 }
1688 proxy->nvectors = 0;
1689 }
1690 }
1691
1692 proxy->pci_dev.config_write = virtio_write_config;
1693 proxy->pci_dev.config_read = virtio_read_config;
1694
1695 if (legacy) {
1696 size = VIRTIO_PCI_REGION_SIZE(&proxy->pci_dev)
1697 + virtio_bus_get_vdev_config_len(bus);
1698 size = pow2ceil(size);
1699
1700 memory_region_init_io(&proxy->bar, OBJECT(proxy),
1701 &virtio_pci_config_ops,
1702 proxy, "virtio-pci", size);
1703
1704 pci_register_bar(&proxy->pci_dev, proxy->legacy_io_bar_idx,
1705 PCI_BASE_ADDRESS_SPACE_IO, &proxy->bar);
1706 }
1707 }
1708
1709 static void virtio_pci_device_unplugged(DeviceState *d)
1710 {
1711 VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
1712 bool modern = virtio_pci_modern(proxy);
1713 bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;
1714
1715 virtio_pci_stop_ioeventfd(proxy);
1716
1717 if (modern) {
1718 virtio_pci_modern_mem_region_unmap(proxy, &proxy->common);
1719 virtio_pci_modern_mem_region_unmap(proxy, &proxy->isr);
1720 virtio_pci_modern_mem_region_unmap(proxy, &proxy->device);
1721 virtio_pci_modern_mem_region_unmap(proxy, &proxy->notify);
1722 if (modern_pio) {
1723 virtio_pci_modern_io_region_unmap(proxy, &proxy->notify_pio);
1724 }
1725 }
1726 }
1727
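/* Realize the PCI proxy: pick the default BAR layout, size the modern BAR's
 * common/isr/device/notify regions, and choose legacy vs. modern mode
 * (disable-legacy defaults to on for devices behind a PCI Express port).
 * On express ports the PCIe endpoint and power management capabilities are
 * added, plus optional error-reporting, link-control and ATS setup.
 */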
1728 static void virtio_pci_realize(PCIDevice *pci_dev, Error **errp)
1729 {
1730 VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
1731 VirtioPCIClass *k = VIRTIO_PCI_GET_CLASS(pci_dev);
1732 bool pcie_port = pci_bus_is_express(pci_get_bus(pci_dev)) &&
1733 !pci_bus_is_root(pci_get_bus(pci_dev));
1734
1735 if (kvm_enabled() && !kvm_has_many_ioeventfds()) {
1736 proxy->flags &= ~VIRTIO_PCI_FLAG_USE_IOEVENTFD;
1737 }
1738
1739 /*
1740 * virtio pci bar layout used by default.
1741 * subclasses can re-arrange things if needed.
1742 *
1743 * region 0 -- virtio legacy io bar
1744 * region 1 -- msi-x bar
     * region 2 -- virtio modern io bar (off by default)
1745 * region 4+5 -- virtio modern memory (64bit) bar
1746 *
1747 */
1748 proxy->legacy_io_bar_idx = 0;
1749 proxy->msix_bar_idx = 1;
1750 proxy->modern_io_bar_idx = 2;
1751 proxy->modern_mem_bar_idx = 4;
1752
1753 proxy->common.offset = 0x0;
1754 proxy->common.size = 0x1000;
1755 proxy->common.type = VIRTIO_PCI_CAP_COMMON_CFG;
1756
1757 proxy->isr.offset = 0x1000;
1758 proxy->isr.size = 0x1000;
1759 proxy->isr.type = VIRTIO_PCI_CAP_ISR_CFG;
1760
1761 proxy->device.offset = 0x2000;
1762 proxy->device.size = 0x1000;
1763 proxy->device.type = VIRTIO_PCI_CAP_DEVICE_CFG;
1764
1765 proxy->notify.offset = 0x3000;
1766 proxy->notify.size = virtio_pci_queue_mem_mult(proxy) * VIRTIO_QUEUE_MAX;
1767 proxy->notify.type = VIRTIO_PCI_CAP_NOTIFY_CFG;
1768
1769 proxy->notify_pio.offset = 0x0;
1770 proxy->notify_pio.size = 0x4;
1771 proxy->notify_pio.type = VIRTIO_PCI_CAP_NOTIFY_CFG;
1772
1773 /* subclasses can enforce modern, so do this unconditionally */
1774 memory_region_init(&proxy->modern_bar, OBJECT(proxy), "virtio-pci",
1775 /* PCI BAR regions must be powers of 2 */
1776 pow2ceil(proxy->notify.offset + proxy->notify.size));
1777
1778 if (proxy->disable_legacy == ON_OFF_AUTO_AUTO) {
1779 proxy->disable_legacy = pcie_port ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
1780 }
1781
1782 if (!virtio_pci_modern(proxy) && !virtio_pci_legacy(proxy)) {
1783 error_setg(errp, "device cannot work as neither modern nor legacy mode"
1784 " is enabled");
1785 error_append_hint(errp, "Set either disable-modern or disable-legacy"
1786 " to off\n");
1787 return;
1788 }
1789
1790 if (pcie_port && pci_is_express(pci_dev)) {
1791 int pos;
1792
1793 pos = pcie_endpoint_cap_init(pci_dev, 0);
1794 assert(pos > 0);
1795
1796 pos = pci_add_capability(pci_dev, PCI_CAP_ID_PM, 0,
1797 PCI_PM_SIZEOF, errp);
1798 if (pos < 0) {
1799 return;
1800 }
1801
1802 pci_dev->exp.pm_cap = pos;
1803
1804 /*
1805 * Indicates that this function complies with revision 1.2 of the
1806 * PCI Power Management Interface Specification.
1807 */
1808 pci_set_word(pci_dev->config + pos + PCI_PM_PMC, 0x3);
1809
1810 if (proxy->flags & VIRTIO_PCI_FLAG_INIT_DEVERR) {
1811 /* Init error enabling flags */
1812 pcie_cap_deverr_init(pci_dev);
1813 }
1814
1815 if (proxy->flags & VIRTIO_PCI_FLAG_INIT_LNKCTL) {
1816 /* Init Link Control Register */
1817 pcie_cap_lnkctl_init(pci_dev);
1818 }
1819
1820 if (proxy->flags & VIRTIO_PCI_FLAG_INIT_PM) {
1821 /* Init Power Management Control Register */
1822 pci_set_word(pci_dev->wmask + pos + PCI_PM_CTRL,
1823 PCI_PM_CTRL_STATE_MASK);
1824 }
1825
1826 if (proxy->flags & VIRTIO_PCI_FLAG_ATS) {
1827 pcie_ats_init(pci_dev, 256);
1828 }
1829
1830 } else {
1831 /*
1832 * make future invocations of pci_is_express() return false
1833 * and pci_config_size() return PCI_CONFIG_SPACE_SIZE.
1834 */
1835 pci_dev->cap_present &= ~QEMU_PCI_CAP_EXPRESS;
1836 }
1837
1838 virtio_pci_bus_new(&proxy->bus, sizeof(proxy->bus), proxy);
1839 if (k->realize) {
1840 k->realize(proxy, errp);
1841 }
1842 }
1843
1844 static void virtio_pci_exit(PCIDevice *pci_dev)
1845 {
1846 msix_uninit_exclusive_bar(pci_dev);
1847 }
1848
1849 static void virtio_pci_reset(DeviceState *qdev)
1850 {
1851 VirtIOPCIProxy *proxy = VIRTIO_PCI(qdev);
1852 VirtioBusState *bus = VIRTIO_BUS(&proxy->bus);
1853 PCIDevice *dev = PCI_DEVICE(qdev);
1854 int i;
1855
1856 virtio_pci_stop_ioeventfd(proxy);
1857 virtio_bus_reset(bus);
1858 msix_unuse_all_vectors(&proxy->pci_dev);
1859
1860 for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
1861 proxy->vqs[i].enabled = 0;
1862 proxy->vqs[i].num = 0;
1863 proxy->vqs[i].desc[0] = proxy->vqs[i].desc[1] = 0;
1864 proxy->vqs[i].avail[0] = proxy->vqs[i].avail[1] = 0;
1865 proxy->vqs[i].used[0] = proxy->vqs[i].used[1] = 0;
1866 }
1867
1868 if (pci_is_express(dev)) {
1869 pcie_cap_deverr_reset(dev);
1870 pcie_cap_lnkctl_reset(dev);
1871
1872 pci_set_word(dev->config + dev->exp.pm_cap + PCI_PM_CTRL, 0);
1873 }
1874 }
1875
1876 static Property virtio_pci_properties[] = {
1877 DEFINE_PROP_BIT("virtio-pci-bus-master-bug-migration", VirtIOPCIProxy, flags,
1878 VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION_BIT, false),
1879 DEFINE_PROP_BIT("migrate-extra", VirtIOPCIProxy, flags,
1880 VIRTIO_PCI_FLAG_MIGRATE_EXTRA_BIT, true),
1881 DEFINE_PROP_BIT("modern-pio-notify", VirtIOPCIProxy, flags,
1882 VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY_BIT, false),
1883 DEFINE_PROP_BIT("x-disable-pcie", VirtIOPCIProxy, flags,
1884 VIRTIO_PCI_FLAG_DISABLE_PCIE_BIT, false),
1885 DEFINE_PROP_BIT("page-per-vq", VirtIOPCIProxy, flags,
1886 VIRTIO_PCI_FLAG_PAGE_PER_VQ_BIT, false),
1887 DEFINE_PROP_BOOL("x-ignore-backend-features", VirtIOPCIProxy,
1888 ignore_backend_features, false),
1889 DEFINE_PROP_BIT("ats", VirtIOPCIProxy, flags,
1890 VIRTIO_PCI_FLAG_ATS_BIT, false),
1891 DEFINE_PROP_BIT("x-pcie-deverr-init", VirtIOPCIProxy, flags,
1892 VIRTIO_PCI_FLAG_INIT_DEVERR_BIT, true),
1893 DEFINE_PROP_BIT("x-pcie-lnkctl-init", VirtIOPCIProxy, flags,
1894 VIRTIO_PCI_FLAG_INIT_LNKCTL_BIT, true),
1895 DEFINE_PROP_BIT("x-pcie-pm-init", VirtIOPCIProxy, flags,
1896 VIRTIO_PCI_FLAG_INIT_PM_BIT, true),
1897 DEFINE_PROP_END_OF_LIST(),
1898 };
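/*
 * The flags above are exposed as qdev properties on every virtio PCI proxy,
 * e.g. (illustrative command line, not taken from this file):
 *
 *     -device virtio-blk-pci,ats=on,modern-pio-notify=on,...
 *
 * "ats" and "modern-pio-notify" are property names from the list above; the
 * "x-" prefix conventionally marks properties that are not a stable interface.
 */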
1899
1900 static void virtio_pci_dc_realize(DeviceState *qdev, Error **errp)
1901 {
1902 VirtioPCIClass *vpciklass = VIRTIO_PCI_GET_CLASS(qdev);
1903 VirtIOPCIProxy *proxy = VIRTIO_PCI(qdev);
1904 PCIDevice *pci_dev = &proxy->pci_dev;
1905
1906 if (!(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_PCIE) &&
1907 virtio_pci_modern(proxy)) {
1908 pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS;
1909 }
1910
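/*
 * QEMU_PCI_CAP_EXPRESS must be decided before the parent PCI realize runs,
 * since pci_config_size() (and thus the config space layout) depends on it;
 * see the corresponding cap_present handling in virtio_pci_realize().
 */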
1911 vpciklass->parent_dc_realize(qdev, errp);
1912 }
1913
1914 static void virtio_pci_class_init(ObjectClass *klass, void *data)
1915 {
1916 DeviceClass *dc = DEVICE_CLASS(klass);
1917 PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
1918 VirtioPCIClass *vpciklass = VIRTIO_PCI_CLASS(klass);
1919
1920 dc->props = virtio_pci_properties;
1921 k->realize = virtio_pci_realize;
1922 k->exit = virtio_pci_exit;
1923 k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
1924 k->revision = VIRTIO_PCI_ABI_VERSION;
1925 k->class_id = PCI_CLASS_OTHERS;
1926 device_class_set_parent_realize(dc, virtio_pci_dc_realize,
1927 &vpciklass->parent_dc_realize);
1928 dc->reset = virtio_pci_reset;
1929 }
1930
1931 static const TypeInfo virtio_pci_info = {
1932 .name = TYPE_VIRTIO_PCI,
1933 .parent = TYPE_PCI_DEVICE,
1934 .instance_size = sizeof(VirtIOPCIProxy),
1935 .class_init = virtio_pci_class_init,
1936 .class_size = sizeof(VirtioPCIClass),
1937 .abstract = true,
1938 };
1939
1940 static Property virtio_pci_generic_properties[] = {
1941 DEFINE_PROP_ON_OFF_AUTO("disable-legacy", VirtIOPCIProxy, disable_legacy,
1942 ON_OFF_AUTO_AUTO),
1943 DEFINE_PROP_BOOL("disable-modern", VirtIOPCIProxy, disable_modern, false),
1944 DEFINE_PROP_END_OF_LIST(),
1945 };
1946
1947 static void virtio_pci_base_class_init(ObjectClass *klass, void *data)
1948 {
1949 const VirtioPCIDeviceTypeInfo *t = data;
1950 if (t->class_init) {
1951 t->class_init(klass, NULL);
1952 }
1953 }
1954
1955 static void virtio_pci_generic_class_init(ObjectClass *klass, void *data)
1956 {
1957 DeviceClass *dc = DEVICE_CLASS(klass);
1958
1959 dc->props = virtio_pci_generic_properties;
1960 }
1961
1962 /* Used when the generic type and the base type are the same */
1963 static void virtio_pci_generic_base_class_init(ObjectClass *klass, void *data)
1964 {
1965 virtio_pci_base_class_init(klass, data);
1966 virtio_pci_generic_class_init(klass, NULL);
1967 }
1968
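/*
 * In virtio spec terms, a "transitional" device offers both the legacy and
 * the modern (virtio 1.0) interface, while a "non-transitional" device is
 * modern-only.  The two instance_init functions below simply preset the
 * disable-legacy/disable-modern properties accordingly.
 */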
1969 static void virtio_pci_transitional_instance_init(Object *obj)
1970 {
1971 VirtIOPCIProxy *proxy = VIRTIO_PCI(obj);
1972
1973 proxy->disable_legacy = ON_OFF_AUTO_OFF;
1974 proxy->disable_modern = false;
1975 }
1976
1977 static void virtio_pci_non_transitional_instance_init(Object *obj)
1978 {
1979 VirtIOPCIProxy *proxy = VIRTIO_PCI(obj);
1980
1981 proxy->disable_legacy = ON_OFF_AUTO_ON;
1982 proxy->disable_modern = false;
1983 }
1984
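/*
 * Register the QOM types for one virtio PCI device.  Depending on which
 * names are set in the VirtioPCIDeviceTypeInfo, this creates:
 *
 *  - an abstract base type (t->base_name), parented to t->parent or, by
 *    default, TYPE_VIRTIO_PCI;
 *  - a user-creatable generic type (t->generic_name) implementing both the
 *    PCIe and the conventional PCI interfaces;
 *  - optional non-transitional (modern-only) and transitional (legacy +
 *    modern, conventional PCI only) variants.
 *
 * When t->base_name is NULL, the generic name doubles as the base type and
 * no transitional/non-transitional variants may be requested.
 *
 * Minimal sketch of a caller (hypothetical "virtio-foo" names; real callers
 * such as virtio_blk_pci_info below follow the same pattern):
 *
 *     static const VirtioPCIDeviceTypeInfo virtio_foo_pci_info = {
 *         .generic_name  = "virtio-foo-pci",
 *         .instance_size = sizeof(VirtIOFooPCI),
 *         .instance_init = virtio_foo_pci_instance_init,
 *         .class_init    = virtio_foo_pci_class_init,
 *     };
 *     virtio_pci_types_register(&virtio_foo_pci_info);
 */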
1985 void virtio_pci_types_register(const VirtioPCIDeviceTypeInfo *t)
1986 {
1987 TypeInfo base_type_info = {
1988 .name = t->base_name,
1989 .parent = t->parent ? t->parent : TYPE_VIRTIO_PCI,
1990 .instance_size = t->instance_size,
1991 .instance_init = t->instance_init,
1992 .class_init = virtio_pci_base_class_init,
1993 .class_data = (void *)t,
1994 .abstract = true,
1995 };
1996 TypeInfo generic_type_info = {
1997 .name = t->generic_name,
1998 .parent = base_type_info.name,
1999 .class_init = virtio_pci_generic_class_init,
2000 .interfaces = (InterfaceInfo[]) {
2001 { INTERFACE_PCIE_DEVICE },
2002 { INTERFACE_CONVENTIONAL_PCI_DEVICE },
2003 { }
2004 },
2005 };
2006
2007 if (!base_type_info.name) {
2008 /* No base type -> register a single generic device type */
2009 base_type_info.name = t->generic_name;
2010 base_type_info.class_init = virtio_pci_generic_base_class_init;
2011 base_type_info.interfaces = generic_type_info.interfaces;
2012 base_type_info.abstract = false;
2013 generic_type_info.name = NULL;
2014 assert(!t->non_transitional_name);
2015 assert(!t->transitional_name);
2016 }
2017
2018 type_register(&base_type_info);
2019 if (generic_type_info.name) {
2020 type_register(&generic_type_info);
2021 }
2022
2023 if (t->non_transitional_name) {
2024 const TypeInfo non_transitional_type_info = {
2025 .name = t->non_transitional_name,
2026 .parent = base_type_info.name,
2027 .instance_init = virtio_pci_non_transitional_instance_init,
2028 .interfaces = (InterfaceInfo[]) {
2029 { INTERFACE_PCIE_DEVICE },
2030 { INTERFACE_CONVENTIONAL_PCI_DEVICE },
2031 { }
2032 },
2033 };
2034 type_register(&non_transitional_type_info);
2035 }
2036
2037 if (t->transitional_name) {
2038 const TypeInfo transitional_type_info = {
2039 .name = t->transitional_name,
2040 .parent = base_type_info.name,
2041 .instance_init = virtio_pci_transitional_instance_init,
2042 .interfaces = (InterfaceInfo[]) {
2043 /*
2044 * Transitional virtio devices work only as Conventional PCI
2045 * devices because they require PIO ports.
2046 */
2047 { INTERFACE_CONVENTIONAL_PCI_DEVICE },
2048 { }
2049 },
2050 };
2051 type_register(&transitional_type_info);
2052 }
2053 }
2054
2055 /* virtio-blk-pci */
2056
2057 static Property virtio_blk_pci_properties[] = {
2058 DEFINE_PROP_UINT32("class", VirtIOPCIProxy, class_code, 0),
2059 DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
2060 VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
2061 DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors,
2062 DEV_NVECTORS_UNSPECIFIED),
2063 DEFINE_PROP_END_OF_LIST(),
2064 };
2065
2066 static void virtio_blk_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
2067 {
2068 VirtIOBlkPCI *dev = VIRTIO_BLK_PCI(vpci_dev);
2069 DeviceState *vdev = DEVICE(&dev->vdev);
2070
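/* Default: one MSI-X vector per request queue plus one for config changes. */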
2071 if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) {
2072 vpci_dev->nvectors = dev->vdev.conf.num_queues + 1;
2073 }
2074
2075 qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
2076 object_property_set_bool(OBJECT(vdev), true, "realized", errp);
2077 }
2078
2079 static void virtio_blk_pci_class_init(ObjectClass *klass, void *data)
2080 {
2081 DeviceClass *dc = DEVICE_CLASS(klass);
2082 VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
2083 PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
2084
2085 set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
2086 dc->props = virtio_blk_pci_properties;
2087 k->realize = virtio_blk_pci_realize;
2088 pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
2089 pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_BLOCK;
2090 pcidev_k->revision = VIRTIO_PCI_ABI_VERSION;
2091 pcidev_k->class_id = PCI_CLASS_STORAGE_SCSI;
2092 }
2093
2094 static void virtio_blk_pci_instance_init(Object *obj)
2095 {
2096 VirtIOBlkPCI *dev = VIRTIO_BLK_PCI(obj);
2097
2098 virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
2099 TYPE_VIRTIO_BLK);
2100 object_property_add_alias(obj, "bootindex", OBJECT(&dev->vdev),
2101 "bootindex", &error_abort);
2102 }
2103
2104 static const VirtioPCIDeviceTypeInfo virtio_blk_pci_info = {
2105 .generic_name = TYPE_VIRTIO_BLK_PCI,
2106 .instance_size = sizeof(VirtIOBlkPCI),
2107 .instance_init = virtio_blk_pci_instance_init,
2108 .class_init = virtio_blk_pci_class_init,
2109 };
2110
2111 #if defined(CONFIG_VHOST_USER) && defined(CONFIG_LINUX)
2112 /* vhost-user-blk */
2113
2114 static Property vhost_user_blk_pci_properties[] = {
2115 DEFINE_PROP_UINT32("class", VirtIOPCIProxy, class_code, 0),
2116 DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors,
2117 DEV_NVECTORS_UNSPECIFIED),
2118 DEFINE_PROP_END_OF_LIST(),
2119 };
2120
2121 static void vhost_user_blk_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
2122 {
2123 VHostUserBlkPCI *dev = VHOST_USER_BLK_PCI(vpci_dev);
2124 DeviceState *vdev = DEVICE(&dev->vdev);
2125
2126 if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) {
2127 vpci_dev->nvectors = dev->vdev.num_queues + 1;
2128 }
2129
2130 qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
2131 object_property_set_bool(OBJECT(vdev), true, "realized", errp);
2132 }
2133
2134 static void vhost_user_blk_pci_class_init(ObjectClass *klass, void *data)
2135 {
2136 DeviceClass *dc = DEVICE_CLASS(klass);
2137 VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
2138 PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
2139
2140 set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
2141 dc->props = vhost_user_blk_pci_properties;
2142 k->realize = vhost_user_blk_pci_realize;
2143 pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
2144 pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_BLOCK;
2145 pcidev_k->revision = VIRTIO_PCI_ABI_VERSION;
2146 pcidev_k->class_id = PCI_CLASS_STORAGE_SCSI;
2147 }
2148
2149 static void vhost_user_blk_pci_instance_init(Object *obj)
2150 {
2151 VHostUserBlkPCI *dev = VHOST_USER_BLK_PCI(obj);
2152
2153 virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
2154 TYPE_VHOST_USER_BLK);
2155 object_property_add_alias(obj, "bootindex", OBJECT(&dev->vdev),
2156 "bootindex", &error_abort);
2157 }
2158
2159 static const VirtioPCIDeviceTypeInfo vhost_user_blk_pci_info = {
2160 .generic_name = TYPE_VHOST_USER_BLK_PCI,
2161 .instance_size = sizeof(VHostUserBlkPCI),
2162 .instance_init = vhost_user_blk_pci_instance_init,
2163 .class_init = vhost_user_blk_pci_class_init,
2164 };
2165 #endif
2166
2167 /* virtio-scsi-pci */
2168
2169 static Property virtio_scsi_pci_properties[] = {
2170 DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
2171 VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
2172 DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors,
2173 DEV_NVECTORS_UNSPECIFIED),
2174 DEFINE_PROP_END_OF_LIST(),
2175 };
2176
2177 static void virtio_scsi_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
2178 {
2179 VirtIOSCSIPCI *dev = VIRTIO_SCSI_PCI(vpci_dev);
2180 DeviceState *vdev = DEVICE(&dev->vdev);
2181 VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(vdev);
2182 DeviceState *proxy = DEVICE(vpci_dev);
2183 char *bus_name;
2184
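/*
 * Default: one vector per request queue, plus one each for the control
 * queue, the event queue and config interrupts (hence the +3).
 */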
2185 if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) {
2186 vpci_dev->nvectors = vs->conf.num_queues + 3;
2187 }
2188
2189 /*
2190 * For command line compatibility, this sets the virtio-scsi-device bus
2191 * name as before.
2192 */
2193 if (proxy->id) {
2194 bus_name = g_strdup_printf("%s.0", proxy->id);
2195 virtio_device_set_child_bus_name(VIRTIO_DEVICE(vdev), bus_name);
2196 g_free(bus_name);
2197 }
2198
2199 qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
2200 object_property_set_bool(OBJECT(vdev), true, "realized", errp);
2201 }
2202
2203 static void virtio_scsi_pci_class_init(ObjectClass *klass, void *data)
2204 {
2205 DeviceClass *dc = DEVICE_CLASS(klass);
2206 VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
2207 PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
2208
2209 k->realize = virtio_scsi_pci_realize;
2210 set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
2211 dc->props = virtio_scsi_pci_properties;
2212 pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
2213 pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_SCSI;
2214 pcidev_k->revision = 0x00;
2215 pcidev_k->class_id = PCI_CLASS_STORAGE_SCSI;
2216 }
2217
2218 static void virtio_scsi_pci_instance_init(Object *obj)
2219 {
2220 VirtIOSCSIPCI *dev = VIRTIO_SCSI_PCI(obj);
2221
2222 virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
2223 TYPE_VIRTIO_SCSI);
2224 }
2225
2226 static const VirtioPCIDeviceTypeInfo virtio_scsi_pci_info = {
2227 .generic_name = TYPE_VIRTIO_SCSI_PCI,
2228 .instance_size = sizeof(VirtIOSCSIPCI),
2229 .instance_init = virtio_scsi_pci_instance_init,
2230 .class_init = virtio_scsi_pci_class_init,
2231 };
2232
2233 /* vhost-scsi-pci */
2234
2235 #ifdef CONFIG_VHOST_SCSI
2236 static Property vhost_scsi_pci_properties[] = {
2237 DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors,
2238 DEV_NVECTORS_UNSPECIFIED),
2239 DEFINE_PROP_END_OF_LIST(),
2240 };
2241
2242 static void vhost_scsi_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
2243 {
2244 VHostSCSIPCI *dev = VHOST_SCSI_PCI(vpci_dev);
2245 DeviceState *vdev = DEVICE(&dev->vdev);
2246 VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(vdev);
2247
2248 if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) {
2249 vpci_dev->nvectors = vs->conf.num_queues + 3;
2250 }
2251
2252 qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
2253 object_property_set_bool(OBJECT(vdev), true, "realized", errp);
2254 }
2255
2256 static void vhost_scsi_pci_class_init(ObjectClass *klass, void *data)
2257 {
2258 DeviceClass *dc = DEVICE_CLASS(klass);
2259 VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
2260 PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
2261 k->realize = vhost_scsi_pci_realize;
2262 set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
2263 dc->props = vhost_scsi_pci_properties;
2264 pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
2265 pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_SCSI;
2266 pcidev_k->revision = 0x00;
2267 pcidev_k->class_id = PCI_CLASS_STORAGE_SCSI;
2268 }
2269
2270 static void vhost_scsi_pci_instance_init(Object *obj)
2271 {
2272 VHostSCSIPCI *dev = VHOST_SCSI_PCI(obj);
2273
2274 virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
2275 TYPE_VHOST_SCSI);
2276 object_property_add_alias(obj, "bootindex", OBJECT(&dev->vdev),
2277 "bootindex", &error_abort);
2278 }
2279
2280 static const VirtioPCIDeviceTypeInfo vhost_scsi_pci_info = {
2281 .generic_name = TYPE_VHOST_SCSI_PCI,
2282 .instance_size = sizeof(VHostSCSIPCI),
2283 .instance_init = vhost_scsi_pci_instance_init,
2284 .class_init = vhost_scsi_pci_class_init,
2285 };
2286 #endif
2287
2288 #if defined(CONFIG_VHOST_USER) && defined(CONFIG_LINUX)
2289 /* vhost-user-scsi-pci */
2290 static Property vhost_user_scsi_pci_properties[] = {
2291 DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors,
2292 DEV_NVECTORS_UNSPECIFIED),
2293 DEFINE_PROP_END_OF_LIST(),
2294 };
2295
2296 static void vhost_user_scsi_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
2297 {
2298 VHostUserSCSIPCI *dev = VHOST_USER_SCSI_PCI(vpci_dev);
2299 DeviceState *vdev = DEVICE(&dev->vdev);
2300 VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(vdev);
2301
2302 if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) {
2303 vpci_dev->nvectors = vs->conf.num_queues + 3;
2304 }
2305
2306 qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
2307 object_property_set_bool(OBJECT(vdev), true, "realized", errp);
2308 }
2309
2310 static void vhost_user_scsi_pci_class_init(ObjectClass *klass, void *data)
2311 {
2312 DeviceClass *dc = DEVICE_CLASS(klass);
2313 VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
2314 PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
2315 k->realize = vhost_user_scsi_pci_realize;
2316 set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
2317 dc->props = vhost_user_scsi_pci_properties;
2318 pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
2319 pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_SCSI;
2320 pcidev_k->revision = 0x00;
2321 pcidev_k->class_id = PCI_CLASS_STORAGE_SCSI;
2322 }
2323
2324 static void vhost_user_scsi_pci_instance_init(Object *obj)
2325 {
2326 VHostUserSCSIPCI *dev = VHOST_USER_SCSI_PCI(obj);
2327
2328 virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
2329 TYPE_VHOST_USER_SCSI);
2330 object_property_add_alias(obj, "bootindex", OBJECT(&dev->vdev),
2331 "bootindex", &error_abort);
2332 }
2333
2334 static const VirtioPCIDeviceTypeInfo vhost_user_scsi_pci_info = {
2335 .generic_name = TYPE_VHOST_USER_SCSI_PCI,
2336 .instance_size = sizeof(VHostUserSCSIPCI),
2337 .instance_init = vhost_user_scsi_pci_instance_init,
2338 .class_init = vhost_user_scsi_pci_class_init,
2339 };
2340 #endif
2341
2342 /* vhost-vsock-pci */
2343
2344 #ifdef CONFIG_VHOST_VSOCK
2345 static Property vhost_vsock_pci_properties[] = {
2346 DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 3),
2347 DEFINE_PROP_END_OF_LIST(),
2348 };
2349
2350 static void vhost_vsock_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
2351 {
2352 VHostVSockPCI *dev = VHOST_VSOCK_PCI(vpci_dev);
2353 DeviceState *vdev = DEVICE(&dev->vdev);
2354
2355 qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
2356 object_property_set_bool(OBJECT(vdev), true, "realized", errp);
2357 }
2358
2359 static void vhost_vsock_pci_class_init(ObjectClass *klass, void *data)
2360 {
2361 DeviceClass *dc = DEVICE_CLASS(klass);
2362 VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
2363 PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
2364 k->realize = vhost_vsock_pci_realize;
2365 set_bit(DEVICE_CATEGORY_MISC, dc->categories);
2366 dc->props = vhost_vsock_pci_properties;
2367 pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
2368 pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_VSOCK;
2369 pcidev_k->revision = 0x00;
2370 pcidev_k->class_id = PCI_CLASS_COMMUNICATION_OTHER;
2371 }
2372
2373 static void vhost_vsock_pci_instance_init(Object *obj)
2374 {
2375 VHostVSockPCI *dev = VHOST_VSOCK_PCI(obj);
2376
2377 virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
2378 TYPE_VHOST_VSOCK);
2379 }
2380
2381 static const VirtioPCIDeviceTypeInfo vhost_vsock_pci_info = {
2382 .generic_name = TYPE_VHOST_VSOCK_PCI,
2383 .instance_size = sizeof(VHostVSockPCI),
2384 .instance_init = vhost_vsock_pci_instance_init,
2385 .class_init = vhost_vsock_pci_class_init,
2386 };
2387 #endif
2388
2389 /* virtio-balloon-pci */
2390
2391 static Property virtio_balloon_pci_properties[] = {
2392 DEFINE_PROP_UINT32("class", VirtIOPCIProxy, class_code, 0),
2393 DEFINE_PROP_END_OF_LIST(),
2394 };
2395
2396 static void virtio_balloon_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
2397 {
2398 VirtIOBalloonPCI *dev = VIRTIO_BALLOON_PCI(vpci_dev);
2399 DeviceState *vdev = DEVICE(&dev->vdev);
2400
2401 if (vpci_dev->class_code != PCI_CLASS_OTHERS &&
2402 vpci_dev->class_code != PCI_CLASS_MEMORY_RAM) { /* qemu < 1.1 */
2403 vpci_dev->class_code = PCI_CLASS_OTHERS;
2404 }
2405
2406 qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
2407 object_property_set_bool(OBJECT(vdev), true, "realized", errp);
2408 }
2409
2410 static void virtio_balloon_pci_class_init(ObjectClass *klass, void *data)
2411 {
2412 DeviceClass *dc = DEVICE_CLASS(klass);
2413 VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
2414 PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
2415 k->realize = virtio_balloon_pci_realize;
2416 set_bit(DEVICE_CATEGORY_MISC, dc->categories);
2417 dc->props = virtio_balloon_pci_properties;
2418 pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
2419 pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_BALLOON;
2420 pcidev_k->revision = VIRTIO_PCI_ABI_VERSION;
2421 pcidev_k->class_id = PCI_CLASS_OTHERS;
2422 }
2423
2424 static void virtio_balloon_pci_instance_init(Object *obj)
2425 {
2426 VirtIOBalloonPCI *dev = VIRTIO_BALLOON_PCI(obj);
2427
2428 virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
2429 TYPE_VIRTIO_BALLOON);
2430 object_property_add_alias(obj, "guest-stats", OBJECT(&dev->vdev),
2431 "guest-stats", &error_abort);
2432 object_property_add_alias(obj, "guest-stats-polling-interval",
2433 OBJECT(&dev->vdev),
2434 "guest-stats-polling-interval", &error_abort);
2435 }
2436
2437 static const VirtioPCIDeviceTypeInfo virtio_balloon_pci_info = {
2438 .generic_name = TYPE_VIRTIO_BALLOON_PCI,
2439 .instance_size = sizeof(VirtIOBalloonPCI),
2440 .instance_init = virtio_balloon_pci_instance_init,
2441 .class_init = virtio_balloon_pci_class_init,
2442 };
2443
2444 /* virtio-serial-pci */
2445
2446 static void virtio_serial_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
2447 {
2448 VirtIOSerialPCI *dev = VIRTIO_SERIAL_PCI(vpci_dev);
2449 DeviceState *vdev = DEVICE(&dev->vdev);
2450 DeviceState *proxy = DEVICE(vpci_dev);
2451 char *bus_name;
2452
2453 if (vpci_dev->class_code != PCI_CLASS_COMMUNICATION_OTHER &&
2454 vpci_dev->class_code != PCI_CLASS_DISPLAY_OTHER && /* qemu 0.10 */
2455 vpci_dev->class_code != PCI_CLASS_OTHERS) { /* qemu-kvm */
2456 vpci_dev->class_code = PCI_CLASS_COMMUNICATION_OTHER;
2457 }
2458
2459 /* backwards-compatibility with machines that were created with
2460 DEV_NVECTORS_UNSPECIFIED */
2461 if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) {
2462 vpci_dev->nvectors = dev->vdev.serial.max_virtserial_ports + 1;
2463 }
2464
2465 /*
2466 * For command line compatibility, this sets the virtio-serial-device bus
2467 * name as before.
2468 */
2469 if (proxy->id) {
2470 bus_name = g_strdup_printf("%s.0", proxy->id);
2471 virtio_device_set_child_bus_name(VIRTIO_DEVICE(vdev), bus_name);
2472 g_free(bus_name);
2473 }
2474
2475 qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
2476 object_property_set_bool(OBJECT(vdev), true, "realized", errp);
2477 }
2478
2479 static Property virtio_serial_pci_properties[] = {
2480 DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
2481 VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
2482 DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
2483 DEFINE_PROP_UINT32("class", VirtIOPCIProxy, class_code, 0),
2484 DEFINE_PROP_END_OF_LIST(),
2485 };
2486
2487 static void virtio_serial_pci_class_init(ObjectClass *klass, void *data)
2488 {
2489 DeviceClass *dc = DEVICE_CLASS(klass);
2490 VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
2491 PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
2492 k->realize = virtio_serial_pci_realize;
2493 set_bit(DEVICE_CATEGORY_INPUT, dc->categories);
2494 dc->props = virtio_serial_pci_properties;
2495 pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
2496 pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_CONSOLE;
2497 pcidev_k->revision = VIRTIO_PCI_ABI_VERSION;
2498 pcidev_k->class_id = PCI_CLASS_COMMUNICATION_OTHER;
2499 }
2500
2501 static void virtio_serial_pci_instance_init(Object *obj)
2502 {
2503 VirtIOSerialPCI *dev = VIRTIO_SERIAL_PCI(obj);
2504
2505 virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
2506 TYPE_VIRTIO_SERIAL);
2507 }
2508
2509 static const VirtioPCIDeviceTypeInfo virtio_serial_pci_info = {
2510 .generic_name = TYPE_VIRTIO_SERIAL_PCI,
2511 .instance_size = sizeof(VirtIOSerialPCI),
2512 .instance_init = virtio_serial_pci_instance_init,
2513 .class_init = virtio_serial_pci_class_init,
2514 };
2515
2516 /* virtio-net-pci */
2517
2518 static Property virtio_net_properties[] = {
2519 DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
2520 VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
2521 DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 3),
2522 DEFINE_PROP_END_OF_LIST(),
2523 };
2524
2525 static void virtio_net_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
2526 {
2527 DeviceState *qdev = DEVICE(vpci_dev);
2528 VirtIONetPCI *dev = VIRTIO_NET_PCI(vpci_dev);
2529 DeviceState *vdev = DEVICE(&dev->vdev);
2530
2531 virtio_net_set_netclient_name(&dev->vdev, qdev->id,
2532 object_get_typename(OBJECT(qdev)));
2533 qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
2534 object_property_set_bool(OBJECT(vdev), true, "realized", errp);
2535 }
2536
2537 static void virtio_net_pci_class_init(ObjectClass *klass, void *data)
2538 {
2539 DeviceClass *dc = DEVICE_CLASS(klass);
2540 PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
2541 VirtioPCIClass *vpciklass = VIRTIO_PCI_CLASS(klass);
2542
2543 k->romfile = "efi-virtio.rom";
2544 k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
2545 k->device_id = PCI_DEVICE_ID_VIRTIO_NET;
2546 k->revision = VIRTIO_PCI_ABI_VERSION;
2547 k->class_id = PCI_CLASS_NETWORK_ETHERNET;
2548 set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
2549 dc->props = virtio_net_properties;
2550 vpciklass->realize = virtio_net_pci_realize;
2551 }
2552
2553 static void virtio_net_pci_instance_init(Object *obj)
2554 {
2555 VirtIONetPCI *dev = VIRTIO_NET_PCI(obj);
2556
2557 virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
2558 TYPE_VIRTIO_NET);
2559 object_property_add_alias(obj, "bootindex", OBJECT(&dev->vdev),
2560 "bootindex", &error_abort);
2561 }
2562
2563 static const VirtioPCIDeviceTypeInfo virtio_net_pci_info = {
2564 .generic_name = TYPE_VIRTIO_NET_PCI,
2565 .instance_size = sizeof(VirtIONetPCI),
2566 .instance_init = virtio_net_pci_instance_init,
2567 .class_init = virtio_net_pci_class_init,
2568 };
2569
2570 /* virtio-rng-pci */
2571
2572 static void virtio_rng_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
2573 {
2574 VirtIORngPCI *vrng = VIRTIO_RNG_PCI(vpci_dev);
2575 DeviceState *vdev = DEVICE(&vrng->vdev);
2576 Error *err = NULL;
2577
2578 qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
2579 object_property_set_bool(OBJECT(vdev), true, "realized", &err);
2580 if (err) {
2581 error_propagate(errp, err);
2582 return;
2583 }
2584
2585 object_property_set_link(OBJECT(vrng),
2586 OBJECT(vrng->vdev.conf.rng), "rng",
2587 NULL);
2588 }
2589
2590 static void virtio_rng_pci_class_init(ObjectClass *klass, void *data)
2591 {
2592 DeviceClass *dc = DEVICE_CLASS(klass);
2593 VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
2594 PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
2595
2596 k->realize = virtio_rng_pci_realize;
2597 set_bit(DEVICE_CATEGORY_MISC, dc->categories);
2598
2599 pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
2600 pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_RNG;
2601 pcidev_k->revision = VIRTIO_PCI_ABI_VERSION;
2602 pcidev_k->class_id = PCI_CLASS_OTHERS;
2603 }
2604
2605 static void virtio_rng_initfn(Object *obj)
2606 {
2607 VirtIORngPCI *dev = VIRTIO_RNG_PCI(obj);
2608
2609 virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
2610 TYPE_VIRTIO_RNG);
2611 }
2612
2613 static const VirtioPCIDeviceTypeInfo virtio_rng_pci_info = {
2614 .generic_name = TYPE_VIRTIO_RNG_PCI,
2615 .instance_size = sizeof(VirtIORngPCI),
2616 .instance_init = virtio_rng_initfn,
2617 .class_init = virtio_rng_pci_class_init,
2618 };
2619
2620 /* virtio-input-pci */
2621
2622 static Property virtio_input_pci_properties[] = {
2623 DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
2624 DEFINE_PROP_END_OF_LIST(),
2625 };
2626
2627 static void virtio_input_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
2628 {
2629 VirtIOInputPCI *vinput = VIRTIO_INPUT_PCI(vpci_dev);
2630 DeviceState *vdev = DEVICE(&vinput->vdev);
2631
2632 qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
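/* virtio-input has no legacy interface, so force a modern-only (virtio 1.0) device. */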
2633 virtio_pci_force_virtio_1(vpci_dev);
2634 object_property_set_bool(OBJECT(vdev), true, "realized", errp);
2635 }
2636
2637 static void virtio_input_pci_class_init(ObjectClass *klass, void *data)
2638 {
2639 DeviceClass *dc = DEVICE_CLASS(klass);
2640 VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
2641 PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
2642
2643 dc->props = virtio_input_pci_properties;
2644 k->realize = virtio_input_pci_realize;
2645 set_bit(DEVICE_CATEGORY_INPUT, dc->categories);
2646
2647 pcidev_k->class_id = PCI_CLASS_INPUT_OTHER;
2648 }
2649
2650 static void virtio_input_hid_kbd_pci_class_init(ObjectClass *klass, void *data)
2651 {
2652 PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
2653
2654 pcidev_k->class_id = PCI_CLASS_INPUT_KEYBOARD;
2655 }
2656
2657 static void virtio_input_hid_mouse_pci_class_init(ObjectClass *klass,
2658 void *data)
2659 {
2660 PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
2661
2662 pcidev_k->class_id = PCI_CLASS_INPUT_MOUSE;
2663 }
2664
2665 static void virtio_keyboard_initfn(Object *obj)
2666 {
2667 VirtIOInputHIDPCI *dev = VIRTIO_INPUT_HID_PCI(obj);
2668
2669 virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
2670 TYPE_VIRTIO_KEYBOARD);
2671 }
2672
2673 static void virtio_mouse_initfn(Object *obj)
2674 {
2675 VirtIOInputHIDPCI *dev = VIRTIO_INPUT_HID_PCI(obj);
2676
2677 virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
2678 TYPE_VIRTIO_MOUSE);
2679 }
2680
2681 static void virtio_tablet_initfn(Object *obj)
2682 {
2683 VirtIOInputHIDPCI *dev = VIRTIO_INPUT_HID_PCI(obj);
2684
2685 virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
2686 TYPE_VIRTIO_TABLET);
2687 }
2688
2689 static const TypeInfo virtio_input_pci_info = {
2690 .name = TYPE_VIRTIO_INPUT_PCI,
2691 .parent = TYPE_VIRTIO_PCI,
2692 .instance_size = sizeof(VirtIOInputPCI),
2693 .class_init = virtio_input_pci_class_init,
2694 .abstract = true,
2695 };
2696
2697 static const TypeInfo virtio_input_hid_pci_info = {
2698 .name = TYPE_VIRTIO_INPUT_HID_PCI,
2699 .parent = TYPE_VIRTIO_INPUT_PCI,
2700 .instance_size = sizeof(VirtIOInputHIDPCI),
2701 .abstract = true,
2702 };
2703
2704 static const VirtioPCIDeviceTypeInfo virtio_keyboard_pci_info = {
2705 .generic_name = TYPE_VIRTIO_KEYBOARD_PCI,
2706 .parent = TYPE_VIRTIO_INPUT_HID_PCI,
2707 .class_init = virtio_input_hid_kbd_pci_class_init,
2708 .instance_size = sizeof(VirtIOInputHIDPCI),
2709 .instance_init = virtio_keyboard_initfn,
2710 };
2711
2712 static const VirtioPCIDeviceTypeInfo virtio_mouse_pci_info = {
2713 .generic_name = TYPE_VIRTIO_MOUSE_PCI,
2714 .parent = TYPE_VIRTIO_INPUT_HID_PCI,
2715 .class_init = virtio_input_hid_mouse_pci_class_init,
2716 .instance_size = sizeof(VirtIOInputHIDPCI),
2717 .instance_init = virtio_mouse_initfn,
2718 };
2719
2720 static const VirtioPCIDeviceTypeInfo virtio_tablet_pci_info = {
2721 .generic_name = TYPE_VIRTIO_TABLET_PCI,
2722 .parent = TYPE_VIRTIO_INPUT_HID_PCI,
2723 .instance_size = sizeof(VirtIOInputHIDPCI),
2724 .instance_init = virtio_tablet_initfn,
2725 };
2726
2727 #ifdef CONFIG_LINUX
2728 static void virtio_host_initfn(Object *obj)
2729 {
2730 VirtIOInputHostPCI *dev = VIRTIO_INPUT_HOST_PCI(obj);
2731
2732 virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
2733 TYPE_VIRTIO_INPUT_HOST);
2734 }
2735
2736 static const VirtioPCIDeviceTypeInfo virtio_host_pci_info = {
2737 .generic_name = TYPE_VIRTIO_INPUT_HOST_PCI,
2738 .parent = TYPE_VIRTIO_INPUT_PCI,
2739 .instance_size = sizeof(VirtIOInputHostPCI),
2740 .instance_init = virtio_host_initfn,
2741 };
2742 #endif
2743
2744 /* virtio-pci-bus */
2745
2746 static void virtio_pci_bus_new(VirtioBusState *bus, size_t bus_size,
2747 VirtIOPCIProxy *dev)
2748 {
2749 DeviceState *qdev = DEVICE(dev);
2750 char virtio_bus_name[] = "virtio-bus";
2751
2752 qbus_create_inplace(bus, bus_size, TYPE_VIRTIO_PCI_BUS, qdev,
2753 virtio_bus_name);
2754 }
2755
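/*
 * Hook the generic VirtioBusClass operations up to the PCI transport
 * implementations defined in this file.
 */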
2756 static void virtio_pci_bus_class_init(ObjectClass *klass, void *data)
2757 {
2758 BusClass *bus_class = BUS_CLASS(klass);
2759 VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);
2760 bus_class->max_dev = 1;
2761 k->notify = virtio_pci_notify;
2762 k->save_config = virtio_pci_save_config;
2763 k->load_config = virtio_pci_load_config;
2764 k->save_queue = virtio_pci_save_queue;
2765 k->load_queue = virtio_pci_load_queue;
2766 k->save_extra_state = virtio_pci_save_extra_state;
2767 k->load_extra_state = virtio_pci_load_extra_state;
2768 k->has_extra_state = virtio_pci_has_extra_state;
2769 k->query_guest_notifiers = virtio_pci_query_guest_notifiers;
2770 k->set_guest_notifiers = virtio_pci_set_guest_notifiers;
2771 k->set_host_notifier_mr = virtio_pci_set_host_notifier_mr;
2772 k->vmstate_change = virtio_pci_vmstate_change;
2773 k->pre_plugged = virtio_pci_pre_plugged;
2774 k->device_plugged = virtio_pci_device_plugged;
2775 k->device_unplugged = virtio_pci_device_unplugged;
2776 k->query_nvectors = virtio_pci_query_nvectors;
2777 k->ioeventfd_enabled = virtio_pci_ioeventfd_enabled;
2778 k->ioeventfd_assign = virtio_pci_ioeventfd_assign;
2779 k->get_dma_as = virtio_pci_get_dma_as;
2780 }
2781
2782 static const TypeInfo virtio_pci_bus_info = {
2783 .name = TYPE_VIRTIO_PCI_BUS,
2784 .parent = TYPE_VIRTIO_BUS,
2785 .instance_size = sizeof(VirtioPCIBusState),
2786 .class_init = virtio_pci_bus_class_init,
2787 };
2788
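/*
 * Abstract base types are registered directly with type_register_static();
 * concrete devices go through virtio_pci_types_register() so that they pick
 * up the generic/transitional/non-transitional variants described above.
 */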
2789 static void virtio_pci_register_types(void)
2790 {
2791 /* Base types: */
2792 type_register_static(&virtio_pci_bus_info);
2793 type_register_static(&virtio_pci_info);
2794 type_register_static(&virtio_input_pci_info);
2795 type_register_static(&virtio_input_hid_pci_info);
2796
2797 /* Implementations: */
2798 virtio_pci_types_register(&virtio_rng_pci_info);
2799 virtio_pci_types_register(&virtio_keyboard_pci_info);
2800 virtio_pci_types_register(&virtio_mouse_pci_info);
2801 virtio_pci_types_register(&virtio_tablet_pci_info);
2802 #ifdef CONFIG_LINUX
2803 virtio_pci_types_register(&virtio_host_pci_info);
2804 #endif
2805 #ifdef CONFIG_VIRTFS
2806 virtio_pci_types_register(&virtio_9p_pci_info);
2807 #endif
2808 virtio_pci_types_register(&virtio_blk_pci_info);
2809 #if defined(CONFIG_VHOST_USER) && defined(CONFIG_LINUX)
2810 virtio_pci_types_register(&vhost_user_blk_pci_info);
2811 #endif
2812 virtio_pci_types_register(&virtio_scsi_pci_info);
2813 virtio_pci_types_register(&virtio_balloon_pci_info);
2814 virtio_pci_types_register(&virtio_serial_pci_info);
2815 virtio_pci_types_register(&virtio_net_pci_info);
2816 #ifdef CONFIG_VHOST_SCSI
2817 virtio_pci_types_register(&vhost_scsi_pci_info);
2818 #endif
2819 #if defined(CONFIG_VHOST_USER) && defined(CONFIG_LINUX)
2820 virtio_pci_types_register(&vhost_user_scsi_pci_info);
2821 #endif
2822 #ifdef CONFIG_VHOST_VSOCK
2823 virtio_pci_types_register(&vhost_vsock_pci_info);
2824 #endif
2825 }
2826
2827 type_init(virtio_pci_register_types)