1 /*
2 * Virtio PCI Bindings
3 *
4 * Copyright IBM, Corp. 2007
5 * Copyright (c) 2009 CodeSourcery
6 *
7 * Authors:
8 * Anthony Liguori <aliguori@us.ibm.com>
9 * Paul Brook <paul@codesourcery.com>
10 *
11 * This work is licensed under the terms of the GNU GPL, version 2. See
12 * the COPYING file in the top-level directory.
13 *
14 * Contributions after 2012-01-13 are licensed under the terms of the
15 * GNU GPL, version 2 or (at your option) any later version.
16 */
17
18 #include "qemu/osdep.h"
19
20 #include "standard-headers/linux/virtio_pci.h"
21 #include "hw/virtio/virtio.h"
22 #include "hw/virtio/virtio-blk.h"
23 #include "hw/virtio/virtio-net.h"
24 #include "hw/virtio/virtio-serial.h"
25 #include "hw/virtio/virtio-scsi.h"
26 #include "hw/virtio/virtio-balloon.h"
27 #include "hw/virtio/virtio-input.h"
28 #include "hw/pci/pci.h"
29 #include "qapi/error.h"
30 #include "qemu/error-report.h"
31 #include "hw/pci/msi.h"
32 #include "hw/pci/msix.h"
33 #include "hw/loader.h"
34 #include "sysemu/kvm.h"
35 #include "sysemu/block-backend.h"
36 #include "virtio-pci.h"
37 #include "qemu/range.h"
38 #include "hw/virtio/virtio-bus.h"
39 #include "qapi/visitor.h"
40
41 #define VIRTIO_PCI_REGION_SIZE(dev) VIRTIO_PCI_CONFIG_OFF(msix_present(dev))
42
43 #undef VIRTIO_PCI_CONFIG
44
45 /* The remaining space is defined by each driver as the per-driver
46 * configuration space */
47 #define VIRTIO_PCI_CONFIG_SIZE(dev) VIRTIO_PCI_CONFIG_OFF(msix_enabled(dev))
48
49 static void virtio_pci_bus_new(VirtioBusState *bus, size_t bus_size,
50 VirtIOPCIProxy *dev);
51 static void virtio_pci_reset(DeviceState *qdev);
52
53 /* virtio device */
54 /* DeviceState to VirtIOPCIProxy. For use off data-path. TODO: use QOM. */
55 static inline VirtIOPCIProxy *to_virtio_pci_proxy(DeviceState *d)
56 {
57 return container_of(d, VirtIOPCIProxy, pci_dev.qdev);
58 }
59
60 /* DeviceState to VirtIOPCIProxy. Note: used on datapath,
61 * be careful and test performance if you change this.
62 */
63 static inline VirtIOPCIProxy *to_virtio_pci_proxy_fast(DeviceState *d)
64 {
65 return container_of(d, VirtIOPCIProxy, pci_dev.qdev);
66 }
67
68 static void virtio_pci_notify(DeviceState *d, uint16_t vector)
69 {
70 VirtIOPCIProxy *proxy = to_virtio_pci_proxy_fast(d);
71
72 if (msix_enabled(&proxy->pci_dev)) {
73 msix_notify(&proxy->pci_dev, vector);
74 } else {
75 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
76 pci_set_irq(&proxy->pci_dev, atomic_read(&vdev->isr) & 1);
77 }
78 }
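/*
 * Interrupt delivery, in short: with MSI-X enabled the configured vector
 * is signalled directly; otherwise the legacy INTx line is driven from
 * bit 0 of the ISR, which the core set before calling notify.  The guest
 * acknowledges by reading ISR, which also clears it (see
 * virtio_ioport_read() and virtio_pci_isr_read() below).
 */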
79
80 static void virtio_pci_save_config(DeviceState *d, QEMUFile *f)
81 {
82 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
83 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
84
85 pci_device_save(&proxy->pci_dev, f);
86 msix_save(&proxy->pci_dev, f);
87 if (msix_present(&proxy->pci_dev))
88 qemu_put_be16(f, vdev->config_vector);
89 }
90
91 static const VMStateDescription vmstate_virtio_pci_modern_queue_state = {
92 .name = "virtio_pci/modern_queue_state",
93 .version_id = 1,
94 .minimum_version_id = 1,
95 .fields = (VMStateField[]) {
96 VMSTATE_UINT16(num, VirtIOPCIQueue),
97 VMSTATE_UNUSED(1), /* enabled was stored as be16 */
98 VMSTATE_BOOL(enabled, VirtIOPCIQueue),
99 VMSTATE_UINT32_ARRAY(desc, VirtIOPCIQueue, 2),
100 VMSTATE_UINT32_ARRAY(avail, VirtIOPCIQueue, 2),
101 VMSTATE_UINT32_ARRAY(used, VirtIOPCIQueue, 2),
102 VMSTATE_END_OF_LIST()
103 }
104 };
105
106 static bool virtio_pci_modern_state_needed(void *opaque)
107 {
108 VirtIOPCIProxy *proxy = opaque;
109
110 return virtio_pci_modern(proxy);
111 }
112
113 static const VMStateDescription vmstate_virtio_pci_modern_state_sub = {
114 .name = "virtio_pci/modern_state",
115 .version_id = 1,
116 .minimum_version_id = 1,
117 .needed = &virtio_pci_modern_state_needed,
118 .fields = (VMStateField[]) {
119 VMSTATE_UINT32(dfselect, VirtIOPCIProxy),
120 VMSTATE_UINT32(gfselect, VirtIOPCIProxy),
121 VMSTATE_UINT32_ARRAY(guest_features, VirtIOPCIProxy, 2),
122 VMSTATE_STRUCT_ARRAY(vqs, VirtIOPCIProxy, VIRTIO_QUEUE_MAX, 0,
123 vmstate_virtio_pci_modern_queue_state,
124 VirtIOPCIQueue),
125 VMSTATE_END_OF_LIST()
126 }
127 };
128
129 static const VMStateDescription vmstate_virtio_pci = {
130 .name = "virtio_pci",
131 .version_id = 1,
132 .minimum_version_id = 1,
133 .minimum_version_id_old = 1,
134 .fields = (VMStateField[]) {
135 VMSTATE_END_OF_LIST()
136 },
137 .subsections = (const VMStateDescription*[]) {
138 &vmstate_virtio_pci_modern_state_sub,
139 NULL
140 }
141 };
142
143 static bool virtio_pci_has_extra_state(DeviceState *d)
144 {
145 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
146
147 return proxy->flags & VIRTIO_PCI_FLAG_MIGRATE_EXTRA;
148 }
149
150 static void virtio_pci_save_extra_state(DeviceState *d, QEMUFile *f)
151 {
152 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
153
154 vmstate_save_state(f, &vmstate_virtio_pci, proxy, NULL);
155 }
156
157 static int virtio_pci_load_extra_state(DeviceState *d, QEMUFile *f)
158 {
159 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
160
161 return vmstate_load_state(f, &vmstate_virtio_pci, proxy, 1);
162 }
163
164 static void virtio_pci_save_queue(DeviceState *d, int n, QEMUFile *f)
165 {
166 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
167 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
168
169 if (msix_present(&proxy->pci_dev))
170 qemu_put_be16(f, virtio_queue_vector(vdev, n));
171 }
172
173 static int virtio_pci_load_config(DeviceState *d, QEMUFile *f)
174 {
175 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
176 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
177
178 int ret;
179 ret = pci_device_load(&proxy->pci_dev, f);
180 if (ret) {
181 return ret;
182 }
183 msix_unuse_all_vectors(&proxy->pci_dev);
184 msix_load(&proxy->pci_dev, f);
185 if (msix_present(&proxy->pci_dev)) {
186 qemu_get_be16s(f, &vdev->config_vector);
187 } else {
188 vdev->config_vector = VIRTIO_NO_VECTOR;
189 }
190 if (vdev->config_vector != VIRTIO_NO_VECTOR) {
191 return msix_vector_use(&proxy->pci_dev, vdev->config_vector);
192 }
193 return 0;
194 }
195
196 static int virtio_pci_load_queue(DeviceState *d, int n, QEMUFile *f)
197 {
198 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
199 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
200
201 uint16_t vector;
202 if (msix_present(&proxy->pci_dev)) {
203 qemu_get_be16s(f, &vector);
204 } else {
205 vector = VIRTIO_NO_VECTOR;
206 }
207 virtio_queue_set_vector(vdev, n, vector);
208 if (vector != VIRTIO_NO_VECTOR) {
209 return msix_vector_use(&proxy->pci_dev, vector);
210 }
211
212 return 0;
213 }
214
215 static bool virtio_pci_ioeventfd_enabled(DeviceState *d)
216 {
217 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
218
219 return (proxy->flags & VIRTIO_PCI_FLAG_USE_IOEVENTFD) != 0;
220 }
221
222 #define QEMU_VIRTIO_PCI_QUEUE_MEM_MULT 0x1000
223
224 static inline int virtio_pci_queue_mem_mult(struct VirtIOPCIProxy *proxy)
225 {
226 return (proxy->flags & VIRTIO_PCI_FLAG_PAGE_PER_VQ) ?
227 QEMU_VIRTIO_PCI_QUEUE_MEM_MULT : 4;
228 }
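/*
 * Worked example of the notify layout this multiplier implies: with the
 * default stride of 4 bytes, queue N's doorbell sits at offset 4 * N in
 * the notify region; with the page-per-vq flag the stride is 0x1000, so
 * every queue gets its own 4 KiB page, which makes it possible to map an
 * individual doorbell (see virtio_pci_set_host_notifier_mr() below).
 */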
229
230 static int virtio_pci_ioeventfd_assign(DeviceState *d, EventNotifier *notifier,
231 int n, bool assign)
232 {
233 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
234 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
235 VirtQueue *vq = virtio_get_queue(vdev, n);
236 bool legacy = virtio_pci_legacy(proxy);
237 bool modern = virtio_pci_modern(proxy);
238 bool fast_mmio = kvm_ioeventfd_any_length_enabled();
239 bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;
240 MemoryRegion *modern_mr = &proxy->notify.mr;
241 MemoryRegion *modern_notify_mr = &proxy->notify_pio.mr;
242 MemoryRegion *legacy_mr = &proxy->bar;
243 hwaddr modern_addr = virtio_pci_queue_mem_mult(proxy) *
244 virtio_get_queue_index(vq);
245 hwaddr legacy_addr = VIRTIO_PCI_QUEUE_NOTIFY;
246
247 if (assign) {
248 if (modern) {
249 if (fast_mmio) {
250 memory_region_add_eventfd(modern_mr, modern_addr, 0,
251 false, n, notifier);
252 } else {
253 memory_region_add_eventfd(modern_mr, modern_addr, 2,
254 false, n, notifier);
255 }
256 if (modern_pio) {
257 memory_region_add_eventfd(modern_notify_mr, 0, 2,
258 true, n, notifier);
259 }
260 }
261 if (legacy) {
262 memory_region_add_eventfd(legacy_mr, legacy_addr, 2,
263 true, n, notifier);
264 }
265 } else {
266 if (modern) {
267 if (fast_mmio) {
268 memory_region_del_eventfd(modern_mr, modern_addr, 0,
269 false, n, notifier);
270 } else {
271 memory_region_del_eventfd(modern_mr, modern_addr, 2,
272 false, n, notifier);
273 }
274 if (modern_pio) {
275 memory_region_del_eventfd(modern_notify_mr, 0, 2,
276 true, n, notifier);
277 }
278 }
279 if (legacy) {
280 memory_region_del_eventfd(legacy_mr, legacy_addr, 2,
281 true, n, notifier);
282 }
283 }
284 return 0;
285 }
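/*
 * Roughly how the eventfd wiring above works: the modern MMIO doorbell
 * registers the eventfd on the queue's slot in the notify region without
 * data matching, using a zero-length ("any width") match when KVM
 * supports it so the write can be handled as fast MMIO.  The legacy and
 * modern PIO doorbells instead match a 2-byte write of the queue index,
 * because there the value written selects the queue.
 */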
286
287 static void virtio_pci_start_ioeventfd(VirtIOPCIProxy *proxy)
288 {
289 virtio_bus_start_ioeventfd(&proxy->bus);
290 }
291
292 static void virtio_pci_stop_ioeventfd(VirtIOPCIProxy *proxy)
293 {
294 virtio_bus_stop_ioeventfd(&proxy->bus);
295 }
296
297 static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val)
298 {
299 VirtIOPCIProxy *proxy = opaque;
300 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
301 hwaddr pa;
302
303 switch (addr) {
304 case VIRTIO_PCI_GUEST_FEATURES:
305 /* Guest does not negotiate properly? We have to assume nothing. */
306 if (val & (1 << VIRTIO_F_BAD_FEATURE)) {
307 val = virtio_bus_get_vdev_bad_features(&proxy->bus);
308 }
309 virtio_set_features(vdev, val);
310 break;
311 case VIRTIO_PCI_QUEUE_PFN:
312 pa = (hwaddr)val << VIRTIO_PCI_QUEUE_ADDR_SHIFT;
313 if (pa == 0) {
314 virtio_pci_reset(DEVICE(proxy));
315 } else {
316 virtio_queue_set_addr(vdev, vdev->queue_sel, pa);
317 }
318 break;
319 case VIRTIO_PCI_QUEUE_SEL:
320 if (val < VIRTIO_QUEUE_MAX)
321 vdev->queue_sel = val;
322 break;
323 case VIRTIO_PCI_QUEUE_NOTIFY:
324 if (val < VIRTIO_QUEUE_MAX) {
325 virtio_queue_notify(vdev, val);
326 }
327 break;
328 case VIRTIO_PCI_STATUS:
329 if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) {
330 virtio_pci_stop_ioeventfd(proxy);
331 }
332
333 virtio_set_status(vdev, val & 0xFF);
334
335 if (val & VIRTIO_CONFIG_S_DRIVER_OK) {
336 virtio_pci_start_ioeventfd(proxy);
337 }
338
339 if (vdev->status == 0) {
340 virtio_pci_reset(DEVICE(proxy));
341 }
342
343 /* Linux before 2.6.34 drives the device without enabling
344 the PCI device bus master bit. Enable it automatically
345 for the guest. This is a PCI spec violation but so is
346 initiating DMA with bus master bit clear. */
347 if (val == (VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER)) {
348 pci_default_write_config(&proxy->pci_dev, PCI_COMMAND,
349 proxy->pci_dev.config[PCI_COMMAND] |
350 PCI_COMMAND_MASTER, 1);
351 }
352 break;
353 case VIRTIO_MSI_CONFIG_VECTOR:
354 msix_vector_unuse(&proxy->pci_dev, vdev->config_vector);
355 /* Make it possible for the guest to discover that an error took place. */
356 if (msix_vector_use(&proxy->pci_dev, val) < 0)
357 val = VIRTIO_NO_VECTOR;
358 vdev->config_vector = val;
359 break;
360 case VIRTIO_MSI_QUEUE_VECTOR:
361 msix_vector_unuse(&proxy->pci_dev,
362 virtio_queue_vector(vdev, vdev->queue_sel));
363 /* Make it possible for the guest to discover that an error took place. */
364 if (msix_vector_use(&proxy->pci_dev, val) < 0)
365 val = VIRTIO_NO_VECTOR;
366 virtio_queue_set_vector(vdev, vdev->queue_sel, val);
367 break;
368 default:
369 error_report("%s: unexpected address 0x%x value 0x%x",
370 __func__, addr, val);
371 break;
372 }
373 }
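/*
 * Legacy programming model, in short: the guest publishes a ring by
 * writing its page frame number to VIRTIO_PCI_QUEUE_PFN (addresses are
 * shifted by VIRTIO_PCI_QUEUE_ADDR_SHIFT, i.e. 4 KiB pages), and writing
 * 0 there resets the device, as handled above.  The queue size is fixed
 * by the device, which is why only the PFN is programmable here.
 */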
374
375 static uint32_t virtio_ioport_read(VirtIOPCIProxy *proxy, uint32_t addr)
376 {
377 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
378 uint32_t ret = 0xFFFFFFFF;
379
380 switch (addr) {
381 case VIRTIO_PCI_HOST_FEATURES:
382 ret = vdev->host_features;
383 break;
384 case VIRTIO_PCI_GUEST_FEATURES:
385 ret = vdev->guest_features;
386 break;
387 case VIRTIO_PCI_QUEUE_PFN:
388 ret = virtio_queue_get_addr(vdev, vdev->queue_sel)
389 >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
390 break;
391 case VIRTIO_PCI_QUEUE_NUM:
392 ret = virtio_queue_get_num(vdev, vdev->queue_sel);
393 break;
394 case VIRTIO_PCI_QUEUE_SEL:
395 ret = vdev->queue_sel;
396 break;
397 case VIRTIO_PCI_STATUS:
398 ret = vdev->status;
399 break;
400 case VIRTIO_PCI_ISR:
401 /* reading from the ISR also clears it. */
402 ret = atomic_xchg(&vdev->isr, 0);
403 pci_irq_deassert(&proxy->pci_dev);
404 break;
405 case VIRTIO_MSI_CONFIG_VECTOR:
406 ret = vdev->config_vector;
407 break;
408 case VIRTIO_MSI_QUEUE_VECTOR:
409 ret = virtio_queue_vector(vdev, vdev->queue_sel);
410 break;
411 default:
412 break;
413 }
414
415 return ret;
416 }
417
418 static uint64_t virtio_pci_config_read(void *opaque, hwaddr addr,
419 unsigned size)
420 {
421 VirtIOPCIProxy *proxy = opaque;
422 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
423 uint32_t config = VIRTIO_PCI_CONFIG_SIZE(&proxy->pci_dev);
424 uint64_t val = 0;
425 if (addr < config) {
426 return virtio_ioport_read(proxy, addr);
427 }
428 addr -= config;
429
430 switch (size) {
431 case 1:
432 val = virtio_config_readb(vdev, addr);
433 break;
434 case 2:
435 val = virtio_config_readw(vdev, addr);
436 if (virtio_is_big_endian(vdev)) {
437 val = bswap16(val);
438 }
439 break;
440 case 4:
441 val = virtio_config_readl(vdev, addr);
442 if (virtio_is_big_endian(vdev)) {
443 val = bswap32(val);
444 }
445 break;
446 }
447 return val;
448 }
449
450 static void virtio_pci_config_write(void *opaque, hwaddr addr,
451 uint64_t val, unsigned size)
452 {
453 VirtIOPCIProxy *proxy = opaque;
454 uint32_t config = VIRTIO_PCI_CONFIG_SIZE(&proxy->pci_dev);
455 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
456 if (addr < config) {
457 virtio_ioport_write(proxy, addr, val);
458 return;
459 }
460 addr -= config;
461 /*
462 * Virtio-PCI is odd. Ioports are LE but config space is target native
463 * endian.
464 */
465 switch (size) {
466 case 1:
467 virtio_config_writeb(vdev, addr, val);
468 break;
469 case 2:
470 if (virtio_is_big_endian(vdev)) {
471 val = bswap16(val);
472 }
473 virtio_config_writew(vdev, addr, val);
474 break;
475 case 4:
476 if (virtio_is_big_endian(vdev)) {
477 val = bswap32(val);
478 }
479 virtio_config_writel(vdev, addr, val);
480 break;
481 }
482 }
483
484 static const MemoryRegionOps virtio_pci_config_ops = {
485 .read = virtio_pci_config_read,
486 .write = virtio_pci_config_write,
487 .impl = {
488 .min_access_size = 1,
489 .max_access_size = 4,
490 },
491 .endianness = DEVICE_LITTLE_ENDIAN,
492 };
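/*
 * The legacy I/O BAR served by these ops is split in two: the virtio
 * header registers handled by virtio_ioport_read() and
 * virtio_ioport_write() come first, and the device-specific config space
 * follows at VIRTIO_PCI_CONFIG_SIZE(), which is why the accessors above
 * subtract that offset before forwarding to the virtio_config_* helpers.
 */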
493
494 static MemoryRegion *virtio_address_space_lookup(VirtIOPCIProxy *proxy,
495 hwaddr *off, int len)
496 {
497 int i;
498 VirtIOPCIRegion *reg;
499
500 for (i = 0; i < ARRAY_SIZE(proxy->regs); ++i) {
501 reg = &proxy->regs[i];
502 if (*off >= reg->offset &&
503 *off + len <= reg->offset + reg->size) {
504 *off -= reg->offset;
505 return &reg->mr;
506 }
507 }
508
509 return NULL;
510 }
511
512 /* Below are generic functions to do memcpy from/to an address space,
513 * without byteswaps, with input validation.
514 *
515 * As regular address_space_* APIs all do some kind of byteswap at least for
516 * some host/target combinations, we are forced to explicitly convert to a
517 * known-endianness integer value.
518 * It doesn't really matter which endian format to go through, so the code
519 * below selects the endian that causes the least amount of work on the given
520 * host.
521 *
522 * Note: host pointer must be aligned.
523 */
524 static
525 void virtio_address_space_write(VirtIOPCIProxy *proxy, hwaddr addr,
526 const uint8_t *buf, int len)
527 {
528 uint64_t val;
529 MemoryRegion *mr;
530
531 /* address_space_* APIs assume an aligned address.
532 * As address is under guest control, handle illegal values.
533 */
534 addr &= ~(len - 1);
535
536 mr = virtio_address_space_lookup(proxy, &addr, len);
537 if (!mr) {
538 return;
539 }
540
541 /* Make sure caller aligned buf properly */
542 assert(!(((uintptr_t)buf) & (len - 1)));
543
544 switch (len) {
545 case 1:
546 val = pci_get_byte(buf);
547 break;
548 case 2:
549 val = cpu_to_le16(pci_get_word(buf));
550 break;
551 case 4:
552 val = cpu_to_le32(pci_get_long(buf));
553 break;
554 default:
555 /* As length is under guest control, handle illegal values. */
556 return;
557 }
558 memory_region_dispatch_write(mr, addr, val, len, MEMTXATTRS_UNSPECIFIED);
559 }
560
561 static void
562 virtio_address_space_read(VirtIOPCIProxy *proxy, hwaddr addr,
563 uint8_t *buf, int len)
564 {
565 uint64_t val;
566 MemoryRegion *mr;
567
568 /* address_space_* APIs assume an aligned address.
569 * As address is under guest control, handle illegal values.
570 */
571 addr &= ~(len - 1);
572
573 mr = virtio_address_space_lookup(proxy, &addr, len);
574 if (!mr) {
575 return;
576 }
577
578 /* Make sure caller aligned buf properly */
579 assert(!(((uintptr_t)buf) & (len - 1)));
580
581 memory_region_dispatch_read(mr, addr, &val, len, MEMTXATTRS_UNSPECIFIED);
582 switch (len) {
583 case 1:
584 pci_set_byte(buf, val);
585 break;
586 case 2:
587 pci_set_word(buf, le16_to_cpu(val));
588 break;
589 case 4:
590 pci_set_long(buf, le32_to_cpu(val));
591 break;
592 default:
593 /* As length is under guest control, handle illegal values. */
594 break;
595 }
596 }
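/*
 * These two helpers back the VIRTIO_PCI_CAP_PCI_CFG window set up in
 * virtio_pci_device_plugged(): the guest programs offset and length in
 * the cfg capability and then accesses pci_cfg_data, and
 * virtio_write_config() and virtio_read_config() below forward that
 * access into whichever modern region (common, isr, device, notify)
 * contains the requested offset; the lookup goes purely by offset.
 */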
597
598 static void virtio_write_config(PCIDevice *pci_dev, uint32_t address,
599 uint32_t val, int len)
600 {
601 VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
602 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
603 struct virtio_pci_cfg_cap *cfg;
604
605 pci_default_write_config(pci_dev, address, val, len);
606
607 if (range_covers_byte(address, len, PCI_COMMAND) &&
608 !(pci_dev->config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
609 virtio_pci_stop_ioeventfd(proxy);
610 virtio_set_status(vdev, vdev->status & ~VIRTIO_CONFIG_S_DRIVER_OK);
611 }
612
613 if (proxy->config_cap &&
614 ranges_overlap(address, len, proxy->config_cap + offsetof(struct virtio_pci_cfg_cap,
615 pci_cfg_data),
616 sizeof cfg->pci_cfg_data)) {
617 uint32_t off;
618 uint32_t len;
619
620 cfg = (void *)(proxy->pci_dev.config + proxy->config_cap);
621 off = le32_to_cpu(cfg->cap.offset);
622 len = le32_to_cpu(cfg->cap.length);
623
624 if (len == 1 || len == 2 || len == 4) {
625 assert(len <= sizeof cfg->pci_cfg_data);
626 virtio_address_space_write(proxy, off, cfg->pci_cfg_data, len);
627 }
628 }
629 }
630
631 static uint32_t virtio_read_config(PCIDevice *pci_dev,
632 uint32_t address, int len)
633 {
634 VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
635 struct virtio_pci_cfg_cap *cfg;
636
637 if (proxy->config_cap &&
638 ranges_overlap(address, len, proxy->config_cap + offsetof(struct virtio_pci_cfg_cap,
639 pci_cfg_data),
640 sizeof cfg->pci_cfg_data)) {
641 uint32_t off;
642 uint32_t len;
643
644 cfg = (void *)(proxy->pci_dev.config + proxy->config_cap);
645 off = le32_to_cpu(cfg->cap.offset);
646 len = le32_to_cpu(cfg->cap.length);
647
648 if (len == 1 || len == 2 || len == 4) {
649 assert(len <= sizeof cfg->pci_cfg_data);
650 virtio_address_space_read(proxy, off, cfg->pci_cfg_data, len);
651 }
652 }
653
654 return pci_default_read_config(pci_dev, address, len);
655 }
656
657 static int kvm_virtio_pci_vq_vector_use(VirtIOPCIProxy *proxy,
658 unsigned int queue_no,
659 unsigned int vector)
660 {
661 VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
662 int ret;
663
664 if (irqfd->users == 0) {
665 ret = kvm_irqchip_add_msi_route(kvm_state, vector, &proxy->pci_dev);
666 if (ret < 0) {
667 return ret;
668 }
669 irqfd->virq = ret;
670 }
671 irqfd->users++;
672 return 0;
673 }
674
675 static void kvm_virtio_pci_vq_vector_release(VirtIOPCIProxy *proxy,
676 unsigned int vector)
677 {
678 VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
679 if (--irqfd->users == 0) {
680 kvm_irqchip_release_virq(kvm_state, irqfd->virq);
681 }
682 }
683
684 static int kvm_virtio_pci_irqfd_use(VirtIOPCIProxy *proxy,
685 unsigned int queue_no,
686 unsigned int vector)
687 {
688 VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
689 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
690 VirtQueue *vq = virtio_get_queue(vdev, queue_no);
691 EventNotifier *n = virtio_queue_get_guest_notifier(vq);
692 return kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL, irqfd->virq);
693 }
694
695 static void kvm_virtio_pci_irqfd_release(VirtIOPCIProxy *proxy,
696 unsigned int queue_no,
697 unsigned int vector)
698 {
699 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
700 VirtQueue *vq = virtio_get_queue(vdev, queue_no);
701 EventNotifier *n = virtio_queue_get_guest_notifier(vq);
702 VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
703 int ret;
704
705 ret = kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, n, irqfd->virq);
706 assert(ret == 0);
707 }
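/*
 * With an in-kernel irqchip the helpers above bind a queue's guest
 * notifier eventfd to a KVM-routed MSI (the virq), so the interrupt is
 * injected straight from the eventfd without bouncing through QEMU.
 * The vq_vector_use/release pair reference-counts the MSI route per
 * vector, since several queues may share one MSI-X vector.
 */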
708
709 static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
710 {
711 PCIDevice *dev = &proxy->pci_dev;
712 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
713 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
714 unsigned int vector;
715 int ret, queue_no;
716
717 for (queue_no = 0; queue_no < nvqs; queue_no++) {
718 if (!virtio_queue_get_num(vdev, queue_no)) {
719 break;
720 }
721 vector = virtio_queue_vector(vdev, queue_no);
722 if (vector >= msix_nr_vectors_allocated(dev)) {
723 continue;
724 }
725 ret = kvm_virtio_pci_vq_vector_use(proxy, queue_no, vector);
726 if (ret < 0) {
727 goto undo;
728 }
729 /* If guest supports masking, set up irqfd now.
730 * Otherwise, delay until unmasked in the frontend.
731 */
732 if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
733 ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
734 if (ret < 0) {
735 kvm_virtio_pci_vq_vector_release(proxy, vector);
736 goto undo;
737 }
738 }
739 }
740 return 0;
741
742 undo:
743 while (--queue_no >= 0) {
744 vector = virtio_queue_vector(vdev, queue_no);
745 if (vector >= msix_nr_vectors_allocated(dev)) {
746 continue;
747 }
748 if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
749 kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
750 }
751 kvm_virtio_pci_vq_vector_release(proxy, vector);
752 }
753 return ret;
754 }
755
756 static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
757 {
758 PCIDevice *dev = &proxy->pci_dev;
759 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
760 unsigned int vector;
761 int queue_no;
762 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
763
764 for (queue_no = 0; queue_no < nvqs; queue_no++) {
765 if (!virtio_queue_get_num(vdev, queue_no)) {
766 break;
767 }
768 vector = virtio_queue_vector(vdev, queue_no);
769 if (vector >= msix_nr_vectors_allocated(dev)) {
770 continue;
771 }
772 /* If guest supports masking, clean up irqfd now.
773 * Otherwise, it was cleaned when masked in the frontend.
774 */
775 if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
776 kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
777 }
778 kvm_virtio_pci_vq_vector_release(proxy, vector);
779 }
780 }
781
782 static int virtio_pci_vq_vector_unmask(VirtIOPCIProxy *proxy,
783 unsigned int queue_no,
784 unsigned int vector,
785 MSIMessage msg)
786 {
787 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
788 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
789 VirtQueue *vq = virtio_get_queue(vdev, queue_no);
790 EventNotifier *n = virtio_queue_get_guest_notifier(vq);
791 VirtIOIRQFD *irqfd;
792 int ret = 0;
793
794 if (proxy->vector_irqfd) {
795 irqfd = &proxy->vector_irqfd[vector];
796 if (irqfd->msg.data != msg.data || irqfd->msg.address != msg.address) {
797 ret = kvm_irqchip_update_msi_route(kvm_state, irqfd->virq, msg,
798 &proxy->pci_dev);
799 if (ret < 0) {
800 return ret;
801 }
802 kvm_irqchip_commit_routes(kvm_state);
803 }
804 }
805
806 /* If guest supports masking, irqfd is already setup, unmask it.
807 * Otherwise, set it up now.
808 */
809 if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
810 k->guest_notifier_mask(vdev, queue_no, false);
811 /* Test after unmasking to avoid losing events. */
812 if (k->guest_notifier_pending &&
813 k->guest_notifier_pending(vdev, queue_no)) {
814 event_notifier_set(n);
815 }
816 } else {
817 ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
818 }
819 return ret;
820 }
821
822 static void virtio_pci_vq_vector_mask(VirtIOPCIProxy *proxy,
823 unsigned int queue_no,
824 unsigned int vector)
825 {
826 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
827 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
828
829 /* If guest supports masking, keep irqfd but mask it.
830 * Otherwise, clean it up now.
831 */
832 if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
833 k->guest_notifier_mask(vdev, queue_no, true);
834 } else {
835 kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
836 }
837 }
838
839 static int virtio_pci_vector_unmask(PCIDevice *dev, unsigned vector,
840 MSIMessage msg)
841 {
842 VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
843 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
844 VirtQueue *vq = virtio_vector_first_queue(vdev, vector);
845 int ret, index, unmasked = 0;
846
847 while (vq) {
848 index = virtio_get_queue_index(vq);
849 if (!virtio_queue_get_num(vdev, index)) {
850 break;
851 }
852 if (index < proxy->nvqs_with_notifiers) {
853 ret = virtio_pci_vq_vector_unmask(proxy, index, vector, msg);
854 if (ret < 0) {
855 goto undo;
856 }
857 ++unmasked;
858 }
859 vq = virtio_vector_next_queue(vq);
860 }
861
862 return 0;
863
864 undo:
865 vq = virtio_vector_first_queue(vdev, vector);
866 while (vq && unmasked >= 0) {
867 index = virtio_get_queue_index(vq);
868 if (index < proxy->nvqs_with_notifiers) {
869 virtio_pci_vq_vector_mask(proxy, index, vector);
870 --unmasked;
871 }
872 vq = virtio_vector_next_queue(vq);
873 }
874 return ret;
875 }
876
877 static void virtio_pci_vector_mask(PCIDevice *dev, unsigned vector)
878 {
879 VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
880 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
881 VirtQueue *vq = virtio_vector_first_queue(vdev, vector);
882 int index;
883
884 while (vq) {
885 index = virtio_get_queue_index(vq);
886 if (!virtio_queue_get_num(vdev, index)) {
887 break;
888 }
889 if (index < proxy->nvqs_with_notifiers) {
890 virtio_pci_vq_vector_mask(proxy, index, vector);
891 }
892 vq = virtio_vector_next_queue(vq);
893 }
894 }
895
896 static void virtio_pci_vector_poll(PCIDevice *dev,
897 unsigned int vector_start,
898 unsigned int vector_end)
899 {
900 VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
901 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
902 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
903 int queue_no;
904 unsigned int vector;
905 EventNotifier *notifier;
906 VirtQueue *vq;
907
908 for (queue_no = 0; queue_no < proxy->nvqs_with_notifiers; queue_no++) {
909 if (!virtio_queue_get_num(vdev, queue_no)) {
910 break;
911 }
912 vector = virtio_queue_vector(vdev, queue_no);
913 if (vector < vector_start || vector >= vector_end ||
914 !msix_is_masked(dev, vector)) {
915 continue;
916 }
917 vq = virtio_get_queue(vdev, queue_no);
918 notifier = virtio_queue_get_guest_notifier(vq);
919 if (k->guest_notifier_pending) {
920 if (k->guest_notifier_pending(vdev, queue_no)) {
921 msix_set_pending(dev, vector);
922 }
923 } else if (event_notifier_test_and_clear(notifier)) {
924 msix_set_pending(dev, vector);
925 }
926 }
927 }
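/*
 * vector_poll is called by the MSI-X code (e.g. when the guest unmasks a
 * vector or after MSI-X state is reloaded) to recover interrupts raised
 * while the vector was masked: it asks the device whether work is
 * pending, or falls back to test-and-clear of the guest notifier, and
 * raises the MSI-X pending bit accordingly.
 */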
928
929 static int virtio_pci_set_guest_notifier(DeviceState *d, int n, bool assign,
930 bool with_irqfd)
931 {
932 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
933 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
934 VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
935 VirtQueue *vq = virtio_get_queue(vdev, n);
936 EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);
937
938 if (assign) {
939 int r = event_notifier_init(notifier, 0);
940 if (r < 0) {
941 return r;
942 }
943 virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
944 } else {
945 virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
946 event_notifier_cleanup(notifier);
947 }
948
949 if (!msix_enabled(&proxy->pci_dev) &&
950 vdev->use_guest_notifier_mask &&
951 vdc->guest_notifier_mask) {
952 vdc->guest_notifier_mask(vdev, n, !assign);
953 }
954
955 return 0;
956 }
957
958 static bool virtio_pci_query_guest_notifiers(DeviceState *d)
959 {
960 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
961 return msix_enabled(&proxy->pci_dev);
962 }
963
964 static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign)
965 {
966 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
967 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
968 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
969 int r, n;
970 bool with_irqfd = msix_enabled(&proxy->pci_dev) &&
971 kvm_msi_via_irqfd_enabled();
972
973 nvqs = MIN(nvqs, VIRTIO_QUEUE_MAX);
974
975 /* When deassigning, pass a consistent nvqs value
976 * to avoid leaking notifiers.
977 */
978 assert(assign || nvqs == proxy->nvqs_with_notifiers);
979
980 proxy->nvqs_with_notifiers = nvqs;
981
982 /* Must unset vector notifier while guest notifier is still assigned */
983 if ((proxy->vector_irqfd || k->guest_notifier_mask) && !assign) {
984 msix_unset_vector_notifiers(&proxy->pci_dev);
985 if (proxy->vector_irqfd) {
986 kvm_virtio_pci_vector_release(proxy, nvqs);
987 g_free(proxy->vector_irqfd);
988 proxy->vector_irqfd = NULL;
989 }
990 }
991
992 for (n = 0; n < nvqs; n++) {
993 if (!virtio_queue_get_num(vdev, n)) {
994 break;
995 }
996
997 r = virtio_pci_set_guest_notifier(d, n, assign, with_irqfd);
998 if (r < 0) {
999 goto assign_error;
1000 }
1001 }
1002
1003 /* Must set vector notifier after guest notifier has been assigned */
1004 if ((with_irqfd || k->guest_notifier_mask) && assign) {
1005 if (with_irqfd) {
1006 proxy->vector_irqfd =
1007 g_malloc0(sizeof(*proxy->vector_irqfd) *
1008 msix_nr_vectors_allocated(&proxy->pci_dev));
1009 r = kvm_virtio_pci_vector_use(proxy, nvqs);
1010 if (r < 0) {
1011 goto assign_error;
1012 }
1013 }
1014 r = msix_set_vector_notifiers(&proxy->pci_dev,
1015 virtio_pci_vector_unmask,
1016 virtio_pci_vector_mask,
1017 virtio_pci_vector_poll);
1018 if (r < 0) {
1019 goto notifiers_error;
1020 }
1021 }
1022
1023 return 0;
1024
1025 notifiers_error:
1026 if (with_irqfd) {
1027 assert(assign);
1028 kvm_virtio_pci_vector_release(proxy, nvqs);
1029 }
1030
1031 assign_error:
1032 /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
1033 assert(assign);
1034 while (--n >= 0) {
1035 virtio_pci_set_guest_notifier(d, n, !assign, with_irqfd);
1036 }
1037 return r;
1038 }
1039
1040 static int virtio_pci_set_host_notifier_mr(DeviceState *d, int n,
1041 MemoryRegion *mr, bool assign)
1042 {
1043 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
1044 int offset;
1045
1046 if (n >= VIRTIO_QUEUE_MAX || !virtio_pci_modern(proxy) ||
1047 virtio_pci_queue_mem_mult(proxy) != memory_region_size(mr)) {
1048 return -1;
1049 }
1050
1051 if (assign) {
1052 offset = virtio_pci_queue_mem_mult(proxy) * n;
1053 memory_region_add_subregion_overlap(&proxy->notify.mr, offset, mr, 1);
1054 } else {
1055 memory_region_del_subregion(&proxy->notify.mr, mr);
1056 }
1057
1058 return 0;
1059 }
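/*
 * This hook lets a backend overlay its own memory region (typically a
 * mmap-ed doorbell page, as used by vhost-user host notifiers) on top of
 * the queue's slot in the notify region, so guest notify writes can
 * reach the backend without being handled here; it only works when the
 * region is exactly one queue slot wide, hence the size check.
 */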
1060
1061 static void virtio_pci_vmstate_change(DeviceState *d, bool running)
1062 {
1063 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
1064 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
1065
1066 if (running) {
1067 /* Old QEMU versions did not set bus master enable on status write.
1068 * Detect DRIVER set and enable it.
1069 */
1070 if ((proxy->flags & VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION) &&
1071 (vdev->status & VIRTIO_CONFIG_S_DRIVER) &&
1072 !(proxy->pci_dev.config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
1073 pci_default_write_config(&proxy->pci_dev, PCI_COMMAND,
1074 proxy->pci_dev.config[PCI_COMMAND] |
1075 PCI_COMMAND_MASTER, 1);
1076 }
1077 virtio_pci_start_ioeventfd(proxy);
1078 } else {
1079 virtio_pci_stop_ioeventfd(proxy);
1080 }
1081 }
1082
1083 #ifdef CONFIG_VIRTFS
1084 static void virtio_9p_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
1085 {
1086 V9fsPCIState *dev = VIRTIO_9P_PCI(vpci_dev);
1087 DeviceState *vdev = DEVICE(&dev->vdev);
1088
1089 qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
1090 object_property_set_bool(OBJECT(vdev), true, "realized", errp);
1091 }
1092
1093 static Property virtio_9p_pci_properties[] = {
1094 DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
1095 VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
1096 DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
1097 DEFINE_PROP_END_OF_LIST(),
1098 };
1099
1100 static void virtio_9p_pci_class_init(ObjectClass *klass, void *data)
1101 {
1102 DeviceClass *dc = DEVICE_CLASS(klass);
1103 PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
1104 VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
1105
1106 k->realize = virtio_9p_pci_realize;
1107 pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
1108 pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_9P;
1109 pcidev_k->revision = VIRTIO_PCI_ABI_VERSION;
1110 pcidev_k->class_id = 0x2;
1111 set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
1112 dc->props = virtio_9p_pci_properties;
1113 }
1114
1115 static void virtio_9p_pci_instance_init(Object *obj)
1116 {
1117 V9fsPCIState *dev = VIRTIO_9P_PCI(obj);
1118
1119 virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
1120 TYPE_VIRTIO_9P);
1121 }
1122
1123 static const TypeInfo virtio_9p_pci_info = {
1124 .name = TYPE_VIRTIO_9P_PCI,
1125 .parent = TYPE_VIRTIO_PCI,
1126 .instance_size = sizeof(V9fsPCIState),
1127 .instance_init = virtio_9p_pci_instance_init,
1128 .class_init = virtio_9p_pci_class_init,
1129 };
1130 #endif /* CONFIG_VIRTFS */
1131
1132 /*
1133 * virtio-pci: This is the PCIDevice which has a virtio-pci-bus.
1134 */
1135
1136 static int virtio_pci_query_nvectors(DeviceState *d)
1137 {
1138 VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
1139
1140 return proxy->nvectors;
1141 }
1142
1143 static AddressSpace *virtio_pci_get_dma_as(DeviceState *d)
1144 {
1145 VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
1146 PCIDevice *dev = &proxy->pci_dev;
1147
1148 return pci_get_address_space(dev);
1149 }
1150
1151 static int virtio_pci_add_mem_cap(VirtIOPCIProxy *proxy,
1152 struct virtio_pci_cap *cap)
1153 {
1154 PCIDevice *dev = &proxy->pci_dev;
1155 int offset;
1156
1157 offset = pci_add_capability(dev, PCI_CAP_ID_VNDR, 0,
1158 cap->cap_len, &error_abort);
1159
1160 assert(cap->cap_len >= sizeof *cap);
1161 memcpy(dev->config + offset + PCI_CAP_FLAGS, &cap->cap_len,
1162 cap->cap_len - PCI_CAP_FLAGS);
1163
1164 return offset;
1165 }
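/*
 * The capability is added as a vendor-specific one (PCI_CAP_ID_VNDR);
 * pci_add_capability() fills in the capability ID and next pointer, and
 * the memcpy above starts at PCI_CAP_FLAGS (byte 2), which lines up with
 * virtio_pci_cap.cap_len, so the remaining fields (cfg_type, bar,
 * offset, length, ...) are copied into config space verbatim.
 */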
1166
1167 static uint64_t virtio_pci_common_read(void *opaque, hwaddr addr,
1168 unsigned size)
1169 {
1170 VirtIOPCIProxy *proxy = opaque;
1171 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
1172 uint32_t val = 0;
1173 int i;
1174
1175 switch (addr) {
1176 case VIRTIO_PCI_COMMON_DFSELECT:
1177 val = proxy->dfselect;
1178 break;
1179 case VIRTIO_PCI_COMMON_DF:
1180 if (proxy->dfselect <= 1) {
1181 VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
1182
1183 val = (vdev->host_features & ~vdc->legacy_features) >>
1184 (32 * proxy->dfselect);
1185 }
1186 break;
1187 case VIRTIO_PCI_COMMON_GFSELECT:
1188 val = proxy->gfselect;
1189 break;
1190 case VIRTIO_PCI_COMMON_GF:
1191 if (proxy->gfselect < ARRAY_SIZE(proxy->guest_features)) {
1192 val = proxy->guest_features[proxy->gfselect];
1193 }
1194 break;
1195 case VIRTIO_PCI_COMMON_MSIX:
1196 val = vdev->config_vector;
1197 break;
1198 case VIRTIO_PCI_COMMON_NUMQ:
1199 for (i = 0; i < VIRTIO_QUEUE_MAX; ++i) {
1200 if (virtio_queue_get_num(vdev, i)) {
1201 val = i + 1;
1202 }
1203 }
1204 break;
1205 case VIRTIO_PCI_COMMON_STATUS:
1206 val = vdev->status;
1207 break;
1208 case VIRTIO_PCI_COMMON_CFGGENERATION:
1209 val = vdev->generation;
1210 break;
1211 case VIRTIO_PCI_COMMON_Q_SELECT:
1212 val = vdev->queue_sel;
1213 break;
1214 case VIRTIO_PCI_COMMON_Q_SIZE:
1215 val = virtio_queue_get_num(vdev, vdev->queue_sel);
1216 break;
1217 case VIRTIO_PCI_COMMON_Q_MSIX:
1218 val = virtio_queue_vector(vdev, vdev->queue_sel);
1219 break;
1220 case VIRTIO_PCI_COMMON_Q_ENABLE:
1221 val = proxy->vqs[vdev->queue_sel].enabled;
1222 break;
1223 case VIRTIO_PCI_COMMON_Q_NOFF:
1224 /* Simply map queues in order */
1225 val = vdev->queue_sel;
1226 break;
1227 case VIRTIO_PCI_COMMON_Q_DESCLO:
1228 val = proxy->vqs[vdev->queue_sel].desc[0];
1229 break;
1230 case VIRTIO_PCI_COMMON_Q_DESCHI:
1231 val = proxy->vqs[vdev->queue_sel].desc[1];
1232 break;
1233 case VIRTIO_PCI_COMMON_Q_AVAILLO:
1234 val = proxy->vqs[vdev->queue_sel].avail[0];
1235 break;
1236 case VIRTIO_PCI_COMMON_Q_AVAILHI:
1237 val = proxy->vqs[vdev->queue_sel].avail[1];
1238 break;
1239 case VIRTIO_PCI_COMMON_Q_USEDLO:
1240 val = proxy->vqs[vdev->queue_sel].used[0];
1241 break;
1242 case VIRTIO_PCI_COMMON_Q_USEDHI:
1243 val = proxy->vqs[vdev->queue_sel].used[1];
1244 break;
1245 default:
1246 val = 0;
1247 }
1248
1249 return val;
1250 }
1251
1252 static void virtio_pci_common_write(void *opaque, hwaddr addr,
1253 uint64_t val, unsigned size)
1254 {
1255 VirtIOPCIProxy *proxy = opaque;
1256 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
1257
1258 switch (addr) {
1259 case VIRTIO_PCI_COMMON_DFSELECT:
1260 proxy->dfselect = val;
1261 break;
1262 case VIRTIO_PCI_COMMON_GFSELECT:
1263 proxy->gfselect = val;
1264 break;
1265 case VIRTIO_PCI_COMMON_GF:
1266 if (proxy->gfselect < ARRAY_SIZE(proxy->guest_features)) {
1267 proxy->guest_features[proxy->gfselect] = val;
1268 virtio_set_features(vdev,
1269 (((uint64_t)proxy->guest_features[1]) << 32) |
1270 proxy->guest_features[0]);
1271 }
1272 break;
1273 case VIRTIO_PCI_COMMON_MSIX:
1274 msix_vector_unuse(&proxy->pci_dev, vdev->config_vector);
1275 /* Make it possible for the guest to discover that an error took place. */
1276 if (msix_vector_use(&proxy->pci_dev, val) < 0) {
1277 val = VIRTIO_NO_VECTOR;
1278 }
1279 vdev->config_vector = val;
1280 break;
1281 case VIRTIO_PCI_COMMON_STATUS:
1282 if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) {
1283 virtio_pci_stop_ioeventfd(proxy);
1284 }
1285
1286 virtio_set_status(vdev, val & 0xFF);
1287
1288 if (val & VIRTIO_CONFIG_S_DRIVER_OK) {
1289 virtio_pci_start_ioeventfd(proxy);
1290 }
1291
1292 if (vdev->status == 0) {
1293 virtio_pci_reset(DEVICE(proxy));
1294 }
1295
1296 break;
1297 case VIRTIO_PCI_COMMON_Q_SELECT:
1298 if (val < VIRTIO_QUEUE_MAX) {
1299 vdev->queue_sel = val;
1300 }
1301 break;
1302 case VIRTIO_PCI_COMMON_Q_SIZE:
1303 proxy->vqs[vdev->queue_sel].num = val;
1304 break;
1305 case VIRTIO_PCI_COMMON_Q_MSIX:
1306 msix_vector_unuse(&proxy->pci_dev,
1307 virtio_queue_vector(vdev, vdev->queue_sel));
1308 /* Make it possible for the guest to discover that an error took place. */
1309 if (msix_vector_use(&proxy->pci_dev, val) < 0) {
1310 val = VIRTIO_NO_VECTOR;
1311 }
1312 virtio_queue_set_vector(vdev, vdev->queue_sel, val);
1313 break;
1314 case VIRTIO_PCI_COMMON_Q_ENABLE:
1315 virtio_queue_set_num(vdev, vdev->queue_sel,
1316 proxy->vqs[vdev->queue_sel].num);
1317 virtio_queue_set_rings(vdev, vdev->queue_sel,
1318 ((uint64_t)proxy->vqs[vdev->queue_sel].desc[1]) << 32 |
1319 proxy->vqs[vdev->queue_sel].desc[0],
1320 ((uint64_t)proxy->vqs[vdev->queue_sel].avail[1]) << 32 |
1321 proxy->vqs[vdev->queue_sel].avail[0],
1322 ((uint64_t)proxy->vqs[vdev->queue_sel].used[1]) << 32 |
1323 proxy->vqs[vdev->queue_sel].used[0]);
1324 proxy->vqs[vdev->queue_sel].enabled = 1;
1325 break;
1326 case VIRTIO_PCI_COMMON_Q_DESCLO:
1327 proxy->vqs[vdev->queue_sel].desc[0] = val;
1328 break;
1329 case VIRTIO_PCI_COMMON_Q_DESCHI:
1330 proxy->vqs[vdev->queue_sel].desc[1] = val;
1331 break;
1332 case VIRTIO_PCI_COMMON_Q_AVAILLO:
1333 proxy->vqs[vdev->queue_sel].avail[0] = val;
1334 break;
1335 case VIRTIO_PCI_COMMON_Q_AVAILHI:
1336 proxy->vqs[vdev->queue_sel].avail[1] = val;
1337 break;
1338 case VIRTIO_PCI_COMMON_Q_USEDLO:
1339 proxy->vqs[vdev->queue_sel].used[0] = val;
1340 break;
1341 case VIRTIO_PCI_COMMON_Q_USEDHI:
1342 proxy->vqs[vdev->queue_sel].used[1] = val;
1343 break;
1344 default:
1345 break;
1346 }
1347 }
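/*
 * A modern driver typically brings a queue up through this register
 * block as follows: write Q_SELECT, optionally shrink Q_SIZE, program
 * the ring addresses via Q_DESCLO/HI, Q_AVAILLO/HI and Q_USEDLO/HI, and
 * finally write 1 to Q_ENABLE.  Only the Q_ENABLE write commits the
 * addresses to the core (virtio_queue_set_rings() above); until then
 * they are merely cached in proxy->vqs[].
 */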
1348
1349
1350 static uint64_t virtio_pci_notify_read(void *opaque, hwaddr addr,
1351 unsigned size)
1352 {
1353 return 0;
1354 }
1355
1356 static void virtio_pci_notify_write(void *opaque, hwaddr addr,
1357 uint64_t val, unsigned size)
1358 {
1359 VirtIODevice *vdev = opaque;
1360 VirtIOPCIProxy *proxy = VIRTIO_PCI(DEVICE(vdev)->parent_bus->parent);
1361 unsigned queue = addr / virtio_pci_queue_mem_mult(proxy);
1362
1363 if (queue < VIRTIO_QUEUE_MAX) {
1364 virtio_queue_notify(vdev, queue);
1365 }
1366 }
1367
1368 static void virtio_pci_notify_write_pio(void *opaque, hwaddr addr,
1369 uint64_t val, unsigned size)
1370 {
1371 VirtIODevice *vdev = opaque;
1372 unsigned queue = val;
1373
1374 if (queue < VIRTIO_QUEUE_MAX) {
1375 virtio_queue_notify(vdev, queue);
1376 }
1377 }
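/*
 * Two doorbell flavours: the MMIO notify above derives the queue index
 * from the write offset (addr divided by the queue memory multiplier),
 * while the optional PIO notify (the modern-pio-notify flag) is a single
 * register where the value written is the queue index.  The PIO variant
 * exists because a port I/O exit can be cheaper to handle than an MMIO
 * exit on some hosts.
 */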
1378
1379 static uint64_t virtio_pci_isr_read(void *opaque, hwaddr addr,
1380 unsigned size)
1381 {
1382 VirtIOPCIProxy *proxy = opaque;
1383 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
1384 uint64_t val = atomic_xchg(&vdev->isr, 0);
1385 pci_irq_deassert(&proxy->pci_dev);
1386
1387 return val;
1388 }
1389
1390 static void virtio_pci_isr_write(void *opaque, hwaddr addr,
1391 uint64_t val, unsigned size)
1392 {
1393 }
1394
1395 static uint64_t virtio_pci_device_read(void *opaque, hwaddr addr,
1396 unsigned size)
1397 {
1398 VirtIODevice *vdev = opaque;
1399 uint64_t val = 0;
1400
1401 switch (size) {
1402 case 1:
1403 val = virtio_config_modern_readb(vdev, addr);
1404 break;
1405 case 2:
1406 val = virtio_config_modern_readw(vdev, addr);
1407 break;
1408 case 4:
1409 val = virtio_config_modern_readl(vdev, addr);
1410 break;
1411 }
1412 return val;
1413 }
1414
1415 static void virtio_pci_device_write(void *opaque, hwaddr addr,
1416 uint64_t val, unsigned size)
1417 {
1418 VirtIODevice *vdev = opaque;
1419 switch (size) {
1420 case 1:
1421 virtio_config_modern_writeb(vdev, addr, val);
1422 break;
1423 case 2:
1424 virtio_config_modern_writew(vdev, addr, val);
1425 break;
1426 case 4:
1427 virtio_config_modern_writel(vdev, addr, val);
1428 break;
1429 }
1430 }
1431
1432 static void virtio_pci_modern_regions_init(VirtIOPCIProxy *proxy)
1433 {
1434 static const MemoryRegionOps common_ops = {
1435 .read = virtio_pci_common_read,
1436 .write = virtio_pci_common_write,
1437 .impl = {
1438 .min_access_size = 1,
1439 .max_access_size = 4,
1440 },
1441 .endianness = DEVICE_LITTLE_ENDIAN,
1442 };
1443 static const MemoryRegionOps isr_ops = {
1444 .read = virtio_pci_isr_read,
1445 .write = virtio_pci_isr_write,
1446 .impl = {
1447 .min_access_size = 1,
1448 .max_access_size = 4,
1449 },
1450 .endianness = DEVICE_LITTLE_ENDIAN,
1451 };
1452 static const MemoryRegionOps device_ops = {
1453 .read = virtio_pci_device_read,
1454 .write = virtio_pci_device_write,
1455 .impl = {
1456 .min_access_size = 1,
1457 .max_access_size = 4,
1458 },
1459 .endianness = DEVICE_LITTLE_ENDIAN,
1460 };
1461 static const MemoryRegionOps notify_ops = {
1462 .read = virtio_pci_notify_read,
1463 .write = virtio_pci_notify_write,
1464 .impl = {
1465 .min_access_size = 1,
1466 .max_access_size = 4,
1467 },
1468 .endianness = DEVICE_LITTLE_ENDIAN,
1469 };
1470 static const MemoryRegionOps notify_pio_ops = {
1471 .read = virtio_pci_notify_read,
1472 .write = virtio_pci_notify_write_pio,
1473 .impl = {
1474 .min_access_size = 1,
1475 .max_access_size = 4,
1476 },
1477 .endianness = DEVICE_LITTLE_ENDIAN,
1478 };
1479
1480
1481 memory_region_init_io(&proxy->common.mr, OBJECT(proxy),
1482 &common_ops,
1483 proxy,
1484 "virtio-pci-common",
1485 proxy->common.size);
1486
1487 memory_region_init_io(&proxy->isr.mr, OBJECT(proxy),
1488 &isr_ops,
1489 proxy,
1490 "virtio-pci-isr",
1491 proxy->isr.size);
1492
1493 memory_region_init_io(&proxy->device.mr, OBJECT(proxy),
1494 &device_ops,
1495 virtio_bus_get_device(&proxy->bus),
1496 "virtio-pci-device",
1497 proxy->device.size);
1498
1499 memory_region_init_io(&proxy->notify.mr, OBJECT(proxy),
1500 &notify_ops,
1501 virtio_bus_get_device(&proxy->bus),
1502 "virtio-pci-notify",
1503 proxy->notify.size);
1504
1505 memory_region_init_io(&proxy->notify_pio.mr, OBJECT(proxy),
1506 &notify_pio_ops,
1507 virtio_bus_get_device(&proxy->bus),
1508 "virtio-pci-notify-pio",
1509 proxy->notify_pio.size);
1510 }
1511
1512 static void virtio_pci_modern_region_map(VirtIOPCIProxy *proxy,
1513 VirtIOPCIRegion *region,
1514 struct virtio_pci_cap *cap,
1515 MemoryRegion *mr,
1516 uint8_t bar)
1517 {
1518 memory_region_add_subregion(mr, region->offset, &region->mr);
1519
1520 cap->cfg_type = region->type;
1521 cap->bar = bar;
1522 cap->offset = cpu_to_le32(region->offset);
1523 cap->length = cpu_to_le32(region->size);
1524 virtio_pci_add_mem_cap(proxy, cap);
1525
1526 }
1527
1528 static void virtio_pci_modern_mem_region_map(VirtIOPCIProxy *proxy,
1529 VirtIOPCIRegion *region,
1530 struct virtio_pci_cap *cap)
1531 {
1532 virtio_pci_modern_region_map(proxy, region, cap,
1533 &proxy->modern_bar, proxy->modern_mem_bar_idx);
1534 }
1535
1536 static void virtio_pci_modern_io_region_map(VirtIOPCIProxy *proxy,
1537 VirtIOPCIRegion *region,
1538 struct virtio_pci_cap *cap)
1539 {
1540 virtio_pci_modern_region_map(proxy, region, cap,
1541 &proxy->io_bar, proxy->modern_io_bar_idx);
1542 }
1543
1544 static void virtio_pci_modern_mem_region_unmap(VirtIOPCIProxy *proxy,
1545 VirtIOPCIRegion *region)
1546 {
1547 memory_region_del_subregion(&proxy->modern_bar,
1548 &region->mr);
1549 }
1550
1551 static void virtio_pci_modern_io_region_unmap(VirtIOPCIProxy *proxy,
1552 VirtIOPCIRegion *region)
1553 {
1554 memory_region_del_subregion(&proxy->io_bar,
1555 &region->mr);
1556 }
1557
1558 static void virtio_pci_pre_plugged(DeviceState *d, Error **errp)
1559 {
1560 VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
1561 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
1562
1563 if (virtio_pci_modern(proxy)) {
1564 virtio_add_feature(&vdev->host_features, VIRTIO_F_VERSION_1);
1565 }
1566
1567 virtio_add_feature(&vdev->host_features, VIRTIO_F_BAD_FEATURE);
1568 }
1569
1570 /* This is called by virtio-bus just after the device is plugged. */
1571 static void virtio_pci_device_plugged(DeviceState *d, Error **errp)
1572 {
1573 VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
1574 VirtioBusState *bus = &proxy->bus;
1575 bool legacy = virtio_pci_legacy(proxy);
1576 bool modern;
1577 bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;
1578 uint8_t *config;
1579 uint32_t size;
1580 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
1581
1582 /*
1583 * Virtio capabilities present without
1584 * VIRTIO_F_VERSION_1 confuse guests
1585 */
1586 if (!proxy->ignore_backend_features &&
1587 !virtio_has_feature(vdev->host_features, VIRTIO_F_VERSION_1)) {
1588 virtio_pci_disable_modern(proxy);
1589
1590 if (!legacy) {
1591 error_setg(errp, "Device doesn't support modern mode, and legacy"
1592 " mode is disabled");
1593 error_append_hint(errp, "Set disable-legacy to off\n");
1594
1595 return;
1596 }
1597 }
1598
1599 modern = virtio_pci_modern(proxy);
1600
1601 config = proxy->pci_dev.config;
1602 if (proxy->class_code) {
1603 pci_config_set_class(config, proxy->class_code);
1604 }
1605
1606 if (legacy) {
1607 if (virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM)) {
1608 error_setg(errp, "VIRTIO_F_IOMMU_PLATFORM is not supported by"
1609 " legacy or transitional devices");
1610 return;
1611 }
1612 /*
1613 * Legacy and transitional devices use specific subsystem IDs.
1614 * Note that the subsystem vendor ID (config + PCI_SUBSYSTEM_VENDOR_ID)
1615 * is set to PCI_SUBVENDOR_ID_REDHAT_QUMRANET by default.
1616 */
1617 pci_set_word(config + PCI_SUBSYSTEM_ID, virtio_bus_get_vdev_id(bus));
1618 } else {
1619 /* pure virtio-1.0 */
1620 pci_set_word(config + PCI_VENDOR_ID,
1621 PCI_VENDOR_ID_REDHAT_QUMRANET);
1622 pci_set_word(config + PCI_DEVICE_ID,
1623 0x1040 + virtio_bus_get_vdev_id(bus));
1624 pci_config_set_revision(config, 1);
1625 }
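/*
 * To summarise the ID scheme above: transitional and legacy devices keep
 * the legacy 0x1000-range device IDs with revision 0
 * (VIRTIO_PCI_ABI_VERSION) and expose the virtio device type in the PCI
 * subsystem ID, while modern-only devices use 0x1040 plus the device
 * type with revision 1, as required by the virtio 1.0 spec.
 */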
1626 config[PCI_INTERRUPT_PIN] = 1;
1627
1628
1629 if (modern) {
1630 struct virtio_pci_cap cap = {
1631 .cap_len = sizeof cap,
1632 };
1633 struct virtio_pci_notify_cap notify = {
1634 .cap.cap_len = sizeof notify,
1635 .notify_off_multiplier =
1636 cpu_to_le32(virtio_pci_queue_mem_mult(proxy)),
1637 };
1638 struct virtio_pci_cfg_cap cfg = {
1639 .cap.cap_len = sizeof cfg,
1640 .cap.cfg_type = VIRTIO_PCI_CAP_PCI_CFG,
1641 };
1642 struct virtio_pci_notify_cap notify_pio = {
1643 .cap.cap_len = sizeof notify,
1644 .notify_off_multiplier = cpu_to_le32(0x0),
1645 };
1646
1647 struct virtio_pci_cfg_cap *cfg_mask;
1648
1649 virtio_pci_modern_regions_init(proxy);
1650
1651 virtio_pci_modern_mem_region_map(proxy, &proxy->common, &cap);
1652 virtio_pci_modern_mem_region_map(proxy, &proxy->isr, &cap);
1653 virtio_pci_modern_mem_region_map(proxy, &proxy->device, &cap);
1654 virtio_pci_modern_mem_region_map(proxy, &proxy->notify, &notify.cap);
1655
1656 if (modern_pio) {
1657 memory_region_init(&proxy->io_bar, OBJECT(proxy),
1658 "virtio-pci-io", 0x4);
1659
1660 pci_register_bar(&proxy->pci_dev, proxy->modern_io_bar_idx,
1661 PCI_BASE_ADDRESS_SPACE_IO, &proxy->io_bar);
1662
1663 virtio_pci_modern_io_region_map(proxy, &proxy->notify_pio,
1664 &notify_pio.cap);
1665 }
1666
1667 pci_register_bar(&proxy->pci_dev, proxy->modern_mem_bar_idx,
1668 PCI_BASE_ADDRESS_SPACE_MEMORY |
1669 PCI_BASE_ADDRESS_MEM_PREFETCH |
1670 PCI_BASE_ADDRESS_MEM_TYPE_64,
1671 &proxy->modern_bar);
1672
1673 proxy->config_cap = virtio_pci_add_mem_cap(proxy, &cfg.cap);
1674 cfg_mask = (void *)(proxy->pci_dev.wmask + proxy->config_cap);
1675 pci_set_byte(&cfg_mask->cap.bar, ~0x0);
1676 pci_set_long((uint8_t *)&cfg_mask->cap.offset, ~0x0);
1677 pci_set_long((uint8_t *)&cfg_mask->cap.length, ~0x0);
1678 pci_set_long(cfg_mask->pci_cfg_data, ~0x0);
1679 }
1680
1681 if (proxy->nvectors) {
1682 int err = msix_init_exclusive_bar(&proxy->pci_dev, proxy->nvectors,
1683 proxy->msix_bar_idx, NULL);
1684 if (err) {
1685 /* Notice when a system that supports MSIx can't initialize it */
1686 if (err != -ENOTSUP) {
1687 error_report("unable to init msix vectors to %" PRIu32,
1688 proxy->nvectors);
1689 }
1690 proxy->nvectors = 0;
1691 }
1692 }
1693
1694 proxy->pci_dev.config_write = virtio_write_config;
1695 proxy->pci_dev.config_read = virtio_read_config;
1696
1697 if (legacy) {
1698 size = VIRTIO_PCI_REGION_SIZE(&proxy->pci_dev)
1699 + virtio_bus_get_vdev_config_len(bus);
1700 size = pow2ceil(size);
1701
1702 memory_region_init_io(&proxy->bar, OBJECT(proxy),
1703 &virtio_pci_config_ops,
1704 proxy, "virtio-pci", size);
1705
1706 pci_register_bar(&proxy->pci_dev, proxy->legacy_io_bar_idx,
1707 PCI_BASE_ADDRESS_SPACE_IO, &proxy->bar);
1708 }
1709 }
1710
1711 static void virtio_pci_device_unplugged(DeviceState *d)
1712 {
1713 VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
1714 bool modern = virtio_pci_modern(proxy);
1715 bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;
1716
1717 virtio_pci_stop_ioeventfd(proxy);
1718
1719 if (modern) {
1720 virtio_pci_modern_mem_region_unmap(proxy, &proxy->common);
1721 virtio_pci_modern_mem_region_unmap(proxy, &proxy->isr);
1722 virtio_pci_modern_mem_region_unmap(proxy, &proxy->device);
1723 virtio_pci_modern_mem_region_unmap(proxy, &proxy->notify);
1724 if (modern_pio) {
1725 virtio_pci_modern_io_region_unmap(proxy, &proxy->notify_pio);
1726 }
1727 }
1728 }
1729
1730 static void virtio_pci_realize(PCIDevice *pci_dev, Error **errp)
1731 {
1732 VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
1733 VirtioPCIClass *k = VIRTIO_PCI_GET_CLASS(pci_dev);
1734 bool pcie_port = pci_bus_is_express(pci_get_bus(pci_dev)) &&
1735 !pci_bus_is_root(pci_get_bus(pci_dev));
1736
1737 if (kvm_enabled() && !kvm_has_many_ioeventfds()) {
1738 proxy->flags &= ~VIRTIO_PCI_FLAG_USE_IOEVENTFD;
1739 }
1740
1741 /*
1742 * virtio pci bar layout used by default.
1743 * subclasses can re-arrange things if needed.
1744 *
1745 * region 0 -- virtio legacy io bar
1746 * region 1 -- msi-x bar
1747 * region 2 -- virtio modern io bar (off by default)
1748 * region 4+5 -- virtio modern memory (64bit) bar
1749 */
1750 proxy->legacy_io_bar_idx = 0;
1751 proxy->msix_bar_idx = 1;
1752 proxy->modern_io_bar_idx = 2;
1753 proxy->modern_mem_bar_idx = 4;
1754
1755 proxy->common.offset = 0x0;
1756 proxy->common.size = 0x1000;
1757 proxy->common.type = VIRTIO_PCI_CAP_COMMON_CFG;
1758
1759 proxy->isr.offset = 0x1000;
1760 proxy->isr.size = 0x1000;
1761 proxy->isr.type = VIRTIO_PCI_CAP_ISR_CFG;
1762
1763 proxy->device.offset = 0x2000;
1764 proxy->device.size = 0x1000;
1765 proxy->device.type = VIRTIO_PCI_CAP_DEVICE_CFG;
1766
1767 proxy->notify.offset = 0x3000;
1768 proxy->notify.size = virtio_pci_queue_mem_mult(proxy) * VIRTIO_QUEUE_MAX;
1769 proxy->notify.type = VIRTIO_PCI_CAP_NOTIFY_CFG;
1770
1771 proxy->notify_pio.offset = 0x0;
1772 proxy->notify_pio.size = 0x4;
1773 proxy->notify_pio.type = VIRTIO_PCI_CAP_NOTIFY_CFG;
1774
1775 /* subclasses can enforce modern, so do this unconditionally */
1776 memory_region_init(&proxy->modern_bar, OBJECT(proxy), "virtio-pci",
1777 /* PCI BAR regions must be powers of 2 */
1778 pow2ceil(proxy->notify.offset + proxy->notify.size));
1779
1780 if (proxy->disable_legacy == ON_OFF_AUTO_AUTO) {
1781 proxy->disable_legacy = pcie_port ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
1782 }
1783
1784 if (!virtio_pci_modern(proxy) && !virtio_pci_legacy(proxy)) {
1785 error_setg(errp, "device cannot work as neither modern nor legacy mode"
1786 " is enabled");
1787 error_append_hint(errp, "Set either disable-modern or disable-legacy"
1788 " to off\n");
1789 return;
1790 }
1791
1792 if (pcie_port && pci_is_express(pci_dev)) {
1793 int pos;
1794
1795 pos = pcie_endpoint_cap_init(pci_dev, 0);
1796 assert(pos > 0);
1797
1798 pos = pci_add_capability(pci_dev, PCI_CAP_ID_PM, 0,
1799 PCI_PM_SIZEOF, errp);
1800 if (pos < 0) {
1801 return;
1802 }
1803
1804 pci_dev->exp.pm_cap = pos;
1805
1806 /*
1807 * Indicates that this function complies with revision 1.2 of the
1808 * PCI Power Management Interface Specification.
1809 */
1810 pci_set_word(pci_dev->config + pos + PCI_PM_PMC, 0x3);
1811
1812 if (proxy->flags & VIRTIO_PCI_FLAG_INIT_DEVERR) {
1813 /* Init error enabling flags */
1814 pcie_cap_deverr_init(pci_dev);
1815 }
1816
1817 if (proxy->flags & VIRTIO_PCI_FLAG_INIT_LNKCTL) {
1818 /* Init Link Control Register */
1819 pcie_cap_lnkctl_init(pci_dev);
1820 }
1821
1822 if (proxy->flags & VIRTIO_PCI_FLAG_INIT_PM) {
1823 /* Init Power Management Control Register */
1824 pci_set_word(pci_dev->wmask + pos + PCI_PM_CTRL,
1825 PCI_PM_CTRL_STATE_MASK);
1826 }
1827
1828 if (proxy->flags & VIRTIO_PCI_FLAG_ATS) {
1829 pcie_ats_init(pci_dev, 256);
1830 }
1831
1832 } else {
1833 /*
1834 * make future invocations of pci_is_express() return false
1835 * and pci_config_size() return PCI_CONFIG_SPACE_SIZE.
1836 */
1837 pci_dev->cap_present &= ~QEMU_PCI_CAP_EXPRESS;
1838 }
1839
1840 virtio_pci_bus_new(&proxy->bus, sizeof(proxy->bus), proxy);
1841 if (k->realize) {
1842 k->realize(proxy, errp);
1843 }
1844 }
1845
1846 static void virtio_pci_exit(PCIDevice *pci_dev)
1847 {
1848 msix_uninit_exclusive_bar(pci_dev);
1849 }
1850
1851 static void virtio_pci_reset(DeviceState *qdev)
1852 {
1853 VirtIOPCIProxy *proxy = VIRTIO_PCI(qdev);
1854 VirtioBusState *bus = VIRTIO_BUS(&proxy->bus);
1855 PCIDevice *dev = PCI_DEVICE(qdev);
1856 int i;
1857
1858 virtio_pci_stop_ioeventfd(proxy);
1859 virtio_bus_reset(bus);
1860 msix_unuse_all_vectors(&proxy->pci_dev);
1861
1862 for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
1863 proxy->vqs[i].enabled = 0;
1864 proxy->vqs[i].num = 0;
1865 proxy->vqs[i].desc[0] = proxy->vqs[i].desc[1] = 0;
1866 proxy->vqs[i].avail[0] = proxy->vqs[i].avail[1] = 0;
1867 proxy->vqs[i].used[0] = proxy->vqs[i].used[1] = 0;
1868 }
1869
1870 if (pci_is_express(dev)) {
1871 pcie_cap_deverr_reset(dev);
1872 pcie_cap_lnkctl_reset(dev);
1873
1874 pci_set_word(dev->config + dev->exp.pm_cap + PCI_PM_CTRL, 0);
1875 }
1876 }
1877
1878 static Property virtio_pci_properties[] = {
1879 DEFINE_PROP_BIT("virtio-pci-bus-master-bug-migration", VirtIOPCIProxy, flags,
1880 VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION_BIT, false),
1881 DEFINE_PROP_ON_OFF_AUTO("disable-legacy", VirtIOPCIProxy, disable_legacy,
1882 ON_OFF_AUTO_AUTO),
1883 DEFINE_PROP_BOOL("disable-modern", VirtIOPCIProxy, disable_modern, false),
1884 DEFINE_PROP_BIT("migrate-extra", VirtIOPCIProxy, flags,
1885 VIRTIO_PCI_FLAG_MIGRATE_EXTRA_BIT, true),
1886 DEFINE_PROP_BIT("modern-pio-notify", VirtIOPCIProxy, flags,
1887 VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY_BIT, false),
1888 DEFINE_PROP_BIT("x-disable-pcie", VirtIOPCIProxy, flags,
1889 VIRTIO_PCI_FLAG_DISABLE_PCIE_BIT, false),
1890 DEFINE_PROP_BIT("page-per-vq", VirtIOPCIProxy, flags,
1891 VIRTIO_PCI_FLAG_PAGE_PER_VQ_BIT, false),
1892 DEFINE_PROP_BOOL("x-ignore-backend-features", VirtIOPCIProxy,
1893 ignore_backend_features, false),
1894 DEFINE_PROP_BIT("ats", VirtIOPCIProxy, flags,
1895 VIRTIO_PCI_FLAG_ATS_BIT, false),
1896 DEFINE_PROP_BIT("x-pcie-deverr-init", VirtIOPCIProxy, flags,
1897 VIRTIO_PCI_FLAG_INIT_DEVERR_BIT, true),
1898 DEFINE_PROP_BIT("x-pcie-lnkctl-init", VirtIOPCIProxy, flags,
1899 VIRTIO_PCI_FLAG_INIT_LNKCTL_BIT, true),
1900 DEFINE_PROP_BIT("x-pcie-pm-init", VirtIOPCIProxy, flags,
1901 VIRTIO_PCI_FLAG_INIT_PM_BIT, true),
1902 DEFINE_PROP_END_OF_LIST(),
1903 };
1904
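/* Flag the device as PCI Express before the parent realize runs, so
 * pci_is_express() and the config space size reflect modern mode, unless
 * x-disable-pcie is set or only legacy mode is in use. */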
1905 static void virtio_pci_dc_realize(DeviceState *qdev, Error **errp)
1906 {
1907 VirtioPCIClass *vpciklass = VIRTIO_PCI_GET_CLASS(qdev);
1908 VirtIOPCIProxy *proxy = VIRTIO_PCI(qdev);
1909 PCIDevice *pci_dev = &proxy->pci_dev;
1910
1911 if (!(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_PCIE) &&
1912 virtio_pci_modern(proxy)) {
1913 pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS;
1914 }
1915
1916 vpciklass->parent_dc_realize(qdev, errp);
1917 }
1918
1919 static void virtio_pci_class_init(ObjectClass *klass, void *data)
1920 {
1921 DeviceClass *dc = DEVICE_CLASS(klass);
1922 PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
1923 VirtioPCIClass *vpciklass = VIRTIO_PCI_CLASS(klass);
1924
1925 dc->props = virtio_pci_properties;
1926 k->realize = virtio_pci_realize;
1927 k->exit = virtio_pci_exit;
1928 k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
1929 k->revision = VIRTIO_PCI_ABI_VERSION;
1930 k->class_id = PCI_CLASS_OTHERS;
1931 device_class_set_parent_realize(dc, virtio_pci_dc_realize,
1932 &vpciklass->parent_dc_realize);
1933 dc->reset = virtio_pci_reset;
1934 }
1935
1936 static const TypeInfo virtio_pci_info = {
1937 .name = TYPE_VIRTIO_PCI,
1938 .parent = TYPE_PCI_DEVICE,
1939 .instance_size = sizeof(VirtIOPCIProxy),
1940 .class_init = virtio_pci_class_init,
1941 .class_size = sizeof(VirtioPCIClass),
1942 .abstract = true,
1943 .interfaces = (InterfaceInfo[]) {
1944 { INTERFACE_PCIE_DEVICE },
1945 { INTERFACE_CONVENTIONAL_PCI_DEVICE },
1946 { }
1947 },
1948 };
1949
1950 /* virtio-blk-pci */
1951
1952 static Property virtio_blk_pci_properties[] = {
1953 DEFINE_PROP_UINT32("class", VirtIOPCIProxy, class_code, 0),
1954 DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
1955 VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
1956 DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors,
1957 DEV_NVECTORS_UNSPECIFIED),
1958 DEFINE_PROP_END_OF_LIST(),
1959 };
1960
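/* Default "vectors" to one MSI-X vector per request queue plus one for
 * config changes. */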
1961 static void virtio_blk_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
1962 {
1963 VirtIOBlkPCI *dev = VIRTIO_BLK_PCI(vpci_dev);
1964 DeviceState *vdev = DEVICE(&dev->vdev);
1965
1966 if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) {
1967 vpci_dev->nvectors = dev->vdev.conf.num_queues + 1;
1968 }
1969
1970 qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
1971 object_property_set_bool(OBJECT(vdev), true, "realized", errp);
1972 }
1973
1974 static void virtio_blk_pci_class_init(ObjectClass *klass, void *data)
1975 {
1976 DeviceClass *dc = DEVICE_CLASS(klass);
1977 VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
1978 PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
1979
1980 set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
1981 dc->props = virtio_blk_pci_properties;
1982 k->realize = virtio_blk_pci_realize;
1983 pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
1984 pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_BLOCK;
1985 pcidev_k->revision = VIRTIO_PCI_ABI_VERSION;
1986 pcidev_k->class_id = PCI_CLASS_STORAGE_SCSI;
1987 }
1988
1989 static void virtio_blk_pci_instance_init(Object *obj)
1990 {
1991 VirtIOBlkPCI *dev = VIRTIO_BLK_PCI(obj);
1992
1993 virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
1994 TYPE_VIRTIO_BLK);
1995 object_property_add_alias(obj, "bootindex", OBJECT(&dev->vdev),
1996 "bootindex", &error_abort);
1997 }
1998
1999 static const TypeInfo virtio_blk_pci_info = {
2000 .name = TYPE_VIRTIO_BLK_PCI,
2001 .parent = TYPE_VIRTIO_PCI,
2002 .instance_size = sizeof(VirtIOBlkPCI),
2003 .instance_init = virtio_blk_pci_instance_init,
2004 .class_init = virtio_blk_pci_class_init,
2005 };
2006
2007 #if defined(CONFIG_VHOST_USER) && defined(CONFIG_LINUX)
2008 /* vhost-user-blk */
2009
2010 static Property vhost_user_blk_pci_properties[] = {
2011 DEFINE_PROP_UINT32("class", VirtIOPCIProxy, class_code, 0),
2012 DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors,
2013 DEV_NVECTORS_UNSPECIFIED),
2014 DEFINE_PROP_END_OF_LIST(),
2015 };
2016
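/* As for virtio-blk-pci: default "vectors" to one per queue plus one for
 * config changes. */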
2017 static void vhost_user_blk_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
2018 {
2019 VHostUserBlkPCI *dev = VHOST_USER_BLK_PCI(vpci_dev);
2020 DeviceState *vdev = DEVICE(&dev->vdev);
2021
2022 if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) {
2023 vpci_dev->nvectors = dev->vdev.num_queues + 1;
2024 }
2025
2026 qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
2027 object_property_set_bool(OBJECT(vdev), true, "realized", errp);
2028 }
2029
2030 static void vhost_user_blk_pci_class_init(ObjectClass *klass, void *data)
2031 {
2032 DeviceClass *dc = DEVICE_CLASS(klass);
2033 VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
2034 PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
2035
2036 set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
2037 dc->props = vhost_user_blk_pci_properties;
2038 k->realize = vhost_user_blk_pci_realize;
2039 pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
2040 pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_BLOCK;
2041 pcidev_k->revision = VIRTIO_PCI_ABI_VERSION;
2042 pcidev_k->class_id = PCI_CLASS_STORAGE_SCSI;
2043 }
2044
2045 static void vhost_user_blk_pci_instance_init(Object *obj)
2046 {
2047 VHostUserBlkPCI *dev = VHOST_USER_BLK_PCI(obj);
2048
2049 virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
2050 TYPE_VHOST_USER_BLK);
2051 object_property_add_alias(obj, "bootindex", OBJECT(&dev->vdev),
2052 "bootindex", &error_abort);
2053 }
2054
2055 static const TypeInfo vhost_user_blk_pci_info = {
2056 .name = TYPE_VHOST_USER_BLK_PCI,
2057 .parent = TYPE_VIRTIO_PCI,
2058 .instance_size = sizeof(VHostUserBlkPCI),
2059 .instance_init = vhost_user_blk_pci_instance_init,
2060 .class_init = vhost_user_blk_pci_class_init,
2061 };
2062 #endif
2063
2064 /* virtio-scsi-pci */
2065
2066 static Property virtio_scsi_pci_properties[] = {
2067 DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
2068 VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
2069 DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors,
2070 DEV_NVECTORS_UNSPECIFIED),
2071 DEFINE_PROP_END_OF_LIST(),
2072 };
2073
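/* Default "vectors" to one per request queue, plus the control and event
 * queues and the config interrupt. */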
2074 static void virtio_scsi_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
2075 {
2076 VirtIOSCSIPCI *dev = VIRTIO_SCSI_PCI(vpci_dev);
2077 DeviceState *vdev = DEVICE(&dev->vdev);
2078 VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(vdev);
2079 DeviceState *proxy = DEVICE(vpci_dev);
2080 char *bus_name;
2081
2082 if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) {
2083 vpci_dev->nvectors = vs->conf.num_queues + 3;
2084 }
2085
2086 /*
2087 * For command line compatibility, this sets the virtio-scsi-device bus
2088 * name as before.
2089 */
2090 if (proxy->id) {
2091 bus_name = g_strdup_printf("%s.0", proxy->id);
2092 virtio_device_set_child_bus_name(VIRTIO_DEVICE(vdev), bus_name);
2093 g_free(bus_name);
2094 }
2095
2096 qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
2097 object_property_set_bool(OBJECT(vdev), true, "realized", errp);
2098 }
2099
2100 static void virtio_scsi_pci_class_init(ObjectClass *klass, void *data)
2101 {
2102 DeviceClass *dc = DEVICE_CLASS(klass);
2103 VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
2104 PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
2105
2106 k->realize = virtio_scsi_pci_realize;
2107 set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
2108 dc->props = virtio_scsi_pci_properties;
2109 pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
2110 pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_SCSI;
2111 pcidev_k->revision = 0x00;
2112 pcidev_k->class_id = PCI_CLASS_STORAGE_SCSI;
2113 }
2114
2115 static void virtio_scsi_pci_instance_init(Object *obj)
2116 {
2117 VirtIOSCSIPCI *dev = VIRTIO_SCSI_PCI(obj);
2118
2119 virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
2120 TYPE_VIRTIO_SCSI);
2121 }
2122
2123 static const TypeInfo virtio_scsi_pci_info = {
2124 .name = TYPE_VIRTIO_SCSI_PCI,
2125 .parent = TYPE_VIRTIO_PCI,
2126 .instance_size = sizeof(VirtIOSCSIPCI),
2127 .instance_init = virtio_scsi_pci_instance_init,
2128 .class_init = virtio_scsi_pci_class_init,
2129 };
2130
2131 /* vhost-scsi-pci */
2132
2133 #ifdef CONFIG_VHOST_SCSI
2134 static Property vhost_scsi_pci_properties[] = {
2135 DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors,
2136 DEV_NVECTORS_UNSPECIFIED),
2137 DEFINE_PROP_END_OF_LIST(),
2138 };
2139
2140 static void vhost_scsi_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
2141 {
2142 VHostSCSIPCI *dev = VHOST_SCSI_PCI(vpci_dev);
2143 DeviceState *vdev = DEVICE(&dev->vdev);
2144 VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(vdev);
2145
2146 if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) {
2147 vpci_dev->nvectors = vs->conf.num_queues + 3;
2148 }
2149
2150 qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
2151 object_property_set_bool(OBJECT(vdev), true, "realized", errp);
2152 }
2153
2154 static void vhost_scsi_pci_class_init(ObjectClass *klass, void *data)
2155 {
2156 DeviceClass *dc = DEVICE_CLASS(klass);
2157 VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
2158 PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
2159 k->realize = vhost_scsi_pci_realize;
2160 set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
2161 dc->props = vhost_scsi_pci_properties;
2162 pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
2163 pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_SCSI;
2164 pcidev_k->revision = 0x00;
2165 pcidev_k->class_id = PCI_CLASS_STORAGE_SCSI;
2166 }
2167
2168 static void vhost_scsi_pci_instance_init(Object *obj)
2169 {
2170 VHostSCSIPCI *dev = VHOST_SCSI_PCI(obj);
2171
2172 virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
2173 TYPE_VHOST_SCSI);
2174 object_property_add_alias(obj, "bootindex", OBJECT(&dev->vdev),
2175 "bootindex", &error_abort);
2176 }
2177
2178 static const TypeInfo vhost_scsi_pci_info = {
2179 .name = TYPE_VHOST_SCSI_PCI,
2180 .parent = TYPE_VIRTIO_PCI,
2181 .instance_size = sizeof(VHostSCSIPCI),
2182 .instance_init = vhost_scsi_pci_instance_init,
2183 .class_init = vhost_scsi_pci_class_init,
2184 };
2185 #endif
2186
2187 #if defined(CONFIG_VHOST_USER) && defined(CONFIG_LINUX)
2188 /* vhost-user-scsi-pci */
2189 static Property vhost_user_scsi_pci_properties[] = {
2190 DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors,
2191 DEV_NVECTORS_UNSPECIFIED),
2192 DEFINE_PROP_END_OF_LIST(),
2193 };
2194
2195 static void vhost_user_scsi_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
2196 {
2197 VHostUserSCSIPCI *dev = VHOST_USER_SCSI_PCI(vpci_dev);
2198 DeviceState *vdev = DEVICE(&dev->vdev);
2199 VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(vdev);
2200
2201 if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) {
2202 vpci_dev->nvectors = vs->conf.num_queues + 3;
2203 }
2204
2205 qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
2206 object_property_set_bool(OBJECT(vdev), true, "realized", errp);
2207 }
2208
2209 static void vhost_user_scsi_pci_class_init(ObjectClass *klass, void *data)
2210 {
2211 DeviceClass *dc = DEVICE_CLASS(klass);
2212 VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
2213 PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
2214 k->realize = vhost_user_scsi_pci_realize;
2215 set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
2216 dc->props = vhost_user_scsi_pci_properties;
2217 pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
2218 pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_SCSI;
2219 pcidev_k->revision = 0x00;
2220 pcidev_k->class_id = PCI_CLASS_STORAGE_SCSI;
2221 }
2222
2223 static void vhost_user_scsi_pci_instance_init(Object *obj)
2224 {
2225 VHostUserSCSIPCI *dev = VHOST_USER_SCSI_PCI(obj);
2226
2227 virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
2228 TYPE_VHOST_USER_SCSI);
2229 object_property_add_alias(obj, "bootindex", OBJECT(&dev->vdev),
2230 "bootindex", &error_abort);
2231 }
2232
2233 static const TypeInfo vhost_user_scsi_pci_info = {
2234 .name = TYPE_VHOST_USER_SCSI_PCI,
2235 .parent = TYPE_VIRTIO_PCI,
2236 .instance_size = sizeof(VHostUserSCSIPCI),
2237 .instance_init = vhost_user_scsi_pci_instance_init,
2238 .class_init = vhost_user_scsi_pci_class_init,
2239 };
2240 #endif
2241
2242 /* vhost-vsock-pci */
2243
2244 #ifdef CONFIG_VHOST_VSOCK
2245 static Property vhost_vsock_pci_properties[] = {
2246 DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 3),
2247 DEFINE_PROP_END_OF_LIST(),
2248 };
2249
2250 static void vhost_vsock_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
2251 {
2252 VHostVSockPCI *dev = VHOST_VSOCK_PCI(vpci_dev);
2253 DeviceState *vdev = DEVICE(&dev->vdev);
2254
2255 qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
2256 object_property_set_bool(OBJECT(vdev), true, "realized", errp);
2257 }
2258
2259 static void vhost_vsock_pci_class_init(ObjectClass *klass, void *data)
2260 {
2261 DeviceClass *dc = DEVICE_CLASS(klass);
2262 VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
2263 PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
2264 k->realize = vhost_vsock_pci_realize;
2265 set_bit(DEVICE_CATEGORY_MISC, dc->categories);
2266 dc->props = vhost_vsock_pci_properties;
2267 pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
2268 pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_VSOCK;
2269 pcidev_k->revision = 0x00;
2270 pcidev_k->class_id = PCI_CLASS_COMMUNICATION_OTHER;
2271 }
2272
2273 static void vhost_vsock_pci_instance_init(Object *obj)
2274 {
2275 VHostVSockPCI *dev = VHOST_VSOCK_PCI(obj);
2276
2277 virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
2278 TYPE_VHOST_VSOCK);
2279 }
2280
2281 static const TypeInfo vhost_vsock_pci_info = {
2282 .name = TYPE_VHOST_VSOCK_PCI,
2283 .parent = TYPE_VIRTIO_PCI,
2284 .instance_size = sizeof(VHostVSockPCI),
2285 .instance_init = vhost_vsock_pci_instance_init,
2286 .class_init = vhost_vsock_pci_class_init,
2287 };
2288 #endif
2289
2290 /* virtio-balloon-pci */
2291
2292 static Property virtio_balloon_pci_properties[] = {
2293 DEFINE_PROP_UINT32("class", VirtIOPCIProxy, class_code, 0),
2294 DEFINE_PROP_END_OF_LIST(),
2295 };
2296
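/* Only PCI_CLASS_OTHERS and the pre-1.1 PCI_CLASS_MEMORY_RAM value are
 * accepted for "class"; anything else falls back to PCI_CLASS_OTHERS. */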
2297 static void virtio_balloon_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
2298 {
2299 VirtIOBalloonPCI *dev = VIRTIO_BALLOON_PCI(vpci_dev);
2300 DeviceState *vdev = DEVICE(&dev->vdev);
2301
2302 if (vpci_dev->class_code != PCI_CLASS_OTHERS &&
2303 vpci_dev->class_code != PCI_CLASS_MEMORY_RAM) { /* qemu < 1.1 */
2304 vpci_dev->class_code = PCI_CLASS_OTHERS;
2305 }
2306
2307 qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
2308 object_property_set_bool(OBJECT(vdev), true, "realized", errp);
2309 }
2310
2311 static void virtio_balloon_pci_class_init(ObjectClass *klass, void *data)
2312 {
2313 DeviceClass *dc = DEVICE_CLASS(klass);
2314 VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
2315 PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
2316 k->realize = virtio_balloon_pci_realize;
2317 set_bit(DEVICE_CATEGORY_MISC, dc->categories);
2318 dc->props = virtio_balloon_pci_properties;
2319 pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
2320 pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_BALLOON;
2321 pcidev_k->revision = VIRTIO_PCI_ABI_VERSION;
2322 pcidev_k->class_id = PCI_CLASS_OTHERS;
2323 }
2324
2325 static void virtio_balloon_pci_instance_init(Object *obj)
2326 {
2327 VirtIOBalloonPCI *dev = VIRTIO_BALLOON_PCI(obj);
2328
2329 virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
2330 TYPE_VIRTIO_BALLOON);
2331 object_property_add_alias(obj, "guest-stats", OBJECT(&dev->vdev),
2332 "guest-stats", &error_abort);
2333 object_property_add_alias(obj, "guest-stats-polling-interval",
2334 OBJECT(&dev->vdev),
2335 "guest-stats-polling-interval", &error_abort);
2336 }
2337
2338 static const TypeInfo virtio_balloon_pci_info = {
2339 .name = TYPE_VIRTIO_BALLOON_PCI,
2340 .parent = TYPE_VIRTIO_PCI,
2341 .instance_size = sizeof(VirtIOBalloonPCI),
2342 .instance_init = virtio_balloon_pci_instance_init,
2343 .class_init = virtio_balloon_pci_class_init,
2344 };
2345
2346 /* virtio-serial-pci */
2347
2348 static void virtio_serial_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
2349 {
2350 VirtIOSerialPCI *dev = VIRTIO_SERIAL_PCI(vpci_dev);
2351 DeviceState *vdev = DEVICE(&dev->vdev);
2352 DeviceState *proxy = DEVICE(vpci_dev);
2353 char *bus_name;
2354
2355 if (vpci_dev->class_code != PCI_CLASS_COMMUNICATION_OTHER &&
2356 vpci_dev->class_code != PCI_CLASS_DISPLAY_OTHER && /* qemu 0.10 */
2357 vpci_dev->class_code != PCI_CLASS_OTHERS) { /* qemu-kvm */
2358 vpci_dev->class_code = PCI_CLASS_COMMUNICATION_OTHER;
2359 }
2360
2361 /* backwards-compatibility with machines that were created with
2362  * DEV_NVECTORS_UNSPECIFIED */
2363 if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) {
2364 vpci_dev->nvectors = dev->vdev.serial.max_virtserial_ports + 1;
2365 }
2366
2367 /*
2368 * For command line compatibility, this sets the virtio-serial-device bus
2369 * name as before.
2370 */
2371 if (proxy->id) {
2372 bus_name = g_strdup_printf("%s.0", proxy->id);
2373 virtio_device_set_child_bus_name(VIRTIO_DEVICE(vdev), bus_name);
2374 g_free(bus_name);
2375 }
2376
2377 qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
2378 object_property_set_bool(OBJECT(vdev), true, "realized", errp);
2379 }
2380
2381 static Property virtio_serial_pci_properties[] = {
2382 DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
2383 VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
2384 DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
2385 DEFINE_PROP_UINT32("class", VirtIOPCIProxy, class_code, 0),
2386 DEFINE_PROP_END_OF_LIST(),
2387 };
2388
2389 static void virtio_serial_pci_class_init(ObjectClass *klass, void *data)
2390 {
2391 DeviceClass *dc = DEVICE_CLASS(klass);
2392 VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
2393 PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
2394 k->realize = virtio_serial_pci_realize;
2395 set_bit(DEVICE_CATEGORY_INPUT, dc->categories);
2396 dc->props = virtio_serial_pci_properties;
2397 pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
2398 pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_CONSOLE;
2399 pcidev_k->revision = VIRTIO_PCI_ABI_VERSION;
2400 pcidev_k->class_id = PCI_CLASS_COMMUNICATION_OTHER;
2401 }
2402
2403 static void virtio_serial_pci_instance_init(Object *obj)
2404 {
2405 VirtIOSerialPCI *dev = VIRTIO_SERIAL_PCI(obj);
2406
2407 virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
2408 TYPE_VIRTIO_SERIAL);
2409 }
2410
2411 static const TypeInfo virtio_serial_pci_info = {
2412 .name = TYPE_VIRTIO_SERIAL_PCI,
2413 .parent = TYPE_VIRTIO_PCI,
2414 .instance_size = sizeof(VirtIOSerialPCI),
2415 .instance_init = virtio_serial_pci_instance_init,
2416 .class_init = virtio_serial_pci_class_init,
2417 };
2418
2419 /* virtio-net-pci */
2420
2421 static Property virtio_net_properties[] = {
2422 DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
2423 VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
2424 DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 3),
2425 DEFINE_PROP_END_OF_LIST(),
2426 };
2427
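/* Name the net client after the proxy's qdev id and type, then realize
 * the embedded virtio-net device on the virtio bus. */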
2428 static void virtio_net_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
2429 {
2430 DeviceState *qdev = DEVICE(vpci_dev);
2431 VirtIONetPCI *dev = VIRTIO_NET_PCI(vpci_dev);
2432 DeviceState *vdev = DEVICE(&dev->vdev);
2433
2434 virtio_net_set_netclient_name(&dev->vdev, qdev->id,
2435 object_get_typename(OBJECT(qdev)));
2436 qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
2437 object_property_set_bool(OBJECT(vdev), true, "realized", errp);
2438 }
2439
2440 static void virtio_net_pci_class_init(ObjectClass *klass, void *data)
2441 {
2442 DeviceClass *dc = DEVICE_CLASS(klass);
2443 PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
2444 VirtioPCIClass *vpciklass = VIRTIO_PCI_CLASS(klass);
2445
2446 k->romfile = "efi-virtio.rom";
2447 k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
2448 k->device_id = PCI_DEVICE_ID_VIRTIO_NET;
2449 k->revision = VIRTIO_PCI_ABI_VERSION;
2450 k->class_id = PCI_CLASS_NETWORK_ETHERNET;
2451 set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
2452 dc->props = virtio_net_properties;
2453 vpciklass->realize = virtio_net_pci_realize;
2454 }
2455
2456 static void virtio_net_pci_instance_init(Object *obj)
2457 {
2458 VirtIONetPCI *dev = VIRTIO_NET_PCI(obj);
2459
2460 virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
2461 TYPE_VIRTIO_NET);
2462 object_property_add_alias(obj, "bootindex", OBJECT(&dev->vdev),
2463 "bootindex", &error_abort);
2464 }
2465
2466 static const TypeInfo virtio_net_pci_info = {
2467 .name = TYPE_VIRTIO_NET_PCI,
2468 .parent = TYPE_VIRTIO_PCI,
2469 .instance_size = sizeof(VirtIONetPCI),
2470 .instance_init = virtio_net_pci_instance_init,
2471 .class_init = virtio_net_pci_class_init,
2472 };
2473
2474 /* virtio-rng-pci */
2475
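/* Realize the embedded virtio-rng device, then point the proxy's "rng"
 * link property at the backend the child device is using. */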
2476 static void virtio_rng_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
2477 {
2478 VirtIORngPCI *vrng = VIRTIO_RNG_PCI(vpci_dev);
2479 DeviceState *vdev = DEVICE(&vrng->vdev);
2480 Error *err = NULL;
2481
2482 qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
2483 object_property_set_bool(OBJECT(vdev), true, "realized", &err);
2484 if (err) {
2485 error_propagate(errp, err);
2486 return;
2487 }
2488
2489 object_property_set_link(OBJECT(vrng),
2490 OBJECT(vrng->vdev.conf.rng), "rng",
2491 NULL);
2492 }
2493
2494 static void virtio_rng_pci_class_init(ObjectClass *klass, void *data)
2495 {
2496 DeviceClass *dc = DEVICE_CLASS(klass);
2497 VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
2498 PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
2499
2500 k->realize = virtio_rng_pci_realize;
2501 set_bit(DEVICE_CATEGORY_MISC, dc->categories);
2502
2503 pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
2504 pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_RNG;
2505 pcidev_k->revision = VIRTIO_PCI_ABI_VERSION;
2506 pcidev_k->class_id = PCI_CLASS_OTHERS;
2507 }
2508
2509 static void virtio_rng_initfn(Object *obj)
2510 {
2511 VirtIORngPCI *dev = VIRTIO_RNG_PCI(obj);
2512
2513 virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
2514 TYPE_VIRTIO_RNG);
2515 }
2516
2517 static const TypeInfo virtio_rng_pci_info = {
2518 .name = TYPE_VIRTIO_RNG_PCI,
2519 .parent = TYPE_VIRTIO_PCI,
2520 .instance_size = sizeof(VirtIORngPCI),
2521 .instance_init = virtio_rng_initfn,
2522 .class_init = virtio_rng_pci_class_init,
2523 };
2524
2525 /* virtio-input-pci */
2526
2527 static Property virtio_input_pci_properties[] = {
2528 DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
2529 DEFINE_PROP_END_OF_LIST(),
2530 };
2531
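/* virtio-input devices are modern-only, so force virtio 1.0 before
 * realizing the child device. */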
2532 static void virtio_input_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
2533 {
2534 VirtIOInputPCI *vinput = VIRTIO_INPUT_PCI(vpci_dev);
2535 DeviceState *vdev = DEVICE(&vinput->vdev);
2536
2537 qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
2538 virtio_pci_force_virtio_1(vpci_dev);
2539 object_property_set_bool(OBJECT(vdev), true, "realized", errp);
2540 }
2541
2542 static void virtio_input_pci_class_init(ObjectClass *klass, void *data)
2543 {
2544 DeviceClass *dc = DEVICE_CLASS(klass);
2545 VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
2546 PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
2547
2548 dc->props = virtio_input_pci_properties;
2549 k->realize = virtio_input_pci_realize;
2550 set_bit(DEVICE_CATEGORY_INPUT, dc->categories);
2551
2552 pcidev_k->class_id = PCI_CLASS_INPUT_OTHER;
2553 }
2554
2555 static void virtio_input_hid_kbd_pci_class_init(ObjectClass *klass, void *data)
2556 {
2557 PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
2558
2559 pcidev_k->class_id = PCI_CLASS_INPUT_KEYBOARD;
2560 }
2561
2562 static void virtio_input_hid_mouse_pci_class_init(ObjectClass *klass,
2563 void *data)
2564 {
2565 PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
2566
2567 pcidev_k->class_id = PCI_CLASS_INPUT_MOUSE;
2568 }
2569
2570 static void virtio_keyboard_initfn(Object *obj)
2571 {
2572 VirtIOInputHIDPCI *dev = VIRTIO_INPUT_HID_PCI(obj);
2573
2574 virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
2575 TYPE_VIRTIO_KEYBOARD);
2576 }
2577
2578 static void virtio_mouse_initfn(Object *obj)
2579 {
2580 VirtIOInputHIDPCI *dev = VIRTIO_INPUT_HID_PCI(obj);
2581
2582 virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
2583 TYPE_VIRTIO_MOUSE);
2584 }
2585
2586 static void virtio_tablet_initfn(Object *obj)
2587 {
2588 VirtIOInputHIDPCI *dev = VIRTIO_INPUT_HID_PCI(obj);
2589
2590 virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
2591 TYPE_VIRTIO_TABLET);
2592 }
2593
2594 static const TypeInfo virtio_input_pci_info = {
2595 .name = TYPE_VIRTIO_INPUT_PCI,
2596 .parent = TYPE_VIRTIO_PCI,
2597 .instance_size = sizeof(VirtIOInputPCI),
2598 .class_init = virtio_input_pci_class_init,
2599 .abstract = true,
2600 };
2601
2602 static const TypeInfo virtio_input_hid_pci_info = {
2603 .name = TYPE_VIRTIO_INPUT_HID_PCI,
2604 .parent = TYPE_VIRTIO_INPUT_PCI,
2605 .instance_size = sizeof(VirtIOInputHIDPCI),
2606 .abstract = true,
2607 };
2608
2609 static const TypeInfo virtio_keyboard_pci_info = {
2610 .name = TYPE_VIRTIO_KEYBOARD_PCI,
2611 .parent = TYPE_VIRTIO_INPUT_HID_PCI,
2612 .class_init = virtio_input_hid_kbd_pci_class_init,
2613 .instance_size = sizeof(VirtIOInputHIDPCI),
2614 .instance_init = virtio_keyboard_initfn,
2615 };
2616
2617 static const TypeInfo virtio_mouse_pci_info = {
2618 .name = TYPE_VIRTIO_MOUSE_PCI,
2619 .parent = TYPE_VIRTIO_INPUT_HID_PCI,
2620 .class_init = virtio_input_hid_mouse_pci_class_init,
2621 .instance_size = sizeof(VirtIOInputHIDPCI),
2622 .instance_init = virtio_mouse_initfn,
2623 };
2624
2625 static const TypeInfo virtio_tablet_pci_info = {
2626 .name = TYPE_VIRTIO_TABLET_PCI,
2627 .parent = TYPE_VIRTIO_INPUT_HID_PCI,
2628 .instance_size = sizeof(VirtIOInputHIDPCI),
2629 .instance_init = virtio_tablet_initfn,
2630 };
2631
2632 #ifdef CONFIG_LINUX
2633 static void virtio_host_initfn(Object *obj)
2634 {
2635 VirtIOInputHostPCI *dev = VIRTIO_INPUT_HOST_PCI(obj);
2636
2637 virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
2638 TYPE_VIRTIO_INPUT_HOST);
2639 }
2640
2641 static const TypeInfo virtio_host_pci_info = {
2642 .name = TYPE_VIRTIO_INPUT_HOST_PCI,
2643 .parent = TYPE_VIRTIO_INPUT_PCI,
2644 .instance_size = sizeof(VirtIOInputHostPCI),
2645 .instance_init = virtio_host_initfn,
2646 };
2647 #endif
2648
2649 /* virtio-pci-bus */
2650
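/* Create the "virtio-bus" that connects the proxy to its single child
 * virtio device. */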
2651 static void virtio_pci_bus_new(VirtioBusState *bus, size_t bus_size,
2652 VirtIOPCIProxy *dev)
2653 {
2654 DeviceState *qdev = DEVICE(dev);
2655 char virtio_bus_name[] = "virtio-bus";
2656
2657 qbus_create_inplace(bus, bus_size, TYPE_VIRTIO_PCI_BUS, qdev,
2658 virtio_bus_name);
2659 }
2660
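/* Wire the generic virtio bus callbacks to their PCI transport
 * implementations. */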
2661 static void virtio_pci_bus_class_init(ObjectClass *klass, void *data)
2662 {
2663 BusClass *bus_class = BUS_CLASS(klass);
2664 VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);
2665 bus_class->max_dev = 1;
2666 k->notify = virtio_pci_notify;
2667 k->save_config = virtio_pci_save_config;
2668 k->load_config = virtio_pci_load_config;
2669 k->save_queue = virtio_pci_save_queue;
2670 k->load_queue = virtio_pci_load_queue;
2671 k->save_extra_state = virtio_pci_save_extra_state;
2672 k->load_extra_state = virtio_pci_load_extra_state;
2673 k->has_extra_state = virtio_pci_has_extra_state;
2674 k->query_guest_notifiers = virtio_pci_query_guest_notifiers;
2675 k->set_guest_notifiers = virtio_pci_set_guest_notifiers;
2676 k->set_host_notifier_mr = virtio_pci_set_host_notifier_mr;
2677 k->vmstate_change = virtio_pci_vmstate_change;
2678 k->pre_plugged = virtio_pci_pre_plugged;
2679 k->device_plugged = virtio_pci_device_plugged;
2680 k->device_unplugged = virtio_pci_device_unplugged;
2681 k->query_nvectors = virtio_pci_query_nvectors;
2682 k->ioeventfd_enabled = virtio_pci_ioeventfd_enabled;
2683 k->ioeventfd_assign = virtio_pci_ioeventfd_assign;
2684 k->get_dma_as = virtio_pci_get_dma_as;
2685 }
2686
2687 static const TypeInfo virtio_pci_bus_info = {
2688 .name = TYPE_VIRTIO_PCI_BUS,
2689 .parent = TYPE_VIRTIO_BUS,
2690 .instance_size = sizeof(VirtioPCIBusState),
2691 .class_init = virtio_pci_bus_class_init,
2692 };
2693
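/* Register the abstract proxy and bus types together with every concrete
 * virtio-pci device built into this configuration. */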
2694 static void virtio_pci_register_types(void)
2695 {
2696 type_register_static(&virtio_rng_pci_info);
2697 type_register_static(&virtio_input_pci_info);
2698 type_register_static(&virtio_input_hid_pci_info);
2699 type_register_static(&virtio_keyboard_pci_info);
2700 type_register_static(&virtio_mouse_pci_info);
2701 type_register_static(&virtio_tablet_pci_info);
2702 #ifdef CONFIG_LINUX
2703 type_register_static(&virtio_host_pci_info);
2704 #endif
2705 type_register_static(&virtio_pci_bus_info);
2706 type_register_static(&virtio_pci_info);
2707 #ifdef CONFIG_VIRTFS
2708 type_register_static(&virtio_9p_pci_info);
2709 #endif
2710 type_register_static(&virtio_blk_pci_info);
2711 #if defined(CONFIG_VHOST_USER) && defined(CONFIG_LINUX)
2712 type_register_static(&vhost_user_blk_pci_info);
2713 #endif
2714 type_register_static(&virtio_scsi_pci_info);
2715 type_register_static(&virtio_balloon_pci_info);
2716 type_register_static(&virtio_serial_pci_info);
2717 type_register_static(&virtio_net_pci_info);
2718 #ifdef CONFIG_VHOST_SCSI
2719 type_register_static(&vhost_scsi_pci_info);
2720 #endif
2721 #if defined(CONFIG_VHOST_USER) && defined(CONFIG_LINUX)
2722 type_register_static(&vhost_user_scsi_pci_info);
2723 #endif
2724 #ifdef CONFIG_VHOST_VSOCK
2725 type_register_static(&vhost_vsock_pci_info);
2726 #endif
2727 }
2728
2729 type_init(virtio_pci_register_types)