/*
 * vfio based device assignment support
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Alex Williamson <alex.williamson@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Based on qemu-kvm device-assignment:
 *  Adapted for KVM by Qumranet.
 *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
 *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
 *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
 *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
 *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
 */

#include "qemu/osdep.h"
#include <linux/vfio.h>
#include <sys/ioctl.h>

#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/pci/pci_bridge.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "qemu/option.h"
#include "qemu/range.h"
#include "qemu/units.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "pci.h"
#include "trace.h"
#include "qapi/error.h"

#define TYPE_VFIO_PCI "vfio-pci"
#define PCI_VFIO(obj)   OBJECT_CHECK(VFIOPCIDevice, obj, TYPE_VFIO_PCI)

#define TYPE_VFIO_PCI_NOHOTPLUG "vfio-pci-nohotplug"

static void vfio_disable_interrupts(VFIOPCIDevice *vdev);
static void vfio_mmap_set_enabled(VFIOPCIDevice *vdev, bool enabled);

/*
 * Disabling BAR mmapping can be slow, but toggling it around INTx can
 * also be a huge overhead.  We try to get the best of both worlds by
 * waiting until an interrupt to disable mmaps (subsequent transitions
 * to the same state are effectively no overhead).  If the interrupt has
 * been serviced and the time gap is long enough, we re-enable mmaps for
 * performance.  This works well for things like graphics cards, which
 * may not use their interrupt at all and are penalized to an unusable
 * level by read/write BAR traps.  Other devices, like NICs, have more
 * regular interrupts and see much better latency by staying in non-mmap
 * mode.  We therefore set the default mmap_timeout such that a ping
 * is just enough to keep the mmap disabled.  Users can experiment with
 * other options with the x-intx-mmap-timeout-ms parameter (a value of
 * zero disables the timer).
 */
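/*
 * Editor's note: a minimal usage sketch of the property mentioned above
 * (the host address is hypothetical).  A device with regular interrupts
 * can disable the timer so INTx operation stays in non-mmap mode:
 *
 *   qemu-system-x86_64 ... \
 *       -device vfio-pci,host=0000:02:00.0,x-intx-mmap-timeout-ms=0
 */
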
static void vfio_intx_mmap_enable(void *opaque)
{
    VFIOPCIDevice *vdev = opaque;

    if (vdev->intx.pending) {
        timer_mod(vdev->intx.mmap_timer,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vdev->intx.mmap_timeout);
        return;
    }

    vfio_mmap_set_enabled(vdev, true);
}

static void vfio_intx_interrupt(void *opaque)
{
    VFIOPCIDevice *vdev = opaque;

    if (!event_notifier_test_and_clear(&vdev->intx.interrupt)) {
        return;
    }

    trace_vfio_intx_interrupt(vdev->vbasedev.name, 'A' + vdev->intx.pin);

    vdev->intx.pending = true;
    pci_irq_assert(&vdev->pdev);
    vfio_mmap_set_enabled(vdev, false);
    if (vdev->intx.mmap_timeout) {
        timer_mod(vdev->intx.mmap_timer,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vdev->intx.mmap_timeout);
    }
}

static void vfio_intx_eoi(VFIODevice *vbasedev)
{
    VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);

    if (!vdev->intx.pending) {
        return;
    }

    trace_vfio_intx_eoi(vbasedev->name);

    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);
    vfio_unmask_single_irqindex(vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
}

static void vfio_intx_enable_kvm(VFIOPCIDevice *vdev, Error **errp)
{
#ifdef CONFIG_KVM
    struct kvm_irqfd irqfd = {
        .fd = event_notifier_get_fd(&vdev->intx.interrupt),
        .gsi = vdev->intx.route.irq,
        .flags = KVM_IRQFD_FLAG_RESAMPLE,
    };
    Error *err = NULL;

    if (vdev->no_kvm_intx || !kvm_irqfds_enabled() ||
        vdev->intx.route.mode != PCI_INTX_ENABLED ||
        !kvm_resamplefds_enabled()) {
        return;
    }

    /* Get to a known interrupt state */
    qemu_set_fd_handler(irqfd.fd, NULL, NULL, vdev);
    vfio_mask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);

    /* Get an eventfd for resample/unmask */
    if (event_notifier_init(&vdev->intx.unmask, 0)) {
        error_setg(errp, "event_notifier_init failed eoi");
        goto fail;
    }

    /* KVM triggers it, VFIO listens for it */
    irqfd.resamplefd = event_notifier_get_fd(&vdev->intx.unmask);

    if (kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd)) {
        error_setg_errno(errp, errno, "failed to setup resample irqfd");
        goto fail_irqfd;
    }

    if (vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX, 0,
                               VFIO_IRQ_SET_ACTION_UNMASK,
                               irqfd.resamplefd, &err)) {
        error_propagate(errp, err);
        goto fail_vfio;
    }

    vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);

    vdev->intx.kvm_accel = true;

    trace_vfio_intx_enable_kvm(vdev->vbasedev.name);

    return;

fail_vfio:
    irqfd.flags = KVM_IRQFD_FLAG_DEASSIGN;
    kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd);
fail_irqfd:
    event_notifier_cleanup(&vdev->intx.unmask);
fail:
    qemu_set_fd_handler(irqfd.fd, vfio_intx_interrupt, NULL, vdev);
    vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
#endif
}

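/*
 * Editor's sketch of the accelerated INTx path configured above:
 *
 *   device INTx --> VFIO --> irqfd.fd ---------> KVM injects guest IRQ
 *   guest EOI ----> KVM --> irqfd.resamplefd --> VFIO unmasks device INTx
 *
 * QEMU drops out of the hot path entirely; the fail path above reclaims
 * the eventfd handler only when acceleration cannot be used.
 */
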
static void vfio_intx_disable_kvm(VFIOPCIDevice *vdev)
{
#ifdef CONFIG_KVM
    struct kvm_irqfd irqfd = {
        .fd = event_notifier_get_fd(&vdev->intx.interrupt),
        .gsi = vdev->intx.route.irq,
        .flags = KVM_IRQFD_FLAG_DEASSIGN,
    };

    if (!vdev->intx.kvm_accel) {
        return;
    }

    /*
     * Get to a known state, hardware masked, QEMU ready to accept new
     * interrupts, QEMU IRQ de-asserted.
     */
    vfio_mask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);

    /* Tell KVM to stop listening for an INTx irqfd */
    if (kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd)) {
        error_report("vfio: Error: Failed to disable INTx irqfd: %m");
    }

    /* We only need to close the eventfd for VFIO to cleanup the kernel side */
    event_notifier_cleanup(&vdev->intx.unmask);

    /* QEMU starts listening for interrupt events. */
    qemu_set_fd_handler(irqfd.fd, vfio_intx_interrupt, NULL, vdev);

    vdev->intx.kvm_accel = false;

    /* If we've missed an event, let it re-fire through QEMU */
    vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);

    trace_vfio_intx_disable_kvm(vdev->vbasedev.name);
#endif
}

static void vfio_intx_update(PCIDevice *pdev)
{
    VFIOPCIDevice *vdev = PCI_VFIO(pdev);
    PCIINTxRoute route;
    Error *err = NULL;

    if (vdev->interrupt != VFIO_INT_INTx) {
        return;
    }

    route = pci_device_route_intx_to_irq(&vdev->pdev, vdev->intx.pin);

    if (!pci_intx_route_changed(&vdev->intx.route, &route)) {
        return; /* Nothing changed */
    }

    trace_vfio_intx_update(vdev->vbasedev.name,
                           vdev->intx.route.irq, route.irq);

    vfio_intx_disable_kvm(vdev);

    vdev->intx.route = route;

    if (route.mode != PCI_INTX_ENABLED) {
        return;
    }

    vfio_intx_enable_kvm(vdev, &err);
    if (err) {
        warn_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
    }

    /* Re-enable the interrupt in case we missed an EOI */
    vfio_intx_eoi(&vdev->vbasedev);
}

static int vfio_intx_enable(VFIOPCIDevice *vdev, Error **errp)
{
    uint8_t pin = vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1);
    Error *err = NULL;
    int32_t fd;
    int ret;

    if (!pin) {
        return 0;
    }

    vfio_disable_interrupts(vdev);

    vdev->intx.pin = pin - 1; /* Pin A (1) -> irq[0] */
    pci_config_set_interrupt_pin(vdev->pdev.config, pin);

#ifdef CONFIG_KVM
    /*
     * Only conditional to avoid generating error messages on platforms
     * where we won't actually use the result anyway.
     */
    if (kvm_irqfds_enabled() && kvm_resamplefds_enabled()) {
        vdev->intx.route = pci_device_route_intx_to_irq(&vdev->pdev,
                                                        vdev->intx.pin);
    }
#endif

    ret = event_notifier_init(&vdev->intx.interrupt, 0);
    if (ret) {
        error_setg_errno(errp, -ret, "event_notifier_init failed");
        return ret;
    }
    fd = event_notifier_get_fd(&vdev->intx.interrupt);
    qemu_set_fd_handler(fd, vfio_intx_interrupt, NULL, vdev);

    if (vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX, 0,
                               VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err)) {
        error_propagate(errp, err);
        qemu_set_fd_handler(fd, NULL, NULL, vdev);
        event_notifier_cleanup(&vdev->intx.interrupt);
        return -EINVAL;
    }

    vfio_intx_enable_kvm(vdev, &err);
    if (err) {
        warn_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
    }

    vdev->interrupt = VFIO_INT_INTx;

    trace_vfio_intx_enable(vdev->vbasedev.name);

    return 0;
}

static void vfio_intx_disable(VFIOPCIDevice *vdev)
{
    int fd;

    timer_del(vdev->intx.mmap_timer);
    vfio_intx_disable_kvm(vdev);
    vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);
    vfio_mmap_set_enabled(vdev, true);

    fd = event_notifier_get_fd(&vdev->intx.interrupt);
    qemu_set_fd_handler(fd, NULL, NULL, vdev);
    event_notifier_cleanup(&vdev->intx.interrupt);

    vdev->interrupt = VFIO_INT_NONE;

    trace_vfio_intx_disable(vdev->vbasedev.name);
}

static void vfio_msi_interrupt(void *opaque)
{
    VFIOMSIVector *vector = opaque;
    VFIOPCIDevice *vdev = vector->vdev;
    MSIMessage (*get_msg)(PCIDevice *dev, unsigned vector);
    void (*notify)(PCIDevice *dev, unsigned vector);
    MSIMessage msg;
    int nr = vector - vdev->msi_vectors;

    if (!event_notifier_test_and_clear(&vector->interrupt)) {
        return;
    }

    if (vdev->interrupt == VFIO_INT_MSIX) {
        get_msg = msix_get_message;
        notify = msix_notify;

        /* A masked vector firing needs to use the PBA, enable it */
        if (msix_is_masked(&vdev->pdev, nr)) {
            set_bit(nr, vdev->msix->pending);
            memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, true);
            trace_vfio_msix_pba_enable(vdev->vbasedev.name);
        }
    } else if (vdev->interrupt == VFIO_INT_MSI) {
        get_msg = msi_get_message;
        notify = msi_notify;
    } else {
        abort();
    }

    msg = get_msg(&vdev->pdev, nr);
    trace_vfio_msi_interrupt(vdev->vbasedev.name, nr, msg.address, msg.data);
    notify(&vdev->pdev, nr);
}

static int vfio_enable_vectors(VFIOPCIDevice *vdev, bool msix)
{
    struct vfio_irq_set *irq_set;
    int ret = 0, i, argsz;
    int32_t *fds;

    argsz = sizeof(*irq_set) + (vdev->nr_vectors * sizeof(*fds));

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = msix ? VFIO_PCI_MSIX_IRQ_INDEX : VFIO_PCI_MSI_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = vdev->nr_vectors;
    fds = (int32_t *)&irq_set->data;

    for (i = 0; i < vdev->nr_vectors; i++) {
        int32_t fd = -1;

        /*
         * MSI vs MSI-X - The guest has direct access to MSI mask and pending
         * bits, therefore we always use the KVM signaling path when it is
         * set up.  MSI-X mask and pending bits are emulated, so we want to
         * use the KVM signaling path only when configured and unmasked.
         */
        if (vdev->msi_vectors[i].use) {
            if (vdev->msi_vectors[i].virq < 0 ||
                (msix && msix_is_masked(&vdev->pdev, i))) {
                fd = event_notifier_get_fd(&vdev->msi_vectors[i].interrupt);
            } else {
                fd = event_notifier_get_fd(&vdev->msi_vectors[i].kvm_interrupt);
            }
        }

        fds[i] = fd;
    }

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);

    g_free(irq_set);

    return ret;
}

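/*
 * Editor's sketch: for a device with two enabled MSI vectors, the
 * variable-length payload handed to VFIO_DEVICE_SET_IRQS above looks
 * like this (the fd values are hypothetical):
 *
 *   struct vfio_irq_set *set = g_malloc0(sizeof(*set) + 2 * sizeof(int32_t));
 *
 *   set->argsz = sizeof(*set) + 2 * sizeof(int32_t);
 *   set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
 *   set->index = VFIO_PCI_MSI_IRQ_INDEX;
 *   set->start = 0;
 *   set->count = 2;
 *   ((int32_t *)&set->data)[0] = 42;   // eventfd for vector 0
 *   ((int32_t *)&set->data)[1] = 43;   // eventfd for vector 1
 *
 *   ioctl(device_fd, VFIO_DEVICE_SET_IRQS, set);   // returns 0 on success
 *
 * An fd of -1 in a slot de-assigns the trigger for that vector.
 */
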
static void vfio_add_kvm_msi_virq(VFIOPCIDevice *vdev, VFIOMSIVector *vector,
                                  int vector_n, bool msix)
{
    int virq;

    if ((msix && vdev->no_kvm_msix) || (!msix && vdev->no_kvm_msi)) {
        return;
    }

    if (event_notifier_init(&vector->kvm_interrupt, 0)) {
        return;
    }

    virq = kvm_irqchip_add_msi_route(kvm_state, vector_n, &vdev->pdev);
    if (virq < 0) {
        event_notifier_cleanup(&vector->kvm_interrupt);
        return;
    }

    if (kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, &vector->kvm_interrupt,
                                           NULL, virq) < 0) {
        kvm_irqchip_release_virq(kvm_state, virq);
        event_notifier_cleanup(&vector->kvm_interrupt);
        return;
    }

    vector->virq = virq;
}

static void vfio_remove_kvm_msi_virq(VFIOMSIVector *vector)
{
    kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, &vector->kvm_interrupt,
                                          vector->virq);
    kvm_irqchip_release_virq(kvm_state, vector->virq);
    vector->virq = -1;
    event_notifier_cleanup(&vector->kvm_interrupt);
}

static void vfio_update_kvm_msi_virq(VFIOMSIVector *vector, MSIMessage msg,
                                     PCIDevice *pdev)
{
    kvm_irqchip_update_msi_route(kvm_state, vector->virq, msg, pdev);
    kvm_irqchip_commit_routes(kvm_state);
}

static int vfio_msix_vector_do_use(PCIDevice *pdev, unsigned int nr,
                                   MSIMessage *msg, IOHandler *handler)
{
    VFIOPCIDevice *vdev = PCI_VFIO(pdev);
    VFIOMSIVector *vector;
    int ret;

    trace_vfio_msix_vector_do_use(vdev->vbasedev.name, nr);

    vector = &vdev->msi_vectors[nr];

    if (!vector->use) {
        vector->vdev = vdev;
        vector->virq = -1;
        if (event_notifier_init(&vector->interrupt, 0)) {
            error_report("vfio: Error: event_notifier_init failed");
        }
        vector->use = true;
        msix_vector_use(pdev, nr);
    }

    qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                        handler, NULL, vector);

    /*
     * Attempt to enable route through KVM irqchip,
     * default to userspace handling if unavailable.
     */
    if (vector->virq >= 0) {
        if (!msg) {
            vfio_remove_kvm_msi_virq(vector);
        } else {
            vfio_update_kvm_msi_virq(vector, *msg, pdev);
        }
    } else {
        if (msg) {
            vfio_add_kvm_msi_virq(vdev, vector, nr, true);
        }
    }

    /*
     * We don't want to have the host allocate all possible MSI vectors
     * for a device if they're not in use, so we shut them down and
     * incrementally increase them as needed.
     */
    if (vdev->nr_vectors < nr + 1) {
        vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX);
        vdev->nr_vectors = nr + 1;
        ret = vfio_enable_vectors(vdev, true);
        if (ret) {
            error_report("vfio: failed to enable vectors, %d", ret);
        }
    } else {
        Error *err = NULL;
        int32_t fd;

        if (vector->virq >= 0) {
            fd = event_notifier_get_fd(&vector->kvm_interrupt);
        } else {
            fd = event_notifier_get_fd(&vector->interrupt);
        }

        if (vfio_set_irq_signaling(&vdev->vbasedev,
                                   VFIO_PCI_MSIX_IRQ_INDEX, nr,
                                   VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err)) {
            error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
        }
    }

    /* Disable PBA emulation when nothing more is pending. */
    clear_bit(nr, vdev->msix->pending);
    if (find_first_bit(vdev->msix->pending,
                       vdev->nr_vectors) == vdev->nr_vectors) {
        memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, false);
        trace_vfio_msix_pba_disable(vdev->vbasedev.name);
    }

    return 0;
}

static int vfio_msix_vector_use(PCIDevice *pdev,
                                unsigned int nr, MSIMessage msg)
{
    return vfio_msix_vector_do_use(pdev, nr, &msg, vfio_msi_interrupt);
}

static void vfio_msix_vector_release(PCIDevice *pdev, unsigned int nr)
{
    VFIOPCIDevice *vdev = PCI_VFIO(pdev);
    VFIOMSIVector *vector = &vdev->msi_vectors[nr];

    trace_vfio_msix_vector_release(vdev->vbasedev.name, nr);

    /*
     * There are still old guests that mask and unmask vectors on every
     * interrupt.  If we're using QEMU bypass with a KVM irqfd, leave all of
     * the KVM setup in place, simply switch VFIO to use the non-bypass
     * eventfd.  We'll then fire the interrupt through QEMU and the MSI-X
     * core will mask the interrupt and set pending bits, allowing it to
     * be re-asserted on unmask.  Nothing to do if already using QEMU mode.
     */
    if (vector->virq >= 0) {
        int32_t fd = event_notifier_get_fd(&vector->interrupt);
        Error *err = NULL;

        if (vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX, nr,
                                   VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err)) {
            error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
        }
    }
}

static void vfio_msix_enable(VFIOPCIDevice *vdev)
{
    vfio_disable_interrupts(vdev);

    vdev->msi_vectors = g_new0(VFIOMSIVector, vdev->msix->entries);

    vdev->interrupt = VFIO_INT_MSIX;

    /*
     * Some communication channels between VF & PF or PF & fw rely on the
     * physical state of the device and expect that enabling MSI-X from the
     * guest enables the same on the host.  When our guest is Linux, the
     * guest driver call to pci_enable_msix() sets the enabling bit in the
     * MSI-X capability, but leaves the vector table masked.  We therefore
     * can't rely on a vector_use callback (from request_irq() in the guest)
     * to switch the physical device into MSI-X mode because that may come a
     * long time after pci_enable_msix().  This code enables vector 0 with
     * triggering to userspace, then immediately releases the vector, leaving
     * the physical device with no vectors enabled, but MSI-X enabled, just
     * like the guest view.
     */
    vfio_msix_vector_do_use(&vdev->pdev, 0, NULL, NULL);
    vfio_msix_vector_release(&vdev->pdev, 0);

    if (msix_set_vector_notifiers(&vdev->pdev, vfio_msix_vector_use,
                                  vfio_msix_vector_release, NULL)) {
        error_report("vfio: msix_set_vector_notifiers failed");
    }

    trace_vfio_msix_enable(vdev->vbasedev.name);
}

static void vfio_msi_enable(VFIOPCIDevice *vdev)
{
    int ret, i;

    vfio_disable_interrupts(vdev);

    vdev->nr_vectors = msi_nr_vectors_allocated(&vdev->pdev);
retry:
    vdev->msi_vectors = g_new0(VFIOMSIVector, vdev->nr_vectors);

    for (i = 0; i < vdev->nr_vectors; i++) {
        VFIOMSIVector *vector = &vdev->msi_vectors[i];

        vector->vdev = vdev;
        vector->virq = -1;
        vector->use = true;

        if (event_notifier_init(&vector->interrupt, 0)) {
            error_report("vfio: Error: event_notifier_init failed");
        }

        qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                            vfio_msi_interrupt, NULL, vector);

        /*
         * Attempt to enable route through KVM irqchip,
         * default to userspace handling if unavailable.
         */
        vfio_add_kvm_msi_virq(vdev, vector, i, false);
    }

    /* Set interrupt type prior to possible interrupts */
    vdev->interrupt = VFIO_INT_MSI;

    ret = vfio_enable_vectors(vdev, false);
    if (ret) {
        if (ret < 0) {
            error_report("vfio: Error: Failed to setup MSI fds: %m");
        } else if (ret != vdev->nr_vectors) {
            error_report("vfio: Error: Failed to enable %d "
                         "MSI vectors, retry with %d", vdev->nr_vectors, ret);
        }

        for (i = 0; i < vdev->nr_vectors; i++) {
            VFIOMSIVector *vector = &vdev->msi_vectors[i];
            if (vector->virq >= 0) {
                vfio_remove_kvm_msi_virq(vector);
            }
            qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                                NULL, NULL, NULL);
            event_notifier_cleanup(&vector->interrupt);
        }

        g_free(vdev->msi_vectors);
        vdev->msi_vectors = NULL;

        if (ret > 0 && ret != vdev->nr_vectors) {
            vdev->nr_vectors = ret;
            goto retry;
        }
        vdev->nr_vectors = 0;

        /*
         * Failing to setup MSI doesn't really fall within any specification.
         * Let's try leaving interrupts disabled and hope the guest figures
         * out to fall back to INTx for this device.
         */
        error_report("vfio: Error: Failed to enable MSI");
        vdev->interrupt = VFIO_INT_NONE;

        return;
    }

    trace_vfio_msi_enable(vdev->vbasedev.name, vdev->nr_vectors);
}

static void vfio_msi_disable_common(VFIOPCIDevice *vdev)
{
    Error *err = NULL;
    int i;

    for (i = 0; i < vdev->nr_vectors; i++) {
        VFIOMSIVector *vector = &vdev->msi_vectors[i];
        if (vdev->msi_vectors[i].use) {
            if (vector->virq >= 0) {
                vfio_remove_kvm_msi_virq(vector);
            }
            qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                                NULL, NULL, NULL);
            event_notifier_cleanup(&vector->interrupt);
        }
    }

    g_free(vdev->msi_vectors);
    vdev->msi_vectors = NULL;
    vdev->nr_vectors = 0;
    vdev->interrupt = VFIO_INT_NONE;

    vfio_intx_enable(vdev, &err);
    if (err) {
        error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
    }
}

static void vfio_msix_disable(VFIOPCIDevice *vdev)
{
    int i;

    msix_unset_vector_notifiers(&vdev->pdev);

    /*
     * MSI-X will only release vectors if MSI-X is still enabled on the
     * device, check through the rest and release them ourselves if necessary.
     */
    for (i = 0; i < vdev->nr_vectors; i++) {
        if (vdev->msi_vectors[i].use) {
            vfio_msix_vector_release(&vdev->pdev, i);
            msix_vector_unuse(&vdev->pdev, i);
        }
    }

    if (vdev->nr_vectors) {
        vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX);
    }

    vfio_msi_disable_common(vdev);

    memset(vdev->msix->pending, 0,
           BITS_TO_LONGS(vdev->msix->entries) * sizeof(unsigned long));

    trace_vfio_msix_disable(vdev->vbasedev.name);
}

static void vfio_msi_disable(VFIOPCIDevice *vdev)
{
    vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSI_IRQ_INDEX);
    vfio_msi_disable_common(vdev);

    trace_vfio_msi_disable(vdev->vbasedev.name);
}

static void vfio_update_msi(VFIOPCIDevice *vdev)
{
    int i;

    for (i = 0; i < vdev->nr_vectors; i++) {
        VFIOMSIVector *vector = &vdev->msi_vectors[i];
        MSIMessage msg;

        if (!vector->use || vector->virq < 0) {
            continue;
        }

        msg = msi_get_message(&vdev->pdev, i);
        vfio_update_kvm_msi_virq(vector, msg, &vdev->pdev);
    }
}

static void vfio_pci_load_rom(VFIOPCIDevice *vdev)
{
    struct vfio_region_info *reg_info;
    uint64_t size;
    off_t off = 0;
    ssize_t bytes;

    if (vfio_get_region_info(&vdev->vbasedev,
                             VFIO_PCI_ROM_REGION_INDEX, &reg_info)) {
        error_report("vfio: Error getting ROM info: %m");
        return;
    }

    trace_vfio_pci_load_rom(vdev->vbasedev.name, (unsigned long)reg_info->size,
                            (unsigned long)reg_info->offset,
                            (unsigned long)reg_info->flags);

    vdev->rom_size = size = reg_info->size;
    vdev->rom_offset = reg_info->offset;

    g_free(reg_info);

    if (!vdev->rom_size) {
        vdev->rom_read_failed = true;
        error_report("vfio-pci: Cannot read device rom at "
                     "%s", vdev->vbasedev.name);
        error_printf("Device option ROM contents are probably invalid "
                     "(check dmesg).\nSkip option ROM probe with rombar=0, "
                     "or load from file with romfile=\n");
        return;
    }

    vdev->rom = g_malloc(size);
    memset(vdev->rom, 0xff, size);

    while (size) {
        bytes = pread(vdev->vbasedev.fd, vdev->rom + off,
                      size, vdev->rom_offset + off);
        if (bytes == 0) {
            break;
        } else if (bytes > 0) {
            off += bytes;
            size -= bytes;
        } else {
            if (errno == EINTR || errno == EAGAIN) {
                continue;
            }
            error_report("vfio: Error reading device ROM: %m");
            break;
        }
    }

    /*
     * Test the ROM signature against our device, if the vendor is correct
     * but the device ID doesn't match, store the correct device ID and
     * recompute the checksum.  Intel IGD devices need this and are known
     * to have bogus checksums so we can't simply adjust the checksum.
     */
    if (pci_get_word(vdev->rom) == 0xaa55 &&
        pci_get_word(vdev->rom + 0x18) + 8 < vdev->rom_size &&
        !memcmp(vdev->rom + pci_get_word(vdev->rom + 0x18), "PCIR", 4)) {
        uint16_t vid, did;

        vid = pci_get_word(vdev->rom + pci_get_word(vdev->rom + 0x18) + 4);
        did = pci_get_word(vdev->rom + pci_get_word(vdev->rom + 0x18) + 6);

        if (vid == vdev->vendor_id && did != vdev->device_id) {
            int i;
            uint8_t csum, *data = vdev->rom;

            pci_set_word(vdev->rom + pci_get_word(vdev->rom + 0x18) + 6,
                         vdev->device_id);
            data[6] = 0;

            for (csum = 0, i = 0; i < vdev->rom_size; i++) {
                csum += data[i];
            }

            data[6] = -csum;
        }
    }
}

static uint64_t vfio_rom_read(void *opaque, hwaddr addr, unsigned size)
{
    VFIOPCIDevice *vdev = opaque;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } val;
    uint64_t data = 0;

    /* Load the ROM lazily when the guest tries to read it */
    if (unlikely(!vdev->rom && !vdev->rom_read_failed)) {
        vfio_pci_load_rom(vdev);
    }

    memcpy(&val, vdev->rom + addr,
           (addr < vdev->rom_size) ? MIN(size, vdev->rom_size - addr) : 0);

    switch (size) {
    case 1:
        data = val.byte;
        break;
    case 2:
        data = le16_to_cpu(val.word);
        break;
    case 4:
        data = le32_to_cpu(val.dword);
        break;
    default:
        hw_error("vfio: unsupported read size, %d bytes\n", size);
        break;
    }

    trace_vfio_rom_read(vdev->vbasedev.name, addr, size, data);

    return data;
}

static void vfio_rom_write(void *opaque, hwaddr addr,
                           uint64_t data, unsigned size)
{
}

static const MemoryRegionOps vfio_rom_ops = {
    .read = vfio_rom_read,
    .write = vfio_rom_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static void vfio_pci_size_rom(VFIOPCIDevice *vdev)
{
    uint32_t orig, size = cpu_to_le32((uint32_t)PCI_ROM_ADDRESS_MASK);
    off_t offset = vdev->config_offset + PCI_ROM_ADDRESS;
    DeviceState *dev = DEVICE(vdev);
    char *name;
    int fd = vdev->vbasedev.fd;

    if (vdev->pdev.romfile || !vdev->pdev.rom_bar) {
        /* Since pci handles romfile, just print a message and return */
        if (vfio_blacklist_opt_rom(vdev) && vdev->pdev.romfile) {
            warn_report("Device at %s is known to cause system instability"
                        " issues during option rom execution",
                        vdev->vbasedev.name);
            error_printf("Proceeding anyway since user specified romfile\n");
        }
        return;
    }

    /*
     * Use the same size ROM BAR as the physical device.  The contents
     * will get filled in later when the guest tries to read it.
     */
    if (pread(fd, &orig, 4, offset) != 4 ||
        pwrite(fd, &size, 4, offset) != 4 ||
        pread(fd, &size, 4, offset) != 4 ||
        pwrite(fd, &orig, 4, offset) != 4) {
        error_report("%s(%s) failed: %m", __func__, vdev->vbasedev.name);
        return;
    }

    size = ~(le32_to_cpu(size) & PCI_ROM_ADDRESS_MASK) + 1;

    if (!size) {
        return;
    }

    if (vfio_blacklist_opt_rom(vdev)) {
        if (dev->opts && qemu_opt_get(dev->opts, "rombar")) {
            warn_report("Device at %s is known to cause system instability"
                        " issues during option rom execution",
                        vdev->vbasedev.name);
            error_printf("Proceeding anyway since user specified"
                         " non zero value for rombar\n");
        } else {
            warn_report("Rom loading for device at %s has been disabled"
                        " due to system instability issues",
                        vdev->vbasedev.name);
            error_printf("Specify rombar=1 or romfile to force\n");
            return;
        }
    }

    trace_vfio_pci_size_rom(vdev->vbasedev.name, size);

    name = g_strdup_printf("vfio[%s].rom", vdev->vbasedev.name);

    memory_region_init_io(&vdev->pdev.rom, OBJECT(vdev),
                          &vfio_rom_ops, vdev, name, size);
    g_free(name);

    pci_register_bar(&vdev->pdev, PCI_ROM_SLOT,
                     PCI_BASE_ADDRESS_SPACE_MEMORY, &vdev->pdev.rom);

    vdev->rom_read_failed = false;
}

void vfio_vga_write(void *opaque, hwaddr addr,
                    uint64_t data, unsigned size)
{
    VFIOVGARegion *region = opaque;
    VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]);
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    off_t offset = vga->fd_offset + region->offset + addr;

    switch (size) {
    case 1:
        buf.byte = data;
        break;
    case 2:
        buf.word = cpu_to_le16(data);
        break;
    case 4:
        buf.dword = cpu_to_le32(data);
        break;
    default:
        hw_error("vfio: unsupported write size, %d bytes", size);
        break;
    }

    if (pwrite(vga->fd, &buf, size, offset) != size) {
        error_report("%s(,0x%"HWADDR_PRIx", 0x%"PRIx64", %d) failed: %m",
                     __func__, region->offset + addr, data, size);
    }

    trace_vfio_vga_write(region->offset + addr, data, size);
}

uint64_t vfio_vga_read(void *opaque, hwaddr addr, unsigned size)
{
    VFIOVGARegion *region = opaque;
    VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]);
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    uint64_t data = 0;
    off_t offset = vga->fd_offset + region->offset + addr;

    if (pread(vga->fd, &buf, size, offset) != size) {
        error_report("%s(,0x%"HWADDR_PRIx", %d) failed: %m",
                     __func__, region->offset + addr, size);
        return (uint64_t)-1;
    }

    switch (size) {
    case 1:
        data = buf.byte;
        break;
    case 2:
        data = le16_to_cpu(buf.word);
        break;
    case 4:
        data = le32_to_cpu(buf.dword);
        break;
    default:
        hw_error("vfio: unsupported read size, %d bytes", size);
        break;
    }

    trace_vfio_vga_read(region->offset + addr, size, data);

    return data;
}

static const MemoryRegionOps vfio_vga_ops = {
    .read = vfio_vga_read,
    .write = vfio_vga_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

/*
 * Expand the memory region of a sub-page (size < PAGE_SIZE) MMIO BAR to
 * page size if the BAR occupies an exclusive page on the host, so that
 * the BAR can be mmap'd into the guest.  The sub-page BAR may not occupy
 * an exclusive page in the guest, however, so the priority of the
 * expanded memory region is set to zero in case it overlaps with BARs
 * that share the same guest page.  We must also restore the size of the
 * sub-page BAR when its base address is changed in the guest and is no
 * longer page aligned.
 */
static void vfio_sub_page_bar_update_mapping(PCIDevice *pdev, int bar)
{
    VFIOPCIDevice *vdev = PCI_VFIO(pdev);
    VFIORegion *region = &vdev->bars[bar].region;
    MemoryRegion *mmap_mr, *region_mr, *base_mr;
    PCIIORegion *r;
    pcibus_t bar_addr;
    uint64_t size = region->size;

    /* Make sure that the whole region is allowed to be mmapped */
    if (region->nr_mmaps != 1 || !region->mmaps[0].mmap ||
        region->mmaps[0].size != region->size) {
        return;
    }

    r = &pdev->io_regions[bar];
    bar_addr = r->addr;
    base_mr = vdev->bars[bar].mr;
    region_mr = region->mem;
    mmap_mr = &region->mmaps[0].mem;

    /* If BAR is mapped and page aligned, update to fill PAGE_SIZE */
    if (bar_addr != PCI_BAR_UNMAPPED &&
        !(bar_addr & ~qemu_real_host_page_mask)) {
        size = qemu_real_host_page_size;
    }

    memory_region_transaction_begin();

    if (vdev->bars[bar].size < size) {
        memory_region_set_size(base_mr, size);
    }
    memory_region_set_size(region_mr, size);
    memory_region_set_size(mmap_mr, size);
    if (size != vdev->bars[bar].size && memory_region_is_mapped(base_mr)) {
        memory_region_del_subregion(r->address_space, base_mr);
        memory_region_add_subregion_overlap(r->address_space,
                                            bar_addr, base_mr, 0);
    }

    memory_region_transaction_commit();
}

uint32_t vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len)
{
    VFIOPCIDevice *vdev = PCI_VFIO(pdev);
    uint32_t emu_bits = 0, emu_val = 0, phys_val = 0, val;

    memcpy(&emu_bits, vdev->emulated_config_bits + addr, len);
    emu_bits = le32_to_cpu(emu_bits);

    if (emu_bits) {
        emu_val = pci_default_read_config(pdev, addr, len);
    }

    if (~emu_bits & (0xffffffffU >> (32 - len * 8))) {
        ssize_t ret;

        ret = pread(vdev->vbasedev.fd, &phys_val, len,
                    vdev->config_offset + addr);
        if (ret != len) {
            error_report("%s(%s, 0x%x, 0x%x) failed: %m",
                         __func__, vdev->vbasedev.name, addr, len);
            return -errno;
        }
        phys_val = le32_to_cpu(phys_val);
    }

    val = (emu_val & emu_bits) | (phys_val & ~emu_bits);

    trace_vfio_pci_read_config(vdev->vbasedev.name, addr, len, val);

    return val;
}

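/*
 * Editor's example of the merge above: a 2-byte read where only the low
 * byte is emulated (emu_bits = 0x00ff) with emu_val = 0x1234 and
 * phys_val = 0xabcd yields
 *
 *   val = (0x1234 & 0x00ff) | (0xabcd & 0xff00) = 0xab34
 *
 * i.e. the guest sees QEMU's low byte and the device's high byte.
 */
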
void vfio_pci_write_config(PCIDevice *pdev,
                           uint32_t addr, uint32_t val, int len)
{
    VFIOPCIDevice *vdev = PCI_VFIO(pdev);
    uint32_t val_le = cpu_to_le32(val);

    trace_vfio_pci_write_config(vdev->vbasedev.name, addr, val, len);

    /* Write everything to VFIO, let it filter out what we can't write */
    if (pwrite(vdev->vbasedev.fd, &val_le, len, vdev->config_offset + addr)
        != len) {
        error_report("%s(%s, 0x%x, 0x%x, 0x%x) failed: %m",
                     __func__, vdev->vbasedev.name, addr, val, len);
    }

    /* MSI/MSI-X Enabling/Disabling */
    if (pdev->cap_present & QEMU_PCI_CAP_MSI &&
        ranges_overlap(addr, len, pdev->msi_cap, vdev->msi_cap_size)) {
        int is_enabled, was_enabled = msi_enabled(pdev);

        pci_default_write_config(pdev, addr, val, len);

        is_enabled = msi_enabled(pdev);

        if (!was_enabled) {
            if (is_enabled) {
                vfio_msi_enable(vdev);
            }
        } else {
            if (!is_enabled) {
                vfio_msi_disable(vdev);
            } else {
                vfio_update_msi(vdev);
            }
        }
    } else if (pdev->cap_present & QEMU_PCI_CAP_MSIX &&
               ranges_overlap(addr, len, pdev->msix_cap, MSIX_CAP_LENGTH)) {
        int is_enabled, was_enabled = msix_enabled(pdev);

        pci_default_write_config(pdev, addr, val, len);

        is_enabled = msix_enabled(pdev);

        if (!was_enabled && is_enabled) {
            vfio_msix_enable(vdev);
        } else if (was_enabled && !is_enabled) {
            vfio_msix_disable(vdev);
        }
    } else if (ranges_overlap(addr, len, PCI_BASE_ADDRESS_0, 24) ||
               range_covers_byte(addr, len, PCI_COMMAND)) {
        pcibus_t old_addr[PCI_NUM_REGIONS - 1];
        int bar;

        for (bar = 0; bar < PCI_ROM_SLOT; bar++) {
            old_addr[bar] = pdev->io_regions[bar].addr;
        }

        pci_default_write_config(pdev, addr, val, len);

        for (bar = 0; bar < PCI_ROM_SLOT; bar++) {
            if (old_addr[bar] != pdev->io_regions[bar].addr &&
                vdev->bars[bar].region.size > 0 &&
                vdev->bars[bar].region.size < qemu_real_host_page_size) {
                vfio_sub_page_bar_update_mapping(pdev, bar);
            }
        }
    } else {
        /* Write everything to QEMU to keep emulated bits correct */
        pci_default_write_config(pdev, addr, val, len);
    }
}

static void vfio_disable_interrupts(VFIOPCIDevice *vdev)
{
    /*
     * More complicated than it looks.  Disabling MSI/X transitions the
     * device to INTx mode (if supported).  Therefore we need to first
     * disable MSI/X and then cleanup by disabling INTx.
     */
    if (vdev->interrupt == VFIO_INT_MSIX) {
        vfio_msix_disable(vdev);
    } else if (vdev->interrupt == VFIO_INT_MSI) {
        vfio_msi_disable(vdev);
    }

    if (vdev->interrupt == VFIO_INT_INTx) {
        vfio_intx_disable(vdev);
    }
}

static int vfio_msi_setup(VFIOPCIDevice *vdev, int pos, Error **errp)
{
    uint16_t ctrl;
    bool msi_64bit, msi_maskbit;
    int ret, entries;
    Error *err = NULL;

    if (pread(vdev->vbasedev.fd, &ctrl, sizeof(ctrl),
              vdev->config_offset + pos + PCI_CAP_FLAGS) != sizeof(ctrl)) {
        error_setg_errno(errp, errno, "failed reading MSI PCI_CAP_FLAGS");
        return -errno;
    }
    ctrl = le16_to_cpu(ctrl);

    msi_64bit = !!(ctrl & PCI_MSI_FLAGS_64BIT);
    msi_maskbit = !!(ctrl & PCI_MSI_FLAGS_MASKBIT);
    entries = 1 << ((ctrl & PCI_MSI_FLAGS_QMASK) >> 1);

    trace_vfio_msi_setup(vdev->vbasedev.name, pos);

    ret = msi_init(&vdev->pdev, pos, entries, msi_64bit, msi_maskbit, &err);
    if (ret < 0) {
        if (ret == -ENOTSUP) {
            return 0;
        }
        error_propagate_prepend(errp, err, "msi_init failed: ");
        return ret;
    }
    vdev->msi_cap_size = 0xa + (msi_maskbit ? 0xa : 0) + (msi_64bit ? 0x4 : 0);

    return 0;
}

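/*
 * Editor's example of the sizing above, per the PCI MSI capability
 * layout: a 64-bit capable, per-vector-maskable function needs
 * 0xa + 0xa + 0x4 = 0x18 (24) bytes, while a minimal 32-bit,
 * non-maskable one needs only the base 0xa (10) bytes.
 */
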
static void vfio_pci_fixup_msix_region(VFIOPCIDevice *vdev)
{
    off_t start, end;
    VFIORegion *region = &vdev->bars[vdev->msix->table_bar].region;

    /*
     * If the host driver allows mapping of the MSI-X data, we map the
     * entire BAR and emulate the MSI-X table on top of it.
     */
    if (vfio_has_region_cap(&vdev->vbasedev, region->nr,
                            VFIO_REGION_INFO_CAP_MSIX_MAPPABLE)) {
        return;
    }

    /*
     * We expect to find a single mmap covering the whole BAR, anything else
     * means it's either unsupported or already setup.
     */
    if (region->nr_mmaps != 1 || region->mmaps[0].offset ||
        region->size != region->mmaps[0].size) {
        return;
    }

    /* MSI-X table start and end aligned to host page size */
    start = vdev->msix->table_offset & qemu_real_host_page_mask;
    end = REAL_HOST_PAGE_ALIGN((uint64_t)vdev->msix->table_offset +
                               (vdev->msix->entries * PCI_MSIX_ENTRY_SIZE));

    /*
     * Does the MSI-X table cover the beginning of the BAR?  The whole BAR?
     * NB - Host page size is necessarily a power of two and so is the PCI
     * BAR (not counting EA yet), therefore if we have host page aligned
     * @start and @end, then any remainder of the BAR before or after those
     * must be at least host page sized and therefore mmap'able.
     */
    if (!start) {
        if (end >= region->size) {
            region->nr_mmaps = 0;
            g_free(region->mmaps);
            region->mmaps = NULL;
            trace_vfio_msix_fixup(vdev->vbasedev.name,
                                  vdev->msix->table_bar, 0, 0);
        } else {
            region->mmaps[0].offset = end;
            region->mmaps[0].size = region->size - end;
            trace_vfio_msix_fixup(vdev->vbasedev.name,
                              vdev->msix->table_bar, region->mmaps[0].offset,
                              region->mmaps[0].offset + region->mmaps[0].size);
        }

    /* Maybe it's aligned at the end of the BAR */
    } else if (end >= region->size) {
        region->mmaps[0].size = start;
        trace_vfio_msix_fixup(vdev->vbasedev.name,
                              vdev->msix->table_bar, region->mmaps[0].offset,
                              region->mmaps[0].offset + region->mmaps[0].size);

    /* Otherwise it must split the BAR */
    } else {
        region->nr_mmaps = 2;
        region->mmaps = g_renew(VFIOMmap, region->mmaps, 2);

        memcpy(&region->mmaps[1], &region->mmaps[0], sizeof(VFIOMmap));

        region->mmaps[0].size = start;
        trace_vfio_msix_fixup(vdev->vbasedev.name,
                              vdev->msix->table_bar, region->mmaps[0].offset,
                              region->mmaps[0].offset + region->mmaps[0].size);

        region->mmaps[1].offset = end;
        region->mmaps[1].size = region->size - end;
        trace_vfio_msix_fixup(vdev->vbasedev.name,
                              vdev->msix->table_bar, region->mmaps[1].offset,
                              region->mmaps[1].offset + region->mmaps[1].size);
    }
}

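/*
 * Editor's example of the split case above, assuming 4 KiB host pages:
 * a 16 KiB BAR with an 8-entry table (128 bytes) at offset 0x1000 gives
 * start = 0x1000 and end = 0x2000, leaving two mmap'able windows:
 *
 *   [0x0000, 0x1000)  mmaps[0]
 *   [0x1000, 0x2000)  trapped, MSI-X table emulated by QEMU
 *   [0x2000, 0x4000)  mmaps[1]
 */
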
static void vfio_pci_relocate_msix(VFIOPCIDevice *vdev, Error **errp)
{
    int target_bar = -1;
    size_t msix_sz;

    if (!vdev->msix || vdev->msix_relo == OFF_AUTOPCIBAR_OFF) {
        return;
    }

    /* The actual minimum size of MSI-X structures */
    msix_sz = (vdev->msix->entries * PCI_MSIX_ENTRY_SIZE) +
              (QEMU_ALIGN_UP(vdev->msix->entries, 64) / 8);
    /* Round up to host pages, we don't want to share a page */
    msix_sz = REAL_HOST_PAGE_ALIGN(msix_sz);
    /* PCI BARs must be a power of 2 */
    msix_sz = pow2ceil(msix_sz);

    if (vdev->msix_relo == OFF_AUTOPCIBAR_AUTO) {
        /*
         * TODO: Lookup table for known devices.
         *
         * Logically we might use an algorithm here to select the BAR adding
         * the least additional MMIO space, but we cannot programmatically
         * predict the driver dependency on BAR ordering or sizing, therefore
         * 'auto' becomes a lookup for combinations reported to work.
         */
        if (target_bar < 0) {
            error_setg(errp, "No automatic MSI-X relocation available for "
                       "device %04x:%04x", vdev->vendor_id, vdev->device_id);
            return;
        }
    } else {
        target_bar = (int)(vdev->msix_relo - OFF_AUTOPCIBAR_BAR0);
    }

    /* I/O port BARs cannot host MSI-X structures */
    if (vdev->bars[target_bar].ioport) {
        error_setg(errp, "Invalid MSI-X relocation BAR %d, "
                   "I/O port BAR", target_bar);
        return;
    }

    /* Cannot use a BAR in the "shadow" of a 64-bit BAR */
    if (!vdev->bars[target_bar].size &&
         target_bar > 0 && vdev->bars[target_bar - 1].mem64) {
        error_setg(errp, "Invalid MSI-X relocation BAR %d, "
                   "consumed by 64-bit BAR %d", target_bar, target_bar - 1);
        return;
    }

    /* 2GB max size for 32-bit BARs, cannot double if already > 1G */
    if (vdev->bars[target_bar].size > 1 * GiB &&
        !vdev->bars[target_bar].mem64) {
        error_setg(errp, "Invalid MSI-X relocation BAR %d, "
                   "no space to extend 32-bit BAR", target_bar);
        return;
    }

    /*
     * If adding a new BAR, test if we can make it 64bit.  We make it
     * prefetchable since QEMU MSI-X emulation has no read side effects
     * and doing so makes mapping more flexible.
     */
    if (!vdev->bars[target_bar].size) {
        if (target_bar < (PCI_ROM_SLOT - 1) &&
            !vdev->bars[target_bar + 1].size) {
            vdev->bars[target_bar].mem64 = true;
            vdev->bars[target_bar].type = PCI_BASE_ADDRESS_MEM_TYPE_64;
        }
        vdev->bars[target_bar].type |= PCI_BASE_ADDRESS_MEM_PREFETCH;
        vdev->bars[target_bar].size = msix_sz;
        vdev->msix->table_offset = 0;
    } else {
        vdev->bars[target_bar].size = MAX(vdev->bars[target_bar].size * 2,
                                          msix_sz * 2);
        /*
         * Due to above size calc, MSI-X always starts halfway into the BAR,
         * which will always be a separate host page.
         */
        vdev->msix->table_offset = vdev->bars[target_bar].size / 2;
    }

    vdev->msix->table_bar = target_bar;
    vdev->msix->pba_bar = target_bar;
    /* Requires 8-byte alignment, but PCI_MSIX_ENTRY_SIZE guarantees that */
    vdev->msix->pba_offset = vdev->msix->table_offset +
                                  (vdev->msix->entries * PCI_MSIX_ENTRY_SIZE);

    trace_vfio_msix_relo(vdev->vbasedev.name,
                         vdev->msix->table_bar, vdev->msix->table_offset);
}

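/*
 * Editor's example of the sizing above: a device with 96 MSI-X entries
 * needs 96 * 16 = 1536 table bytes plus QEMU_ALIGN_UP(96, 64) / 8 = 16
 * PBA bytes, so msix_sz rounds 1552 up to one 4 KiB host page; an
 * existing BAR being reused is doubled so the structures land in their
 * own host-page-aligned upper half.
 */
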
/*
 * We don't have any control over how pci_add_capability() inserts
 * capabilities into the chain.  In order to setup MSI-X we need a
 * MemoryRegion for the BAR.  In order to setup the BAR and not
 * attempt to mmap the MSI-X table area, which VFIO won't allow, we
 * need to first look for where the MSI-X table lives.  So we
 * unfortunately split MSI-X setup across two functions.
 */
static void vfio_msix_early_setup(VFIOPCIDevice *vdev, Error **errp)
{
    uint8_t pos;
    uint16_t ctrl;
    uint32_t table, pba;
    int fd = vdev->vbasedev.fd;
    VFIOMSIXInfo *msix;

    pos = pci_find_capability(&vdev->pdev, PCI_CAP_ID_MSIX);
    if (!pos) {
        return;
    }

    if (pread(fd, &ctrl, sizeof(ctrl),
              vdev->config_offset + pos + PCI_MSIX_FLAGS) != sizeof(ctrl)) {
        error_setg_errno(errp, errno, "failed to read PCI MSIX FLAGS");
        return;
    }

    if (pread(fd, &table, sizeof(table),
              vdev->config_offset + pos + PCI_MSIX_TABLE) != sizeof(table)) {
        error_setg_errno(errp, errno, "failed to read PCI MSIX TABLE");
        return;
    }

    if (pread(fd, &pba, sizeof(pba),
              vdev->config_offset + pos + PCI_MSIX_PBA) != sizeof(pba)) {
        error_setg_errno(errp, errno, "failed to read PCI MSIX PBA");
        return;
    }

    ctrl = le16_to_cpu(ctrl);
    table = le32_to_cpu(table);
    pba = le32_to_cpu(pba);

    msix = g_malloc0(sizeof(*msix));
    msix->table_bar = table & PCI_MSIX_FLAGS_BIRMASK;
    msix->table_offset = table & ~PCI_MSIX_FLAGS_BIRMASK;
    msix->pba_bar = pba & PCI_MSIX_FLAGS_BIRMASK;
    msix->pba_offset = pba & ~PCI_MSIX_FLAGS_BIRMASK;
    msix->entries = (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;

    /*
     * Test the size of the pba_offset variable and catch if it extends outside
     * of the specified BAR.  If it is the case, we need to apply a hardware
     * specific quirk if the device is known or we have a broken configuration.
     */
    if (msix->pba_offset >= vdev->bars[msix->pba_bar].region.size) {
        /*
         * Chelsio T5 Virtual Function devices are encoded as 0x58xx for T5
         * adapters.  The T5 hardware returns an incorrect value of 0x8000 for
         * the VF PBA offset while the BAR itself is only 8k.  The correct
         * value is 0x1000, so we hard code that here.
         */
        if (vdev->vendor_id == PCI_VENDOR_ID_CHELSIO &&
            (vdev->device_id & 0xff00) == 0x5800) {
            msix->pba_offset = 0x1000;
        } else if (vdev->msix_relo == OFF_AUTOPCIBAR_OFF) {
            error_setg(errp, "hardware reports invalid configuration, "
                       "MSIX PBA outside of specified BAR");
            g_free(msix);
            return;
        }
    }

    trace_vfio_msix_early_setup(vdev->vbasedev.name, pos, msix->table_bar,
                                msix->table_offset, msix->entries);
    vdev->msix = msix;

    vfio_pci_fixup_msix_region(vdev);

    vfio_pci_relocate_msix(vdev, errp);
}

static int vfio_msix_setup(VFIOPCIDevice *vdev, int pos, Error **errp)
{
    int ret;
    Error *err = NULL;

    vdev->msix->pending = g_malloc0(BITS_TO_LONGS(vdev->msix->entries) *
                                    sizeof(unsigned long));
    ret = msix_init(&vdev->pdev, vdev->msix->entries,
                    vdev->bars[vdev->msix->table_bar].mr,
                    vdev->msix->table_bar, vdev->msix->table_offset,
                    vdev->bars[vdev->msix->pba_bar].mr,
                    vdev->msix->pba_bar, vdev->msix->pba_offset, pos,
                    &err);
    if (ret < 0) {
        if (ret == -ENOTSUP) {
            warn_report_err(err);
            return 0;
        }

        error_propagate(errp, err);
        return ret;
    }

    /*
     * The PCI spec suggests that devices provide additional alignment for
     * MSI-X structures and avoid overlapping non-MSI-X related registers.
     * For an assigned device, this hopefully means that emulation of MSI-X
     * structures does not affect the performance of the device.  If devices
     * fail to provide that alignment, a significant performance penalty may
     * result, for instance Mellanox MT27500 VFs:
     * http://www.spinics.net/lists/kvm/msg125881.html
     *
     * The PBA is simply not that important for such a serious regression and
     * most drivers do not appear to look at it.  The solution for this is to
     * disable the PBA MemoryRegion unless it's being used.  We disable it
     * here and only enable it if a masked vector fires through QEMU.  As the
     * vector-use notifier is called, which occurs on unmask, we test whether
     * PBA emulation is needed and again disable if not.
     */
    memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, false);

    /*
     * The emulated machine may provide a paravirt interface for MSIX setup
     * so it is not strictly necessary to emulate MSIX here.  This becomes
     * helpful when frequently accessed MMIO registers are located in
     * subpages adjacent to the MSIX table but the MSIX data containing page
     * cannot be mapped because of a host page size bigger than the MSIX table
     * alignment.
     */
    if (object_property_get_bool(OBJECT(qdev_get_machine()),
                                 "vfio-no-msix-emulation", NULL)) {
        memory_region_set_enabled(&vdev->pdev.msix_table_mmio, false);
    }

    return 0;
}

static void vfio_teardown_msi(VFIOPCIDevice *vdev)
{
    msi_uninit(&vdev->pdev);

    if (vdev->msix) {
        msix_uninit(&vdev->pdev,
                    vdev->bars[vdev->msix->table_bar].mr,
                    vdev->bars[vdev->msix->pba_bar].mr);
        g_free(vdev->msix->pending);
    }
}

static void vfio_mmap_set_enabled(VFIOPCIDevice *vdev, bool enabled)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        vfio_region_mmaps_set_enabled(&vdev->bars[i].region, enabled);
    }
}

static void vfio_bar_prepare(VFIOPCIDevice *vdev, int nr)
{
    VFIOBAR *bar = &vdev->bars[nr];

    uint32_t pci_bar;
    int ret;

    /* Skip both unimplemented BARs and the upper half of 64bit BARS. */
    if (!bar->region.size) {
        return;
    }

    /* Determine what type of BAR this is for registration */
    ret = pread(vdev->vbasedev.fd, &pci_bar, sizeof(pci_bar),
                vdev->config_offset + PCI_BASE_ADDRESS_0 + (4 * nr));
    if (ret != sizeof(pci_bar)) {
        error_report("vfio: Failed to read BAR %d (%m)", nr);
        return;
    }

    pci_bar = le32_to_cpu(pci_bar);
    bar->ioport = (pci_bar & PCI_BASE_ADDRESS_SPACE_IO);
    bar->mem64 = bar->ioport ? 0 : (pci_bar & PCI_BASE_ADDRESS_MEM_TYPE_64);
    bar->type = pci_bar & (bar->ioport ? ~PCI_BASE_ADDRESS_IO_MASK :
                                         ~PCI_BASE_ADDRESS_MEM_MASK);
    bar->size = bar->region.size;
}

static void vfio_bars_prepare(VFIOPCIDevice *vdev)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        vfio_bar_prepare(vdev, i);
    }
}

static void vfio_bar_register(VFIOPCIDevice *vdev, int nr)
{
    VFIOBAR *bar = &vdev->bars[nr];
    char *name;

    if (!bar->size) {
        return;
    }

    bar->mr = g_new0(MemoryRegion, 1);
    name = g_strdup_printf("%s base BAR %d", vdev->vbasedev.name, nr);
    memory_region_init_io(bar->mr, OBJECT(vdev), NULL, NULL, name, bar->size);
    g_free(name);

    if (bar->region.size) {
        memory_region_add_subregion(bar->mr, 0, bar->region.mem);

        if (vfio_region_mmap(&bar->region)) {
            error_report("Failed to mmap %s BAR %d. Performance may be slow",
                         vdev->vbasedev.name, nr);
        }
    }

    pci_register_bar(&vdev->pdev, nr, bar->type, bar->mr);
}

static void vfio_bars_register(VFIOPCIDevice *vdev)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        vfio_bar_register(vdev, i);
    }
}

static void vfio_bars_exit(VFIOPCIDevice *vdev)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        VFIOBAR *bar = &vdev->bars[i];

        vfio_bar_quirk_exit(vdev, i);
        vfio_region_exit(&bar->region);
        if (bar->region.size) {
            memory_region_del_subregion(bar->mr, bar->region.mem);
        }
    }

    if (vdev->vga) {
        pci_unregister_vga(&vdev->pdev);
        vfio_vga_quirk_exit(vdev);
    }
}

static void vfio_bars_finalize(VFIOPCIDevice *vdev)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        VFIOBAR *bar = &vdev->bars[i];

        vfio_bar_quirk_finalize(vdev, i);
        vfio_region_finalize(&bar->region);
        if (bar->size) {
            object_unparent(OBJECT(bar->mr));
            g_free(bar->mr);
        }
    }

    if (vdev->vga) {
        vfio_vga_quirk_finalize(vdev);
        for (i = 0; i < ARRAY_SIZE(vdev->vga->region); i++) {
            object_unparent(OBJECT(&vdev->vga->region[i].mem));
        }
        g_free(vdev->vga);
    }
}

static uint8_t vfio_std_cap_max_size(PCIDevice *pdev, uint8_t pos)
{
    uint8_t tmp;
    uint16_t next = PCI_CONFIG_SPACE_SIZE;

    for (tmp = pdev->config[PCI_CAPABILITY_LIST]; tmp;
         tmp = pdev->config[tmp + PCI_CAP_LIST_NEXT]) {
        if (tmp > pos && tmp < next) {
            next = tmp;
        }
    }

    return next - pos;
}

static uint16_t vfio_ext_cap_max_size(const uint8_t *config, uint16_t pos)
{
    uint16_t tmp, next = PCIE_CONFIG_SPACE_SIZE;

    for (tmp = PCI_CONFIG_SPACE_SIZE; tmp;
         tmp = PCI_EXT_CAP_NEXT(pci_get_long(config + tmp))) {
        if (tmp > pos && tmp < next) {
            next = tmp;
        }
    }

    return next - pos;
}

static void vfio_set_word_bits(uint8_t *buf, uint16_t val, uint16_t mask)
{
    pci_set_word(buf, (pci_get_word(buf) & ~mask) | val);
}

static void vfio_add_emulated_word(VFIOPCIDevice *vdev, int pos,
                                   uint16_t val, uint16_t mask)
{
    vfio_set_word_bits(vdev->pdev.config + pos, val, mask);
    vfio_set_word_bits(vdev->pdev.wmask + pos, ~mask, mask);
    vfio_set_word_bits(vdev->emulated_config_bits + pos, mask, mask);
}

static void vfio_set_long_bits(uint8_t *buf, uint32_t val, uint32_t mask)
{
    pci_set_long(buf, (pci_get_long(buf) & ~mask) | val);
}

static void vfio_add_emulated_long(VFIOPCIDevice *vdev, int pos,
                                   uint32_t val, uint32_t mask)
{
    vfio_set_long_bits(vdev->pdev.config + pos, val, mask);
    vfio_set_long_bits(vdev->pdev.wmask + pos, ~mask, mask);
    vfio_set_long_bits(vdev->emulated_config_bits + pos, mask, mask);
}

static int vfio_setup_pcie_cap(VFIOPCIDevice *vdev, int pos, uint8_t size,
                               Error **errp)
{
    uint16_t flags;
    uint8_t type;

    flags = pci_get_word(vdev->pdev.config + pos + PCI_CAP_FLAGS);
    type = (flags & PCI_EXP_FLAGS_TYPE) >> 4;

    if (type != PCI_EXP_TYPE_ENDPOINT &&
        type != PCI_EXP_TYPE_LEG_END &&
        type != PCI_EXP_TYPE_RC_END) {

        error_setg(errp, "assignment of PCIe type 0x%x "
                   "devices is not currently supported", type);
        return -EINVAL;
    }

    if (!pci_bus_is_express(pci_get_bus(&vdev->pdev))) {
        PCIBus *bus = pci_get_bus(&vdev->pdev);
        PCIDevice *bridge;

        /*
         * Traditionally PCI device assignment exposes the PCIe capability
         * as-is on non-express buses.  The reason being that some drivers
         * simply assume that it's there, for example tg3.  However when
         * we're running on a native PCIe machine type, like Q35, we need
         * to hide the PCIe capability.  The reason for this is twofold;
         * first Windows guests get a Code 10 error when the PCIe capability
         * is exposed in this configuration.  Therefore express devices won't
         * work at all unless they're attached to express buses in the VM.
         * Second, a native PCIe machine introduces the possibility of fine
         * granularity IOMMUs supporting both translation and isolation.
         * Guest code to discover the IOMMU visibility of a device, such as
         * IOMMU grouping code on Linux, is very aware of device types and
         * valid transitions between bus types.  An express device on a non-
         * express bus is not a valid combination on bare metal systems.
         *
         * Drivers that require a PCIe capability to make the device
         * functional are simply going to need to have their devices placed
         * on a PCIe bus in the VM.
         */
        while (!pci_bus_is_root(bus)) {
            bridge = pci_bridge_get_device(bus);
            bus = pci_get_bus(bridge);
        }

        if (pci_bus_is_express(bus)) {
            return 0;
        }

    } else if (pci_bus_is_root(pci_get_bus(&vdev->pdev))) {
        /*
         * On a Root Complex bus Endpoints become Root Complex Integrated
         * Endpoints, which changes the type and clears the LNK & LNK2 fields.
         */
        if (type == PCI_EXP_TYPE_ENDPOINT) {
            vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
                                   PCI_EXP_TYPE_RC_END << 4,
                                   PCI_EXP_FLAGS_TYPE);

            /* Link Capabilities, Status, and Control go away */
            if (size > PCI_EXP_LNKCTL) {
                vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP, 0, ~0);
                vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0);
                vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA, 0, ~0);

#ifndef PCI_EXP_LNKCAP2
#define PCI_EXP_LNKCAP2 44
#endif
#ifndef PCI_EXP_LNKSTA2
#define PCI_EXP_LNKSTA2 50
#endif
                /* Link 2 Capabilities, Status, and Control go away */
                if (size > PCI_EXP_LNKCAP2) {
                    vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP2, 0, ~0);
                    vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL2, 0, ~0);
                    vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA2, 0, ~0);
                }
            }

        } else if (type == PCI_EXP_TYPE_LEG_END) {
            /*
             * Legacy endpoints don't belong on the root complex.  Windows
             * seems to be happier with devices if we skip the capability.
             */
            return 0;
        }

    } else {
        /*
         * Convert Root Complex Integrated Endpoints to regular endpoints.
         * These devices don't support LNK/LNK2 capabilities, so make them up.
         */
        if (type == PCI_EXP_TYPE_RC_END) {
            vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
                                   PCI_EXP_TYPE_ENDPOINT << 4,
                                   PCI_EXP_FLAGS_TYPE);
            vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP,
                           QEMU_PCI_EXP_LNKCAP_MLW(QEMU_PCI_EXP_LNK_X1) |
                           QEMU_PCI_EXP_LNKCAP_MLS(QEMU_PCI_EXP_LNK_2_5GT), ~0);
            vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0);
        }
    }

    /*
     * Intel 82599 SR-IOV VFs report an invalid PCIe capability version 0
     * (Niantic errata #35) causing Windows to error with a Code 10 for the
     * device on Q35.  Fixup any such devices to report version 1.  If we
     * were to remove the capability entirely the guest would lose extended
     * config space.
     */
    if ((flags & PCI_EXP_FLAGS_VERS) == 0) {
        vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
                               1, PCI_EXP_FLAGS_VERS);
    }

    pos = pci_add_capability(&vdev->pdev, PCI_CAP_ID_EXP, pos, size,
                             errp);
    if (pos < 0) {
        return pos;
    }

    vdev->pdev.exp.exp_cap = pos;

    return pos;
}

static void vfio_check_pcie_flr(VFIOPCIDevice *vdev, uint8_t pos)
{
    uint32_t cap = pci_get_long(vdev->pdev.config + pos + PCI_EXP_DEVCAP);

    if (cap & PCI_EXP_DEVCAP_FLR) {
        trace_vfio_check_pcie_flr(vdev->vbasedev.name);
        vdev->has_flr = true;
    }
}

static void vfio_check_pm_reset(VFIOPCIDevice *vdev, uint8_t pos)
{
    uint16_t csr = pci_get_word(vdev->pdev.config + pos + PCI_PM_CTRL);

    if (!(csr & PCI_PM_CTRL_NO_SOFT_RESET)) {
        trace_vfio_check_pm_reset(vdev->vbasedev.name);
        vdev->has_pm_reset = true;
    }
}

static void vfio_check_af_flr(VFIOPCIDevice *vdev, uint8_t pos)
{
    uint8_t cap = pci_get_byte(vdev->pdev.config + pos + PCI_AF_CAP);

    if ((cap & PCI_AF_CAP_TP) && (cap & PCI_AF_CAP_FLR)) {
        trace_vfio_check_af_flr(vdev->vbasedev.name);
        vdev->has_flr = true;
    }
}

static int vfio_add_std_cap(VFIOPCIDevice *vdev, uint8_t pos, Error **errp)
{
    PCIDevice *pdev = &vdev->pdev;
    uint8_t cap_id, next, size;
    int ret;

    cap_id = pdev->config[pos];
    next = pdev->config[pos + PCI_CAP_LIST_NEXT];

    /*
     * If it becomes important to configure capabilities to their actual
     * size, use this as the default when it's something we don't recognize.
     * Since QEMU doesn't actually handle many of the config accesses,
     * exact size doesn't seem worthwhile.
     */
    size = vfio_std_cap_max_size(pdev, pos);

    /*
     * pci_add_capability always inserts the new capability at the head
     * of the chain.  Therefore to end up with a chain that matches the
     * physical device, we insert from the end by making this recursive.
     * This is also why we pre-calculate size above as cached config space
     * will be changed as we unwind the stack.
     */
    if (next) {
        ret = vfio_add_std_cap(vdev, next, errp);
        if (ret) {
            return ret;
        }
    } else {
        /* Begin the rebuild, use QEMU emulated list bits */
        pdev->config[PCI_CAPABILITY_LIST] = 0;
        vdev->emulated_config_bits[PCI_CAPABILITY_LIST] = 0xff;
        vdev->emulated_config_bits[PCI_STATUS] |= PCI_STATUS_CAP_LIST;

        ret = vfio_add_virt_caps(vdev, errp);
        if (ret) {
            return ret;
        }
    }

    /* Scale down size, esp in case virt caps were added above */
    size = MIN(size, vfio_std_cap_max_size(pdev, pos));

    /* Use emulated next pointer to allow dropping caps */
    pci_set_byte(vdev->emulated_config_bits + pos + PCI_CAP_LIST_NEXT, 0xff);

    switch (cap_id) {
    case PCI_CAP_ID_MSI:
        ret = vfio_msi_setup(vdev, pos, errp);
        break;
    case PCI_CAP_ID_EXP:
        vfio_check_pcie_flr(vdev, pos);
        ret = vfio_setup_pcie_cap(vdev, pos, size, errp);
        break;
    case PCI_CAP_ID_MSIX:
        ret = vfio_msix_setup(vdev, pos, errp);
        break;
    case PCI_CAP_ID_PM:
        vfio_check_pm_reset(vdev, pos);
        vdev->pm_cap = pos;
        ret = pci_add_capability(pdev, cap_id, pos, size, errp);
        break;
    case PCI_CAP_ID_AF:
        vfio_check_af_flr(vdev, pos);
        ret = pci_add_capability(pdev, cap_id, pos, size, errp);
        break;
    default:
        ret = pci_add_capability(pdev, cap_id, pos, size, errp);
        break;
    }

    if (ret < 0) {
        error_prepend(errp,
                      "failed to add PCI capability 0x%x[0x%x]@0x%x: ",
                      cap_id, size, pos);
        return ret;
    }

    return 0;
}

1994 static void vfio_add_ext_cap(VFIOPCIDevice
*vdev
)
1996 PCIDevice
*pdev
= &vdev
->pdev
;
1998 uint16_t cap_id
, next
, size
;
2002 /* Only add extended caps if we have them and the guest can see them */
2003 if (!pci_is_express(pdev
) || !pci_bus_is_express(pci_get_bus(pdev
)) ||
2004 !pci_get_long(pdev
->config
+ PCI_CONFIG_SPACE_SIZE
)) {

    /*
     * pcie_add_capability always inserts the new capability at the tail
     * of the chain.  Therefore to end up with a chain that matches the
     * physical device, we cache the config space to avoid overwriting
     * the original config space when we parse the extended capabilities.
     */
    config = g_memdup(pdev->config, vdev->config_size);

    /*
     * Extended capabilities are chained with each pointing to the next, so we
     * can drop anything other than the head of the chain simply by modifying
     * the previous next pointer.  Seed the head of the chain here such that
     * we can simply skip any capabilities we want to drop below, regardless
     * of their position in the chain.  If this stub capability still exists
     * after we add the capabilities we want to expose, update the capability
     * ID to zero.  Note that we cannot seed with the capability header being
     * zero as this conflicts with the definition of an absent capability
     * chain and prevents capabilities beyond the head of the list from being
     * added.  By replacing the dummy capability ID with zero after walking
     * the device chain, we also transparently mark extended capabilities as
     * absent if no capabilities were added.  Note that the PCIe spec defines
     * an absence of extended capabilities to be determined by a value of
     * zero for the capability ID, version, AND next pointer.  A non-zero
     * next pointer should be sufficient to indicate additional capabilities
     * are present, which will occur if we call pcie_add_capability() below.
     * The entire first dword is emulated to support this.
     *
     * NB. The kernel side does similar masking, so be prepared that our
     * view of the device may also contain a capability ID zero in the head
     * of the chain.  Skip it for the same reason that we cannot seed the
     * chain with a zero capability.
     */
    pci_set_long(pdev->config + PCI_CONFIG_SPACE_SIZE,
                 PCI_EXT_CAP(0xFFFF, 0, 0));
    pci_set_long(pdev->wmask + PCI_CONFIG_SPACE_SIZE, 0);
    pci_set_long(vdev->emulated_config_bits + PCI_CONFIG_SPACE_SIZE, ~0);
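
    /*
     * Illustration (an assumption based on QEMU's PCI_EXT_CAP() encoding):
     * PCI_EXT_CAP(0xFFFF, 0, 0) places the capability ID in the low 16 bits
     * with version and next pointer zero, i.e. the dword 0x0000FFFF at
     * offset 0x100.  Zeroing wmask makes the stub read-only to the guest,
     * and setting emulated_config_bits to ~0 serves reads of the whole
     * first dword from QEMU's copy rather than from the device.
     */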

    for (next = PCI_CONFIG_SPACE_SIZE; next;
         next = PCI_EXT_CAP_NEXT(pci_get_long(config + next))) {
        header = pci_get_long(config + next);
        cap_id = PCI_EXT_CAP_ID(header);
        cap_ver = PCI_EXT_CAP_VER(header);

        /*
         * If it becomes important to configure extended capabilities to their
         * actual size, use this as the default when it's something we don't
         * recognize.  Since QEMU doesn't actually handle many of the config
         * accesses, exact size doesn't seem worthwhile.
         */
        size = vfio_ext_cap_max_size(config, next);

        /* Use emulated next pointer to allow dropping extended caps */
        pci_long_test_and_set_mask(vdev->emulated_config_bits + next,
                                   PCI_EXT_CAP_NEXT_MASK);

        switch (cap_id) {
        case 0: /* kernel masked capability */
        case PCI_EXT_CAP_ID_SRIOV: /* Read-only VF BARs confuse OVMF */
        case PCI_EXT_CAP_ID_ARI: /* XXX Needs next function virtualization */
        case PCI_EXT_CAP_ID_REBAR: /* Can't expose read-only */
            trace_vfio_add_ext_cap_dropped(vdev->vbasedev.name, cap_id, next);
            break;
        default:
            pcie_add_capability(pdev, cap_id, cap_ver, next, size);
        }
    }

    /* Cleanup chain head ID if necessary */
    if (pci_get_word(pdev->config + PCI_CONFIG_SPACE_SIZE) == 0xFFFF) {
        pci_set_word(pdev->config + PCI_CONFIG_SPACE_SIZE, 0);
    }

    g_free(config);
}

static int vfio_add_capabilities(VFIOPCIDevice *vdev, Error **errp)
{
    PCIDevice *pdev = &vdev->pdev;
    int ret;

    if (!(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST) ||
        !pdev->config[PCI_CAPABILITY_LIST]) {
        return 0; /* Nothing to add */
    }

    ret = vfio_add_std_cap(vdev, pdev->config[PCI_CAPABILITY_LIST], errp);
    if (ret) {
        return ret;
    }

    vfio_add_ext_cap(vdev);
    return 0;
}

static void vfio_pci_pre_reset(VFIOPCIDevice *vdev)
{
    PCIDevice *pdev = &vdev->pdev;
    uint16_t cmd;

    vfio_disable_interrupts(vdev);

    /* Make sure the device is in D0 */
    if (vdev->pm_cap) {
        uint16_t pmcsr;
        uint8_t state;

        pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2);
        state = pmcsr & PCI_PM_CTRL_STATE_MASK;
        if (state) {
            pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
            vfio_pci_write_config(pdev, vdev->pm_cap + PCI_PM_CTRL, pmcsr, 2);
            /* vfio handles the necessary delay here */
            pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2);
            state = pmcsr & PCI_PM_CTRL_STATE_MASK;
            if (state) {
                error_report("vfio: Unable to power on device, stuck in D%d",
                             state);
            }
        }
    }

    /*
     * Stop any ongoing DMA by disconnecting I/O, MMIO, and bus master.
     * Also put INTx Disable in known state.
     */
    cmd = vfio_pci_read_config(pdev, PCI_COMMAND, 2);
    cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
             PCI_COMMAND_INTX_DISABLE);
    vfio_pci_write_config(pdev, PCI_COMMAND, cmd, 2);
}
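
/*
 * For reference (a reader aid, not from the original source): the power
 * state is the low two bits of PMCSR, so 'state' above is 0 in D0 and 3 in
 * D3hot.  Writing zero requests D0; the second read confirms the
 * transition actually took effect.
 */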

static void vfio_pci_post_reset(VFIOPCIDevice *vdev)
{
    Error *err = NULL;
    int nr;

    vfio_intx_enable(vdev, &err);
    if (err) {
        error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
    }

    for (nr = 0; nr < PCI_NUM_REGIONS - 1; ++nr) {
        off_t addr = vdev->config_offset + PCI_BASE_ADDRESS_0 + (4 * nr);
        uint32_t val = 0;
        uint32_t len = sizeof(val);

        if (pwrite(vdev->vbasedev.fd, &val, len, addr) != len) {
            error_report("%s(%s) reset bar %d failed: %m", __func__,
                         vdev->vbasedev.name, nr);
        }
    }

    vfio_quirk_reset(vdev);
}

static bool vfio_pci_host_match(PCIHostDeviceAddress *addr, const char *name)
{
    char tmp[13];

    sprintf(tmp, "%04x:%02x:%02x.%1x", addr->domain,
            addr->bus, addr->slot, addr->function);

    return (strcmp(tmp, name) == 0);
}
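
/*
 * Buffer-size arithmetic for the match above: "dddd:bb:ss.f" is
 * 4 + 1 + 2 + 1 + 2 + 1 + 1 = 12 characters plus a terminating NUL, hence
 * the 13-byte buffer.  The formatted address compares against
 * vbasedev.name, which is derived from the sysfs basename,
 * e.g. "0000:01:00.0".
 */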

static int vfio_pci_hot_reset(VFIOPCIDevice *vdev, bool single)
{
    VFIOGroup *group;
    struct vfio_pci_hot_reset_info *info;
    struct vfio_pci_dependent_device *devices;
    struct vfio_pci_hot_reset *reset;
    int32_t *fds;
    int ret, i, count;
    bool multi = false;

    trace_vfio_pci_hot_reset(vdev->vbasedev.name, single ? "one" : "multi");

    vfio_pci_pre_reset(vdev);
    vdev->vbasedev.needs_reset = false;

    info = g_malloc0(sizeof(*info));
    info->argsz = sizeof(*info);

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info);
    if (ret && errno != ENOSPC) {
        ret = -errno;
        if (!vdev->has_pm_reset) {
            error_report("vfio: Cannot reset device %s, "
                         "no available reset mechanism.", vdev->vbasedev.name);
        }
        goto out_single;
    }

    count = info->count;
    info = g_realloc(info, sizeof(*info) + (count * sizeof(*devices)));
    info->argsz = sizeof(*info) + (count * sizeof(*devices));
    devices = &info->devices[0];

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info);
    if (ret) {
        ret = -errno;
        error_report("vfio: hot reset info failed: %m");
        goto out_single;
    }

    trace_vfio_pci_hot_reset_has_dep_devices(vdev->vbasedev.name);

    /* Verify that we have all the groups required */
    for (i = 0; i < info->count; i++) {
        PCIHostDeviceAddress host;
        VFIOPCIDevice *tmp;
        VFIODevice *vbasedev_iter;

        host.domain = devices[i].segment;
        host.bus = devices[i].bus;
        host.slot = PCI_SLOT(devices[i].devfn);
        host.function = PCI_FUNC(devices[i].devfn);

        trace_vfio_pci_hot_reset_dep_devices(host.domain,
                host.bus, host.slot, host.function, devices[i].group_id);

        if (vfio_pci_host_match(&host, vdev->vbasedev.name)) {
            continue;
        }

        QLIST_FOREACH(group, &vfio_group_list, next) {
            if (group->groupid == devices[i].group_id) {
                break;
            }
        }

        if (!group) {
            if (!vdev->has_pm_reset) {
                error_report("vfio: Cannot reset device %s, "
                             "depends on group %d which is not owned.",
                             vdev->vbasedev.name, devices[i].group_id);
            }
            ret = -EPERM;
            goto out;
        }

        /* Prep dependent devices for reset and clear our marker. */
        QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
            if (!vbasedev_iter->dev->realized ||
                vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) {
                continue;
            }
            tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev);
            if (vfio_pci_host_match(&host, tmp->vbasedev.name)) {
                if (single) {
                    ret = -EINVAL;
                    goto out_single;
                }
                vfio_pci_pre_reset(tmp);
                tmp->vbasedev.needs_reset = false;
                multi = true;
                break;
            }
        }
    }

    if (!single && !multi) {
        ret = -EINVAL;
        goto out_single;
    }

    /* Determine how many group fds need to be passed */
    count = 0;
    QLIST_FOREACH(group, &vfio_group_list, next) {
        for (i = 0; i < info->count; i++) {
            if (group->groupid == devices[i].group_id) {
                count++;
                break;
            }
        }
    }

    reset = g_malloc0(sizeof(*reset) + (count * sizeof(*fds)));
    reset->argsz = sizeof(*reset) + (count * sizeof(*fds));
    fds = &reset->group_fds[0];

    /* Fill in group fds */
    QLIST_FOREACH(group, &vfio_group_list, next) {
        for (i = 0; i < info->count; i++) {
            if (group->groupid == devices[i].group_id) {
                fds[reset->count++] = group->fd;
                break;
            }
        }
    }

    /* Bus reset! */
    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_PCI_HOT_RESET, reset);
    g_free(reset);

    trace_vfio_pci_hot_reset_result(vdev->vbasedev.name,
                                    ret ? "%m" : "Success");

out:
    /* Re-enable INTx on affected devices */
    for (i = 0; i < info->count; i++) {
        PCIHostDeviceAddress host;
        VFIOPCIDevice *tmp;
        VFIODevice *vbasedev_iter;

        host.domain = devices[i].segment;
        host.bus = devices[i].bus;
        host.slot = PCI_SLOT(devices[i].devfn);
        host.function = PCI_FUNC(devices[i].devfn);

        if (vfio_pci_host_match(&host, vdev->vbasedev.name)) {
            continue;
        }

        QLIST_FOREACH(group, &vfio_group_list, next) {
            if (group->groupid == devices[i].group_id) {
                break;
            }
        }

        if (!group) {
            break;
        }

        QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
            if (!vbasedev_iter->dev->realized ||
                vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) {
                continue;
            }
            tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev);
            if (vfio_pci_host_match(&host, tmp->vbasedev.name)) {
                vfio_pci_post_reset(tmp);
                break;
            }
        }
    }
out_single:
    vfio_pci_post_reset(vdev);
    g_free(info);

    return ret;
}

/*
 * We want to differentiate hot reset of multiple in-use devices vs hot reset
 * of a single in-use device.  VFIO_DEVICE_RESET will already handle the case
 * of doing hot resets when there is only a single device per bus.  The in-use
 * here refers to how many VFIODevices are affected.  A hot reset that affects
 * multiple devices, but only a single in-use device, means that we can call
 * it from our bus ->reset() callback since the extent is effectively a single
 * device.  This allows us to make use of it in the hotplug path.  When there
 * are multiple in-use devices, we can only trigger the hot reset during a
 * system reset and thus from our reset handler.  We separate _one vs _multi
 * here so that we don't overlap and do a double reset on the system reset
 * path where both our reset handler and ->reset() callback are used.  Calling
 * _one() will only do a hot reset for the one in-use device case, calling
 * _multi() will do nothing if a _one() would have been sufficient.
 */
static int vfio_pci_hot_reset_one(VFIOPCIDevice *vdev)
{
    return vfio_pci_hot_reset(vdev, true);
}

static int vfio_pci_hot_reset_multi(VFIODevice *vbasedev)
{
    VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);

    return vfio_pci_hot_reset(vdev, false);
}
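
/*
 * For reference: _one() is invoked from the bus reset path in
 * vfio_pci_reset() below, while _multi() is wired into vfio_pci_ops as the
 * .vfio_hot_reset_multi callback used from the system reset handler.
 */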

static void vfio_pci_compute_needs_reset(VFIODevice *vbasedev)
{
    VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);

    if (!vbasedev->reset_works || (!vdev->has_flr && vdev->has_pm_reset)) {
        vbasedev->needs_reset = true;
    }
}

static VFIODeviceOps vfio_pci_ops = {
    .vfio_compute_needs_reset = vfio_pci_compute_needs_reset,
    .vfio_hot_reset_multi = vfio_pci_hot_reset_multi,
    .vfio_eoi = vfio_intx_eoi,
};

int vfio_populate_vga(VFIOPCIDevice *vdev, Error **errp)
{
    VFIODevice *vbasedev = &vdev->vbasedev;
    struct vfio_region_info *reg_info;
    int ret;

    ret = vfio_get_region_info(vbasedev, VFIO_PCI_VGA_REGION_INDEX, &reg_info);
    if (ret) {
        error_setg_errno(errp, -ret,
                         "failed getting region info for VGA region index %d",
                         VFIO_PCI_VGA_REGION_INDEX);
        return ret;
    }

    if (!(reg_info->flags & VFIO_REGION_INFO_FLAG_READ) ||
        !(reg_info->flags & VFIO_REGION_INFO_FLAG_WRITE) ||
        reg_info->size < 0xbffff + 1) {
        error_setg(errp, "unexpected VGA info, flags 0x%lx, size 0x%lx",
                   (unsigned long)reg_info->flags,
                   (unsigned long)reg_info->size);
        g_free(reg_info);
        return -EINVAL;
    }

    vdev->vga = g_new0(VFIOVGA, 1);

    vdev->vga->fd_offset = reg_info->offset;
    vdev->vga->fd = vdev->vbasedev.fd;

    g_free(reg_info);

    vdev->vga->region[QEMU_PCI_VGA_MEM].offset = QEMU_PCI_VGA_MEM_BASE;
    vdev->vga->region[QEMU_PCI_VGA_MEM].nr = QEMU_PCI_VGA_MEM;
    QLIST_INIT(&vdev->vga->region[QEMU_PCI_VGA_MEM].quirks);

    memory_region_init_io(&vdev->vga->region[QEMU_PCI_VGA_MEM].mem,
                          OBJECT(vdev), &vfio_vga_ops,
                          &vdev->vga->region[QEMU_PCI_VGA_MEM],
                          "vfio-vga-mmio@0xa0000",
                          QEMU_PCI_VGA_MEM_SIZE);

    vdev->vga->region[QEMU_PCI_VGA_IO_LO].offset = QEMU_PCI_VGA_IO_LO_BASE;
    vdev->vga->region[QEMU_PCI_VGA_IO_LO].nr = QEMU_PCI_VGA_IO_LO;
    QLIST_INIT(&vdev->vga->region[QEMU_PCI_VGA_IO_LO].quirks);

    memory_region_init_io(&vdev->vga->region[QEMU_PCI_VGA_IO_LO].mem,
                          OBJECT(vdev), &vfio_vga_ops,
                          &vdev->vga->region[QEMU_PCI_VGA_IO_LO],
                          "vfio-vga-io@0x3b0",
                          QEMU_PCI_VGA_IO_LO_SIZE);

    vdev->vga->region[QEMU_PCI_VGA_IO_HI].offset = QEMU_PCI_VGA_IO_HI_BASE;
    vdev->vga->region[QEMU_PCI_VGA_IO_HI].nr = QEMU_PCI_VGA_IO_HI;
    QLIST_INIT(&vdev->vga->region[QEMU_PCI_VGA_IO_HI].quirks);

    memory_region_init_io(&vdev->vga->region[QEMU_PCI_VGA_IO_HI].mem,
                          OBJECT(vdev), &vfio_vga_ops,
                          &vdev->vga->region[QEMU_PCI_VGA_IO_HI],
                          "vfio-vga-io@0x3c0",
                          QEMU_PCI_VGA_IO_HI_SIZE);

    pci_register_vga(&vdev->pdev, &vdev->vga->region[QEMU_PCI_VGA_MEM].mem,
                     &vdev->vga->region[QEMU_PCI_VGA_IO_LO].mem,
                     &vdev->vga->region[QEMU_PCI_VGA_IO_HI].mem);

    return 0;
}
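
/*
 * For reference: the three QEMU_PCI_VGA_* apertures registered above cover
 * the legacy VGA ranges, MMIO 0xa0000-0xbffff plus the I/O port blocks at
 * 0x3b0 and 0x3c0, which is why the region must span at least
 * 0xbffff + 1 bytes.
 */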

static void vfio_populate_device(VFIOPCIDevice *vdev, Error **errp)
{
    VFIODevice *vbasedev = &vdev->vbasedev;
    struct vfio_region_info *reg_info;
    struct vfio_irq_info irq_info = { .argsz = sizeof(irq_info) };
    int i, ret = -1;

    /* Sanity check device */
    if (!(vbasedev->flags & VFIO_DEVICE_FLAGS_PCI)) {
        error_setg(errp, "this isn't a PCI device");
        return;
    }

    if (vbasedev->num_regions < VFIO_PCI_CONFIG_REGION_INDEX + 1) {
        error_setg(errp, "unexpected number of io regions %u",
                   vbasedev->num_regions);
        return;
    }

    if (vbasedev->num_irqs < VFIO_PCI_MSIX_IRQ_INDEX + 1) {
        error_setg(errp, "unexpected number of irqs %u", vbasedev->num_irqs);
        return;
    }

    for (i = VFIO_PCI_BAR0_REGION_INDEX; i < VFIO_PCI_ROM_REGION_INDEX; i++) {
        char *name = g_strdup_printf("%s BAR %d", vbasedev->name, i);

        ret = vfio_region_setup(OBJECT(vdev), vbasedev,
                                &vdev->bars[i].region, i, name);
        g_free(name);

        if (ret) {
            error_setg_errno(errp, -ret, "failed to get region %d info", i);
            return;
        }

        QLIST_INIT(&vdev->bars[i].quirks);
    }

    ret = vfio_get_region_info(vbasedev,
                               VFIO_PCI_CONFIG_REGION_INDEX, &reg_info);
    if (ret) {
        error_setg_errno(errp, -ret, "failed to get config info");
        return;
    }

    trace_vfio_populate_device_config(vdev->vbasedev.name,
                                      (unsigned long)reg_info->size,
                                      (unsigned long)reg_info->offset,
                                      (unsigned long)reg_info->flags);

    vdev->config_size = reg_info->size;
    if (vdev->config_size == PCI_CONFIG_SPACE_SIZE) {
        vdev->pdev.cap_present &= ~QEMU_PCI_CAP_EXPRESS;
    }
    vdev->config_offset = reg_info->offset;

    g_free(reg_info);

    if (vdev->features & VFIO_FEATURE_ENABLE_VGA) {
        ret = vfio_populate_vga(vdev, errp);
        if (ret) {
            error_append_hint(errp, "device does not support "
                              "requested feature x-vga\n");
            return;
        }
    }

    irq_info.index = VFIO_PCI_ERR_IRQ_INDEX;

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_IRQ_INFO, &irq_info);
    if (ret) {
        /* This can fail for an old kernel or legacy PCI dev */
        trace_vfio_populate_device_get_irq_info_failure(strerror(errno));
    } else if (irq_info.count == 1) {
        vdev->pci_aer = true;
    } else {
        warn_report(VFIO_MSG_PREFIX
                    "Could not enable error recovery for the device",
                    vbasedev->name);
    }
}

static void vfio_put_device(VFIOPCIDevice *vdev)
{
    g_free(vdev->vbasedev.name);
    g_free(vdev->msix);

    vfio_put_base_device(&vdev->vbasedev);
}

static void vfio_err_notifier_handler(void *opaque)
{
    VFIOPCIDevice *vdev = opaque;

    if (!event_notifier_test_and_clear(&vdev->err_notifier)) {
        return;
    }

    /*
     * TBD. Retrieve the error details and decide what action
     * needs to be taken. One of the actions could be to pass
     * the error to the guest and have the guest driver recover
     * from the error. This requires that PCIe capabilities be
     * exposed to the guest. For now, we just terminate the
     * guest to contain the error.
     */

    error_report("%s(%s) Unrecoverable error detected. Please collect any"
                 " data possible and then kill the guest", __func__,
                 vdev->vbasedev.name);

    vm_stop(RUN_STATE_INTERNAL_ERROR);
}

/*
 * Registers error notifier for devices supporting error recovery.
 * If we encounter a failure in this function, we report an error
 * and continue after disabling error recovery support for the
 * device.
 */
static void vfio_register_err_notifier(VFIOPCIDevice *vdev)
{
    Error *err = NULL;
    int32_t fd;

    if (!vdev->pci_aer) {
        return;
    }

    if (event_notifier_init(&vdev->err_notifier, 0)) {
        error_report("vfio: Unable to init event notifier for error detection");
        vdev->pci_aer = false;
        return;
    }

    fd = event_notifier_get_fd(&vdev->err_notifier);
    qemu_set_fd_handler(fd, vfio_err_notifier_handler, NULL, vdev);

    if (vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_ERR_IRQ_INDEX, 0,
                               VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err)) {
        error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
        qemu_set_fd_handler(fd, NULL, NULL, vdev);
        event_notifier_cleanup(&vdev->err_notifier);
        vdev->pci_aer = false;
    }
}

static void vfio_unregister_err_notifier(VFIOPCIDevice *vdev)
{
    Error *err = NULL;

    if (!vdev->pci_aer) {
        return;
    }

    if (vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_ERR_IRQ_INDEX, 0,
                               VFIO_IRQ_SET_ACTION_TRIGGER, -1, &err)) {
        error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
    }
    qemu_set_fd_handler(event_notifier_get_fd(&vdev->err_notifier),
                        NULL, NULL, vdev);
    event_notifier_cleanup(&vdev->err_notifier);
}

static void vfio_req_notifier_handler(void *opaque)
{
    VFIOPCIDevice *vdev = opaque;
    Error *err = NULL;

    if (!event_notifier_test_and_clear(&vdev->req_notifier)) {
        return;
    }

    qdev_unplug(DEVICE(vdev), &err);
    if (err) {
        warn_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
    }
}

static void vfio_register_req_notifier(VFIOPCIDevice *vdev)
{
    struct vfio_irq_info irq_info = { .argsz = sizeof(irq_info),
                                      .index = VFIO_PCI_REQ_IRQ_INDEX };
    Error *err = NULL;
    int32_t fd;

    if (!(vdev->features & VFIO_FEATURE_ENABLE_REQ)) {
        return;
    }

    if (ioctl(vdev->vbasedev.fd,
              VFIO_DEVICE_GET_IRQ_INFO, &irq_info) < 0 || irq_info.count < 1) {
        return;
    }

    if (event_notifier_init(&vdev->req_notifier, 0)) {
        error_report("vfio: Unable to init event notifier for device request");
        return;
    }

    fd = event_notifier_get_fd(&vdev->req_notifier);
    qemu_set_fd_handler(fd, vfio_req_notifier_handler, NULL, vdev);

    if (vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_REQ_IRQ_INDEX, 0,
                               VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err)) {
        error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
        qemu_set_fd_handler(fd, NULL, NULL, vdev);
        event_notifier_cleanup(&vdev->req_notifier);
    } else {
        vdev->req_enabled = true;
    }
}

static void vfio_unregister_req_notifier(VFIOPCIDevice *vdev)
{
    Error *err = NULL;

    if (!vdev->req_enabled) {
        return;
    }

    if (vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_REQ_IRQ_INDEX, 0,
                               VFIO_IRQ_SET_ACTION_TRIGGER, -1, &err)) {
        error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
    }
    qemu_set_fd_handler(event_notifier_get_fd(&vdev->req_notifier),
                        NULL, NULL, vdev);
    event_notifier_cleanup(&vdev->req_notifier);

    vdev->req_enabled = false;
}

static void vfio_realize(PCIDevice *pdev, Error **errp)
{
    VFIOPCIDevice *vdev = PCI_VFIO(pdev);
    VFIODevice *vbasedev_iter;
    VFIOGroup *group;
    char *tmp, *subsys, group_path[PATH_MAX], *group_name;
    Error *err = NULL;
    struct stat st;
    int groupid;
    int i, ret;
    bool is_mdev;
    ssize_t len;

    if (!vdev->vbasedev.sysfsdev) {
        if (!(~vdev->host.domain || ~vdev->host.bus ||
              ~vdev->host.slot || ~vdev->host.function)) {
            error_setg(errp, "No provided host device");
            error_append_hint(errp, "Use -device vfio-pci,host=DDDD:BB:DD.F "
                              "or -device vfio-pci,sysfsdev=PATH_TO_DEVICE\n");
            return;
        }
        vdev->vbasedev.sysfsdev =
            g_strdup_printf("/sys/bus/pci/devices/%04x:%02x:%02x.%01x",
                            vdev->host.domain, vdev->host.bus,
                            vdev->host.slot, vdev->host.function);
    }
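
    /*
     * Example (illustration only): "-device vfio-pci,host=0000:06:0d.0"
     * resolves to sysfsdev "/sys/bus/pci/devices/0000:06:0d.0".  The ~0U
     * defaults assigned in vfio_instance_init() are how the check above
     * detects that neither host= nor sysfsdev= was supplied.
     */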

    if (stat(vdev->vbasedev.sysfsdev, &st) < 0) {
        error_setg_errno(errp, errno, "no such host device");
        error_prepend(errp, VFIO_MSG_PREFIX, vdev->vbasedev.sysfsdev);
        return;
    }

    vdev->vbasedev.name = g_path_get_basename(vdev->vbasedev.sysfsdev);
    vdev->vbasedev.ops = &vfio_pci_ops;
    vdev->vbasedev.type = VFIO_DEVICE_TYPE_PCI;
    vdev->vbasedev.dev = DEVICE(vdev);

    tmp = g_strdup_printf("%s/iommu_group", vdev->vbasedev.sysfsdev);
    len = readlink(tmp, group_path, sizeof(group_path));
    g_free(tmp);

    if (len <= 0 || len >= sizeof(group_path)) {
        error_setg_errno(errp, len < 0 ? errno : ENAMETOOLONG,
                         "no iommu_group found");
        goto error;
    }

    group_path[len] = 0;

    group_name = basename(group_path);
    if (sscanf(group_name, "%d", &groupid) != 1) {
        error_setg_errno(errp, errno, "failed to read %s", group_path);
        goto error;
    }

    trace_vfio_realize(vdev->vbasedev.name, groupid);

    group = vfio_get_group(groupid, pci_device_iommu_address_space(pdev), errp);
    if (!group) {
        goto error;
    }

    QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
        if (strcmp(vbasedev_iter->name, vdev->vbasedev.name) == 0) {
            error_setg(errp, "device is already attached");
            vfio_put_group(group);
            goto error;
        }
    }

    /*
     * Mediated devices *might* operate compatibly with memory ballooning, but
     * we cannot know for certain, it depends on whether the mdev vendor driver
     * stays in sync with the active working set of the guest driver.  Prevent
     * the x-balloon-allowed option unless this is minimally an mdev device.
     */
    tmp = g_strdup_printf("%s/subsystem", vdev->vbasedev.sysfsdev);
    subsys = realpath(tmp, NULL);
    g_free(tmp);
    is_mdev = subsys && (strcmp(subsys, "/sys/bus/mdev") == 0);
    free(subsys);

    trace_vfio_mdev(vdev->vbasedev.name, is_mdev);

    if (vdev->vbasedev.balloon_allowed && !is_mdev) {
        error_setg(errp, "x-balloon-allowed only potentially compatible "
                   "with mdev devices");
        vfio_put_group(group);
        goto error;
    }

    ret = vfio_get_device(group, vdev->vbasedev.name, &vdev->vbasedev, errp);
    if (ret) {
        vfio_put_group(group);
        goto error;
    }

    vfio_populate_device(vdev, &err);
    if (err) {
        error_propagate(errp, err);
        goto error;
    }

    /* Get a copy of config space */
    ret = pread(vdev->vbasedev.fd, vdev->pdev.config,
                MIN(pci_config_size(&vdev->pdev), vdev->config_size),
                vdev->config_offset);
    if (ret < (int)MIN(pci_config_size(&vdev->pdev), vdev->config_size)) {
        ret = ret < 0 ? -errno : -EFAULT;
        error_setg_errno(errp, -ret, "failed to read device config space");
        goto error;
    }

    /* vfio emulates a lot for us, but some bits need extra love */
    vdev->emulated_config_bits = g_malloc0(vdev->config_size);

    /* QEMU can choose to expose the ROM or not */
    memset(vdev->emulated_config_bits + PCI_ROM_ADDRESS, 0xff, 4);
    /* QEMU can also add or extend BARs */
    memset(vdev->emulated_config_bits + PCI_BASE_ADDRESS_0, 0xff, 6 * 4);
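
    /*
     * For reference: a set bit in emulated_config_bits means config space
     * reads return QEMU's emulated copy of that bit; a clear bit defers to
     * the value read through the vfio kernel driver.  The memsets above
     * take over the ROM BAR dword and all six BAR dwords.
     */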

    /*
     * The PCI spec reserves vendor ID 0xffff as an invalid value.  The
     * device ID is managed by the vendor and need only be a 16-bit value.
     * Allow any 16-bit value for subsystem so they can be hidden or changed.
     */
    if (vdev->vendor_id != PCI_ANY_ID) {
        if (vdev->vendor_id >= 0xffff) {
            error_setg(errp, "invalid PCI vendor ID provided");
            goto error;
        }
        vfio_add_emulated_word(vdev, PCI_VENDOR_ID, vdev->vendor_id, ~0);
        trace_vfio_pci_emulated_vendor_id(vdev->vbasedev.name, vdev->vendor_id);
    } else {
        vdev->vendor_id = pci_get_word(pdev->config + PCI_VENDOR_ID);
    }

    if (vdev->device_id != PCI_ANY_ID) {
        if (vdev->device_id > 0xffff) {
            error_setg(errp, "invalid PCI device ID provided");
            goto error;
        }
        vfio_add_emulated_word(vdev, PCI_DEVICE_ID, vdev->device_id, ~0);
        trace_vfio_pci_emulated_device_id(vdev->vbasedev.name, vdev->device_id);
    } else {
        vdev->device_id = pci_get_word(pdev->config + PCI_DEVICE_ID);
    }

    if (vdev->sub_vendor_id != PCI_ANY_ID) {
        if (vdev->sub_vendor_id > 0xffff) {
            error_setg(errp, "invalid PCI subsystem vendor ID provided");
            goto error;
        }
        vfio_add_emulated_word(vdev, PCI_SUBSYSTEM_VENDOR_ID,
                               vdev->sub_vendor_id, ~0);
        trace_vfio_pci_emulated_sub_vendor_id(vdev->vbasedev.name,
                                              vdev->sub_vendor_id);
    }

    if (vdev->sub_device_id != PCI_ANY_ID) {
        if (vdev->sub_device_id > 0xffff) {
            error_setg(errp, "invalid PCI subsystem device ID provided");
            goto error;
        }
        vfio_add_emulated_word(vdev, PCI_SUBSYSTEM_ID, vdev->sub_device_id, ~0);
        trace_vfio_pci_emulated_sub_device_id(vdev->vbasedev.name,
                                              vdev->sub_device_id);
    }
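
    /*
     * Usage example (illustration only): the overrides above let a user
     * mask the device identity, e.g.
     *   -device vfio-pci,host=0000:06:0d.0,x-pci-vendor-id=0x1234
     * which takes the vfio_add_emulated_word() path instead of exposing
     * the physical vendor ID.
     */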

    /* QEMU can change multi-function devices to single function, or reverse */
    vdev->emulated_config_bits[PCI_HEADER_TYPE] =
                                              PCI_HEADER_TYPE_MULTI_FUNCTION;

    /* Restore or clear multifunction, this is always controlled by QEMU */
    if (vdev->pdev.cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
        vdev->pdev.config[PCI_HEADER_TYPE] |= PCI_HEADER_TYPE_MULTI_FUNCTION;
    } else {
        vdev->pdev.config[PCI_HEADER_TYPE] &= ~PCI_HEADER_TYPE_MULTI_FUNCTION;
    }

    /*
     * Clear host resource mapping info.  If we choose not to register a
     * BAR, such as might be the case with the option ROM, we can get
     * confusing, unwritable, residual addresses from the host here.
     */
    memset(&vdev->pdev.config[PCI_BASE_ADDRESS_0], 0, 24);
    memset(&vdev->pdev.config[PCI_ROM_ADDRESS], 0, 4);

    vfio_pci_size_rom(vdev);

    vfio_bars_prepare(vdev);

    vfio_msix_early_setup(vdev, &err);
    if (err) {
        error_propagate(errp, err);
        goto error;
    }

    vfio_bars_register(vdev);

    ret = vfio_add_capabilities(vdev, errp);
    if (ret) {
        goto out_teardown;
    }

    if (vdev->vga) {
        vfio_vga_quirk_setup(vdev);
    }

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        vfio_bar_quirk_setup(vdev, i);
    }

    if (!vdev->igd_opregion &&
        vdev->features & VFIO_FEATURE_ENABLE_IGD_OPREGION) {
        struct vfio_region_info *opregion;

        if (vdev->pdev.qdev.hotplugged) {
            error_setg(errp,
                       "cannot support IGD OpRegion feature on hotplugged "
                       "device");
            goto out_teardown;
        }

        ret = vfio_get_dev_region_info(&vdev->vbasedev,
                        VFIO_REGION_TYPE_PCI_VENDOR_TYPE | PCI_VENDOR_ID_INTEL,
                        VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION, &opregion);
        if (ret) {
            error_setg_errno(errp, -ret,
                             "does not support requested IGD OpRegion feature");
            goto out_teardown;
        }

        ret = vfio_pci_igd_opregion_init(vdev, opregion, errp);
        g_free(opregion);
        if (ret) {
            goto out_teardown;
        }
    }

    /* QEMU emulates all of MSI & MSIX */
    if (pdev->cap_present & QEMU_PCI_CAP_MSIX) {
        memset(vdev->emulated_config_bits + pdev->msix_cap, 0xff,
               MSIX_CAP_LENGTH);
    }

    if (pdev->cap_present & QEMU_PCI_CAP_MSI) {
        memset(vdev->emulated_config_bits + pdev->msi_cap, 0xff,
               vdev->msi_cap_size);
    }

    if (vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1)) {
        vdev->intx.mmap_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                             vfio_intx_mmap_enable, vdev);
        pci_device_set_intx_routing_notifier(&vdev->pdev, vfio_intx_update);
        ret = vfio_intx_enable(vdev, errp);
        if (ret) {
            goto out_teardown;
        }
    }

    if (vdev->display != ON_OFF_AUTO_OFF) {
        ret = vfio_display_probe(vdev, errp);
        if (ret) {
            goto out_teardown;
        }
    }
    if (vdev->enable_ramfb && vdev->dpy == NULL) {
        error_setg(errp, "ramfb=on requires display=on");
        goto out_teardown;
    }
    if (vdev->display_xres || vdev->display_yres) {
        if (vdev->dpy == NULL) {
            error_setg(errp, "xres and yres properties require display=on");
            goto out_teardown;
        }
        if (vdev->dpy->edid_regs == NULL) {
            error_setg(errp, "xres and yres properties need edid support");
            goto out_teardown;
        }
    }

    if (vdev->vendor_id == PCI_VENDOR_ID_NVIDIA) {
        ret = vfio_pci_nvidia_v100_ram_init(vdev, errp);
        if (ret && ret != -ENODEV) {
            error_report("Failed to setup NVIDIA V100 GPU RAM");
            goto out_teardown;
        }
    }

    if (vdev->vendor_id == PCI_VENDOR_ID_IBM) {
        ret = vfio_pci_nvlink2_init(vdev, errp);
        if (ret && ret != -ENODEV) {
            error_report("Failed to setup NVlink2 bridge");
            goto out_teardown;
        }
    }

    vfio_register_err_notifier(vdev);
    vfio_register_req_notifier(vdev);
    vfio_setup_resetfn_quirk(vdev);

    return;

out_teardown:
    pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
    vfio_teardown_msi(vdev);
    vfio_bars_exit(vdev);
error:
    error_prepend(errp, VFIO_MSG_PREFIX, vdev->vbasedev.name);
}

static void vfio_instance_finalize(Object *obj)
{
    VFIOPCIDevice *vdev = PCI_VFIO(obj);
    VFIOGroup *group = vdev->vbasedev.group;

    vfio_display_finalize(vdev);
    vfio_bars_finalize(vdev);
    g_free(vdev->emulated_config_bits);
    g_free(vdev->rom);
    /*
     * XXX Leaking igd_opregion is not an oversight, we can't remove the
     * fw_cfg entry therefore leaking this allocation seems like the safest
     * option.
     *
     * g_free(vdev->igd_opregion);
     */
    vfio_put_device(vdev);
    vfio_put_group(group);
}

static void vfio_exitfn(PCIDevice *pdev)
{
    VFIOPCIDevice *vdev = PCI_VFIO(pdev);

    vfio_unregister_req_notifier(vdev);
    vfio_unregister_err_notifier(vdev);
    pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
    vfio_disable_interrupts(vdev);
    if (vdev->intx.mmap_timer) {
        timer_free(vdev->intx.mmap_timer);
    }
    vfio_teardown_msi(vdev);
    vfio_bars_exit(vdev);
}

static void vfio_pci_reset(DeviceState *dev)
{
    VFIOPCIDevice *vdev = PCI_VFIO(dev);

    trace_vfio_pci_reset(vdev->vbasedev.name);

    vfio_pci_pre_reset(vdev);

    if (vdev->display != ON_OFF_AUTO_OFF) {
        vfio_display_reset(vdev);
    }

    if (vdev->resetfn && !vdev->resetfn(vdev)) {
        goto post_reset;
    }

    if (vdev->vbasedev.reset_works &&
        (vdev->has_flr || !vdev->has_pm_reset) &&
        !ioctl(vdev->vbasedev.fd, VFIO_DEVICE_RESET)) {
        trace_vfio_pci_reset_flr(vdev->vbasedev.name);
        goto post_reset;
    }

    /* See if we can do our own bus reset */
    if (!vfio_pci_hot_reset_one(vdev)) {
        goto post_reset;
    }

    /* If nothing else works and the device supports PM reset, use it */
    if (vdev->vbasedev.reset_works && vdev->has_pm_reset &&
        !ioctl(vdev->vbasedev.fd, VFIO_DEVICE_RESET)) {
        trace_vfio_pci_reset_pm(vdev->vbasedev.name);
        goto post_reset;
    }

post_reset:
    vfio_pci_post_reset(vdev);
}
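
/*
 * Summary of the escalation above: a device-specific resetfn quirk runs
 * first, then FLR via VFIO_DEVICE_RESET (preferred over PM reset), then a
 * hot/bus reset limited to a single in-use device, and finally PM reset as
 * the last resort.
 */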

static void vfio_instance_init(Object *obj)
{
    PCIDevice *pci_dev = PCI_DEVICE(obj);
    VFIOPCIDevice *vdev = PCI_VFIO(obj);

    device_add_bootindex_property(obj, &vdev->bootindex,
                                  "bootindex", NULL,
                                  &pci_dev->qdev, NULL);
    vdev->host.domain = ~0U;
    vdev->host.bus = ~0U;
    vdev->host.slot = ~0U;
    vdev->host.function = ~0U;

    vdev->nv_gpudirect_clique = 0xFF;

    /* QEMU_PCI_CAP_EXPRESS initialization does not depend on QEMU command
     * line, therefore, no need to wait to realize like other devices */
    pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS;
}

static Property vfio_pci_dev_properties[] = {
    DEFINE_PROP_PCI_HOST_DEVADDR("host", VFIOPCIDevice, host),
    DEFINE_PROP_STRING("sysfsdev", VFIOPCIDevice, vbasedev.sysfsdev),
    DEFINE_PROP_ON_OFF_AUTO("display", VFIOPCIDevice,
                            display, ON_OFF_AUTO_OFF),
    DEFINE_PROP_UINT32("xres", VFIOPCIDevice, display_xres, 0),
    DEFINE_PROP_UINT32("yres", VFIOPCIDevice, display_yres, 0),
    DEFINE_PROP_UINT32("x-intx-mmap-timeout-ms", VFIOPCIDevice,
                       intx.mmap_timeout, 1100),
    DEFINE_PROP_BIT("x-vga", VFIOPCIDevice, features,
                    VFIO_FEATURE_ENABLE_VGA_BIT, false),
    DEFINE_PROP_BIT("x-req", VFIOPCIDevice, features,
                    VFIO_FEATURE_ENABLE_REQ_BIT, true),
    DEFINE_PROP_BIT("x-igd-opregion", VFIOPCIDevice, features,
                    VFIO_FEATURE_ENABLE_IGD_OPREGION_BIT, false),
    DEFINE_PROP_BOOL("x-no-mmap", VFIOPCIDevice, vbasedev.no_mmap, false),
    DEFINE_PROP_BOOL("x-balloon-allowed", VFIOPCIDevice,
                     vbasedev.balloon_allowed, false),
    DEFINE_PROP_BOOL("x-no-kvm-intx", VFIOPCIDevice, no_kvm_intx, false),
    DEFINE_PROP_BOOL("x-no-kvm-msi", VFIOPCIDevice, no_kvm_msi, false),
    DEFINE_PROP_BOOL("x-no-kvm-msix", VFIOPCIDevice, no_kvm_msix, false),
    DEFINE_PROP_BOOL("x-no-geforce-quirks", VFIOPCIDevice,
                     no_geforce_quirks, false),
    DEFINE_PROP_BOOL("x-no-kvm-ioeventfd", VFIOPCIDevice, no_kvm_ioeventfd,
                     false),
    DEFINE_PROP_BOOL("x-no-vfio-ioeventfd", VFIOPCIDevice, no_vfio_ioeventfd,
                     false),
    DEFINE_PROP_UINT32("x-pci-vendor-id", VFIOPCIDevice, vendor_id, PCI_ANY_ID),
    DEFINE_PROP_UINT32("x-pci-device-id", VFIOPCIDevice, device_id, PCI_ANY_ID),
    DEFINE_PROP_UINT32("x-pci-sub-vendor-id", VFIOPCIDevice,
                       sub_vendor_id, PCI_ANY_ID),
    DEFINE_PROP_UINT32("x-pci-sub-device-id", VFIOPCIDevice,
                       sub_device_id, PCI_ANY_ID),
    DEFINE_PROP_UINT32("x-igd-gms", VFIOPCIDevice, igd_gms, 0),
    DEFINE_PROP_UNSIGNED_NODEFAULT("x-nv-gpudirect-clique", VFIOPCIDevice,
                                   nv_gpudirect_clique,
                                   qdev_prop_nv_gpudirect_clique, uint8_t),
    DEFINE_PROP_OFF_AUTO_PCIBAR("x-msix-relocation", VFIOPCIDevice, msix_relo,
                                OFF_AUTOPCIBAR_OFF),
    /*
     * TODO - support passed fds... is this necessary?
     * DEFINE_PROP_STRING("vfiofd", VFIOPCIDevice, vfiofd_name),
     * DEFINE_PROP_STRING("vfiogroupfd", VFIOPCIDevice, vfiogroupfd_name),
     */
    DEFINE_PROP_END_OF_LIST(),
};
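
/*
 * Usage example (illustration only): the experimental "x-" properties above
 * are set per device instance, e.g.
 *   -device vfio-pci,host=0000:01:00.0,x-no-mmap=on,x-intx-mmap-timeout-ms=0
 * disables BAR mmaps entirely and turns off the INTx mmap re-enable timer.
 */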

static const VMStateDescription vfio_pci_vmstate = {
    .name = "vfio-pci",
    .unmigratable = 1,
};

static void vfio_pci_dev_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *pdc = PCI_DEVICE_CLASS(klass);

    dc->reset = vfio_pci_reset;
    dc->props = vfio_pci_dev_properties;
    dc->vmsd = &vfio_pci_vmstate;
    dc->desc = "VFIO-based PCI device assignment";
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    pdc->realize = vfio_realize;
    pdc->exit = vfio_exitfn;
    pdc->config_read = vfio_pci_read_config;
    pdc->config_write = vfio_pci_write_config;
}

static const TypeInfo vfio_pci_dev_info = {
    .name = TYPE_VFIO_PCI,
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VFIOPCIDevice),
    .class_init = vfio_pci_dev_class_init,
    .instance_init = vfio_instance_init,
    .instance_finalize = vfio_instance_finalize,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_PCIE_DEVICE },
        { INTERFACE_CONVENTIONAL_PCI_DEVICE },
        { }
    },
};

static Property vfio_pci_dev_nohotplug_properties[] = {
    DEFINE_PROP_BOOL("ramfb", VFIOPCIDevice, enable_ramfb, false),
    DEFINE_PROP_END_OF_LIST(),
};

static void vfio_pci_nohotplug_dev_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->props = vfio_pci_dev_nohotplug_properties;
    dc->hotpluggable = false;
}

static const TypeInfo vfio_pci_nohotplug_dev_info = {
    .name = TYPE_VIFO_PCI_NOHOTPLUG,
    .parent = TYPE_VFIO_PCI,
    .instance_size = sizeof(VFIOPCIDevice),
    .class_init = vfio_pci_nohotplug_dev_class_init,
};

static void register_vfio_pci_dev_type(void)
{
    type_register_static(&vfio_pci_dev_info);
    type_register_static(&vfio_pci_nohotplug_dev_info);
}

type_init(register_vfio_pci_dev_type)