/*
 * vfio based device assignment support
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Alex Williamson <alex.williamson@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Based on qemu-kvm device-assignment:
 *  Adapted for KVM by Qumranet.
 *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
 *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
 *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
 *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
 *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
 */

#include "qemu/osdep.h"
#include <linux/vfio.h>
#include <sys/ioctl.h>

#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/pci/pci_bridge.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "qemu/option.h"
#include "qemu/range.h"
#include "qemu/units.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "pci.h"
#include "trace.h"
#include "qapi/error.h"

#define TYPE_VFIO_PCI "vfio-pci"
#define PCI_VFIO(obj)    OBJECT_CHECK(VFIOPCIDevice, obj, TYPE_VFIO_PCI)

#define TYPE_VFIO_PCI_NOHOTPLUG "vfio-pci-nohotplug"

static void vfio_disable_interrupts(VFIOPCIDevice *vdev);
static void vfio_mmap_set_enabled(VFIOPCIDevice *vdev, bool enabled);

/*
 * Disabling BAR mmapping can be slow, but toggling it around INTx can
 * also be a huge overhead.  We try to get the best of both worlds by
 * waiting until an interrupt to disable mmaps (subsequent transitions
 * to the same state are effectively no overhead).  If the interrupt has
 * been serviced and the time gap is long enough, we re-enable mmaps for
 * performance.  This works well for things like graphics cards, which
 * may not use their interrupt at all and are penalized to an unusable
 * level by read/write BAR traps.  Other devices, like NICs, have more
 * regular interrupts and see much better latency by staying in non-mmap
 * mode.  We therefore set the default mmap_timeout such that a ping
 * is just enough to keep the mmap disabled.  Users can experiment with
 * other options with the x-intx-mmap-timeout-ms parameter (a value of
 * zero disables the timer).
 */

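/*
 * For example (illustrative values, not a recommendation): a guest that
 * pings once a second generates INTx frequently enough that the default
 * timeout keeps mmaps disabled, while a mostly idle GPU lets the timer
 * below re-enable them.  The timer can be turned off entirely with e.g.
 *   -device vfio-pci,host=01:00.0,x-intx-mmap-timeout-ms=0
 * (the host address here is a placeholder).
 */
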
static void vfio_intx_mmap_enable(void *opaque)
{
    VFIOPCIDevice *vdev = opaque;

    if (vdev->intx.pending) {
        timer_mod(vdev->intx.mmap_timer,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
                      vdev->intx.mmap_timeout);
        return;
    }

    vfio_mmap_set_enabled(vdev, true);
}

static void vfio_intx_interrupt(void *opaque)
{
    VFIOPCIDevice *vdev = opaque;

    if (!event_notifier_test_and_clear(&vdev->intx.interrupt)) {
        return;
    }

    trace_vfio_intx_interrupt(vdev->vbasedev.name, 'A' + vdev->intx.pin);

    vdev->intx.pending = true;
    pci_irq_assert(&vdev->pdev);
    vfio_mmap_set_enabled(vdev, false);
    if (vdev->intx.mmap_timeout) {
        timer_mod(vdev->intx.mmap_timer,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
                      vdev->intx.mmap_timeout);
    }
}

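/*
 * INTx is level triggered and automasked by the host VFIO driver when it
 * fires; the unmask on guest EOI below is what re-arms the interrupt in
 * the QEMU (non-KVM accelerated) delivery path.
 */
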
static void vfio_intx_eoi(VFIODevice *vbasedev)
{
    VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);

    if (!vdev->intx.pending) {
        return;
    }

    trace_vfio_intx_eoi(vbasedev->name);

    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);
    vfio_unmask_single_irqindex(vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
}

static void vfio_intx_enable_kvm(VFIOPCIDevice *vdev, Error **errp)
{
#ifdef CONFIG_KVM
    struct kvm_irqfd irqfd = {
        .fd = event_notifier_get_fd(&vdev->intx.interrupt),
        .gsi = vdev->intx.route.irq,
        .flags = KVM_IRQFD_FLAG_RESAMPLE,
    };
    Error *err = NULL;

    if (vdev->no_kvm_intx || !kvm_irqfds_enabled() ||
        vdev->intx.route.mode != PCI_INTX_ENABLED ||
        !kvm_resamplefds_enabled()) {
        return;
    }

    /* Get to a known interrupt state */
    qemu_set_fd_handler(irqfd.fd, NULL, NULL, vdev);
    vfio_mask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);

    /* Get an eventfd for resample/unmask */
    if (event_notifier_init(&vdev->intx.unmask, 0)) {
        error_setg(errp, "event_notifier_init failed eoi");
        goto fail;
    }

    /* KVM triggers it, VFIO listens for it */
    irqfd.resamplefd = event_notifier_get_fd(&vdev->intx.unmask);

    if (kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd)) {
        error_setg_errno(errp, errno, "failed to setup resample irqfd");
        goto fail_irqfd;
    }

    if (vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX, 0,
                               VFIO_IRQ_SET_ACTION_UNMASK,
                               irqfd.resamplefd, &err)) {
        error_propagate(errp, err);
        goto fail_vfio;
    }

    /* Let'em rip */
    vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);

    vdev->intx.kvm_accel = true;

    trace_vfio_intx_enable_kvm(vdev->vbasedev.name);

    return;

fail_vfio:
    irqfd.flags = KVM_IRQFD_FLAG_DEASSIGN;
    kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd);
fail_irqfd:
    event_notifier_cleanup(&vdev->intx.unmask);
fail:
    qemu_set_fd_handler(irqfd.fd, vfio_intx_interrupt, NULL, vdev);
    vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
#endif
}

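/*
 * Sketch of the fast path that the setup above establishes (assuming KVM
 * irqfd and resamplefd support):
 *
 *   device INTx --> intx.interrupt eventfd --> KVM irqfd --> guest IRQ
 *   guest EOI --> resamplefd (intx.unmask) --> VFIO unmask --> re-arm
 *
 * QEMU drops out of the delivery path entirely until the fail paths or
 * vfio_intx_disable_kvm() reinstall the fd handler.
 */
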
static void vfio_intx_disable_kvm(VFIOPCIDevice *vdev)
{
#ifdef CONFIG_KVM
    struct kvm_irqfd irqfd = {
        .fd = event_notifier_get_fd(&vdev->intx.interrupt),
        .gsi = vdev->intx.route.irq,
        .flags = KVM_IRQFD_FLAG_DEASSIGN,
    };

    if (!vdev->intx.kvm_accel) {
        return;
    }

    /*
     * Get to a known state, hardware masked, QEMU ready to accept new
     * interrupts, QEMU IRQ de-asserted.
     */
    vfio_mask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);

    /* Tell KVM to stop listening for an INTx irqfd */
    if (kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd)) {
        error_report("vfio: Error: Failed to disable INTx irqfd: %m");
    }

    /* We only need to close the eventfd for VFIO to cleanup the kernel side */
    event_notifier_cleanup(&vdev->intx.unmask);

    /* QEMU starts listening for interrupt events. */
    qemu_set_fd_handler(irqfd.fd, vfio_intx_interrupt, NULL, vdev);

    vdev->intx.kvm_accel = false;

    /* If we've missed an event, let it re-fire through QEMU */
    vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);

    trace_vfio_intx_disable_kvm(vdev->vbasedev.name);
#endif
}

static void vfio_intx_update(PCIDevice *pdev)
{
    VFIOPCIDevice *vdev = PCI_VFIO(pdev);
    PCIINTxRoute route;
    Error *err = NULL;

    if (vdev->interrupt != VFIO_INT_INTx) {
        return;
    }

    route = pci_device_route_intx_to_irq(&vdev->pdev, vdev->intx.pin);

    if (!pci_intx_route_changed(&vdev->intx.route, &route)) {
        return; /* Nothing changed */
    }

    trace_vfio_intx_update(vdev->vbasedev.name,
                           vdev->intx.route.irq, route.irq);

    vfio_intx_disable_kvm(vdev);

    vdev->intx.route = route;

    if (route.mode != PCI_INTX_ENABLED) {
        return;
    }

    vfio_intx_enable_kvm(vdev, &err);
    if (err) {
        warn_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
    }

    /* Re-enable the interrupt in case we missed an EOI */
    vfio_intx_eoi(&vdev->vbasedev);
}

static int vfio_intx_enable(VFIOPCIDevice *vdev, Error **errp)
{
    uint8_t pin = vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1);
    Error *err = NULL;
    int32_t fd;
    int ret;

    if (!pin) {
        return 0;
    }

    vfio_disable_interrupts(vdev);

    vdev->intx.pin = pin - 1; /* Pin A (1) -> irq[0] */
    pci_config_set_interrupt_pin(vdev->pdev.config, pin);

#ifdef CONFIG_KVM
    /*
     * Only conditional to avoid generating error messages on platforms
     * where we won't actually use the result anyway.
     */
    if (kvm_irqfds_enabled() && kvm_resamplefds_enabled()) {
        vdev->intx.route = pci_device_route_intx_to_irq(&vdev->pdev,
                                                        vdev->intx.pin);
    }
#endif

    ret = event_notifier_init(&vdev->intx.interrupt, 0);
    if (ret) {
        error_setg_errno(errp, -ret, "event_notifier_init failed");
        return ret;
    }
    fd = event_notifier_get_fd(&vdev->intx.interrupt);
    qemu_set_fd_handler(fd, vfio_intx_interrupt, NULL, vdev);

    if (vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX, 0,
                               VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err)) {
        error_propagate(errp, err);
        qemu_set_fd_handler(fd, NULL, NULL, vdev);
        event_notifier_cleanup(&vdev->intx.interrupt);
        return -errno;
    }

    vfio_intx_enable_kvm(vdev, &err);
    if (err) {
        warn_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
    }

    vdev->interrupt = VFIO_INT_INTx;

    trace_vfio_intx_enable(vdev->vbasedev.name);
    return 0;
}

static void vfio_intx_disable(VFIOPCIDevice *vdev)
{
    int fd;

    timer_del(vdev->intx.mmap_timer);
    vfio_intx_disable_kvm(vdev);
    vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);
    vfio_mmap_set_enabled(vdev, true);

    fd = event_notifier_get_fd(&vdev->intx.interrupt);
    qemu_set_fd_handler(fd, NULL, NULL, vdev);
    event_notifier_cleanup(&vdev->intx.interrupt);

    vdev->interrupt = VFIO_INT_NONE;

    trace_vfio_intx_disable(vdev->vbasedev.name);
}

static void vfio_msi_interrupt(void *opaque)
{
    VFIOMSIVector *vector = opaque;
    VFIOPCIDevice *vdev = vector->vdev;
    MSIMessage (*get_msg)(PCIDevice *dev, unsigned vector);
    void (*notify)(PCIDevice *dev, unsigned vector);
    MSIMessage msg;
    int nr = vector - vdev->msi_vectors;

    if (!event_notifier_test_and_clear(&vector->interrupt)) {
        return;
    }

    if (vdev->interrupt == VFIO_INT_MSIX) {
        get_msg = msix_get_message;
        notify = msix_notify;

        /* A masked vector firing needs to use the PBA, enable it */
        if (msix_is_masked(&vdev->pdev, nr)) {
            set_bit(nr, vdev->msix->pending);
            memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, true);
            trace_vfio_msix_pba_enable(vdev->vbasedev.name);
        }
    } else if (vdev->interrupt == VFIO_INT_MSI) {
        get_msg = msi_get_message;
        notify = msi_notify;
    } else {
        abort();
    }

    msg = get_msg(&vdev->pdev, nr);
    trace_vfio_msi_interrupt(vdev->vbasedev.name, nr, msg.address, msg.data);
    notify(&vdev->pdev, nr);
}

static int vfio_enable_vectors(VFIOPCIDevice *vdev, bool msix)
{
    struct vfio_irq_set *irq_set;
    int ret = 0, i, argsz;
    int32_t *fds;

    argsz = sizeof(*irq_set) + (vdev->nr_vectors * sizeof(*fds));

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = msix ? VFIO_PCI_MSIX_IRQ_INDEX : VFIO_PCI_MSI_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = vdev->nr_vectors;
    fds = (int32_t *)&irq_set->data;

    for (i = 0; i < vdev->nr_vectors; i++) {
        int32_t fd = -1;

        /*
         * MSI vs MSI-X - The guest has direct access to MSI mask and pending
         * bits, therefore we always use the KVM signaling path when setup.
         * MSI-X mask and pending bits are emulated, so we want to use the
         * KVM signaling path only when configured and unmasked.
         */
        if (vdev->msi_vectors[i].use) {
            if (vdev->msi_vectors[i].virq < 0 ||
                (msix && msix_is_masked(&vdev->pdev, i))) {
                fd = event_notifier_get_fd(&vdev->msi_vectors[i].interrupt);
            } else {
                fd = event_notifier_get_fd(&vdev->msi_vectors[i].kvm_interrupt);
            }
        }

        fds[i] = fd;
    }

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);

    g_free(irq_set);

    return ret;
}

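/*
 * The buffer passed above follows the <linux/vfio.h> UAPI layout: a
 * struct vfio_irq_set header immediately followed by count int32_t
 * eventfds.  As an illustration, three vectors with vector 1 unused
 * would be sent as { .index = MSI/MSI-X, .start = 0, .count = 3 } with
 * data = { fd0, -1, fd2 }, where -1 leaves that vector untriggered.
 */
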
static void vfio_add_kvm_msi_virq(VFIOPCIDevice *vdev, VFIOMSIVector *vector,
                                  int vector_n, bool msix)
{
    int virq;

    if ((msix && vdev->no_kvm_msix) || (!msix && vdev->no_kvm_msi)) {
        return;
    }

    if (event_notifier_init(&vector->kvm_interrupt, 0)) {
        return;
    }

    virq = kvm_irqchip_add_msi_route(kvm_state, vector_n, &vdev->pdev);
    if (virq < 0) {
        event_notifier_cleanup(&vector->kvm_interrupt);
        return;
    }

    if (kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, &vector->kvm_interrupt,
                                           NULL, virq) < 0) {
        kvm_irqchip_release_virq(kvm_state, virq);
        event_notifier_cleanup(&vector->kvm_interrupt);
        return;
    }

    vector->virq = virq;
}

static void vfio_remove_kvm_msi_virq(VFIOMSIVector *vector)
{
    kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, &vector->kvm_interrupt,
                                          vector->virq);
    kvm_irqchip_release_virq(kvm_state, vector->virq);
    vector->virq = -1;
    event_notifier_cleanup(&vector->kvm_interrupt);
}

static void vfio_update_kvm_msi_virq(VFIOMSIVector *vector, MSIMessage msg,
                                     PCIDevice *pdev)
{
    kvm_irqchip_update_msi_route(kvm_state, vector->virq, msg, pdev);
    kvm_irqchip_commit_routes(kvm_state);
}

static int vfio_msix_vector_do_use(PCIDevice *pdev, unsigned int nr,
                                   MSIMessage *msg, IOHandler *handler)
{
    VFIOPCIDevice *vdev = PCI_VFIO(pdev);
    VFIOMSIVector *vector;
    int ret;

    trace_vfio_msix_vector_do_use(vdev->vbasedev.name, nr);

    vector = &vdev->msi_vectors[nr];

    if (!vector->use) {
        vector->vdev = vdev;
        vector->virq = -1;
        if (event_notifier_init(&vector->interrupt, 0)) {
            error_report("vfio: Error: event_notifier_init failed");
        }
        vector->use = true;
        msix_vector_use(pdev, nr);
    }

    qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                        handler, NULL, vector);

    /*
     * Attempt to enable route through KVM irqchip,
     * default to userspace handling if unavailable.
     */
    if (vector->virq >= 0) {
        if (!msg) {
            vfio_remove_kvm_msi_virq(vector);
        } else {
            vfio_update_kvm_msi_virq(vector, *msg, pdev);
        }
    } else {
        if (msg) {
            vfio_add_kvm_msi_virq(vdev, vector, nr, true);
        }
    }

    /*
     * We don't want to have the host allocate all possible MSI vectors
     * for a device if they're not in use, so we shutdown and incrementally
     * increase them as needed.
     */
    if (vdev->nr_vectors < nr + 1) {
        vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX);
        vdev->nr_vectors = nr + 1;
        ret = vfio_enable_vectors(vdev, true);
        if (ret) {
            error_report("vfio: failed to enable vectors, %d", ret);
        }
    } else {
        Error *err = NULL;
        int32_t fd;

        if (vector->virq >= 0) {
            fd = event_notifier_get_fd(&vector->kvm_interrupt);
        } else {
            fd = event_notifier_get_fd(&vector->interrupt);
        }

        if (vfio_set_irq_signaling(&vdev->vbasedev,
                                   VFIO_PCI_MSIX_IRQ_INDEX, nr,
                                   VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err)) {
            error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
        }
    }

    /* Disable PBA emulation when nothing more is pending. */
    clear_bit(nr, vdev->msix->pending);
    if (find_first_bit(vdev->msix->pending,
                       vdev->nr_vectors) == vdev->nr_vectors) {
        memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, false);
        trace_vfio_msix_pba_disable(vdev->vbasedev.name);
    }

    return 0;
}

static int vfio_msix_vector_use(PCIDevice *pdev,
                                unsigned int nr, MSIMessage msg)
{
    return vfio_msix_vector_do_use(pdev, nr, &msg, vfio_msi_interrupt);
}

static void vfio_msix_vector_release(PCIDevice *pdev, unsigned int nr)
{
    VFIOPCIDevice *vdev = PCI_VFIO(pdev);
    VFIOMSIVector *vector = &vdev->msi_vectors[nr];

    trace_vfio_msix_vector_release(vdev->vbasedev.name, nr);

    /*
     * There are still old guests that mask and unmask vectors on every
     * interrupt.  If we're using QEMU bypass with a KVM irqfd, leave all of
     * the KVM setup in place, simply switch VFIO to use the non-bypass
     * eventfd.  We'll then fire the interrupt through QEMU and the MSI-X
     * core will mask the interrupt and set pending bits, allowing it to
     * be re-asserted on unmask.  Nothing to do if already using QEMU mode.
     */
    if (vector->virq >= 0) {
        int32_t fd = event_notifier_get_fd(&vector->interrupt);

        vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX, nr,
                               VFIO_IRQ_SET_ACTION_TRIGGER, fd, NULL);
    }
}

static void vfio_msix_enable(VFIOPCIDevice *vdev)
{
    vfio_disable_interrupts(vdev);

    vdev->msi_vectors = g_new0(VFIOMSIVector, vdev->msix->entries);

    vdev->interrupt = VFIO_INT_MSIX;

    /*
     * Some communication channels between VF & PF or PF & fw rely on the
     * physical state of the device and expect that enabling MSI-X from the
     * guest enables the same on the host.  When our guest is Linux, the
     * guest driver call to pci_enable_msix() sets the enabling bit in the
     * MSI-X capability, but leaves the vector table masked.  We therefore
     * can't rely on a vector_use callback (from request_irq() in the guest)
     * to switch the physical device into MSI-X mode because that may come a
     * long time after pci_enable_msix().  This code enables vector 0 with
     * triggering to userspace, then immediately releases the vector, leaving
     * the physical device with no vectors enabled, but MSI-X enabled, just
     * like the guest view.
     */
    vfio_msix_vector_do_use(&vdev->pdev, 0, NULL, NULL);
    vfio_msix_vector_release(&vdev->pdev, 0);

    if (msix_set_vector_notifiers(&vdev->pdev, vfio_msix_vector_use,
                                  vfio_msix_vector_release, NULL)) {
        error_report("vfio: msix_set_vector_notifiers failed");
    }

    trace_vfio_msix_enable(vdev->vbasedev.name);
}

static void vfio_msi_enable(VFIOPCIDevice *vdev)
{
    int ret, i;

    vfio_disable_interrupts(vdev);

    vdev->nr_vectors = msi_nr_vectors_allocated(&vdev->pdev);
retry:
    vdev->msi_vectors = g_new0(VFIOMSIVector, vdev->nr_vectors);

    for (i = 0; i < vdev->nr_vectors; i++) {
        VFIOMSIVector *vector = &vdev->msi_vectors[i];

        vector->vdev = vdev;
        vector->virq = -1;
        vector->use = true;

        if (event_notifier_init(&vector->interrupt, 0)) {
            error_report("vfio: Error: event_notifier_init failed");
        }

        qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                            vfio_msi_interrupt, NULL, vector);

        /*
         * Attempt to enable route through KVM irqchip,
         * default to userspace handling if unavailable.
         */
        vfio_add_kvm_msi_virq(vdev, vector, i, false);
    }

    /* Set interrupt type prior to possible interrupts */
    vdev->interrupt = VFIO_INT_MSI;

    ret = vfio_enable_vectors(vdev, false);
    if (ret) {
        if (ret < 0) {
            error_report("vfio: Error: Failed to setup MSI fds: %m");
        } else if (ret != vdev->nr_vectors) {
            error_report("vfio: Error: Failed to enable %d "
                         "MSI vectors, retry with %d", vdev->nr_vectors, ret);
        }

        for (i = 0; i < vdev->nr_vectors; i++) {
            VFIOMSIVector *vector = &vdev->msi_vectors[i];
            if (vector->virq >= 0) {
                vfio_remove_kvm_msi_virq(vector);
            }
            qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                                NULL, NULL, NULL);
            event_notifier_cleanup(&vector->interrupt);
        }

        g_free(vdev->msi_vectors);
        vdev->msi_vectors = NULL;

        if (ret > 0 && ret != vdev->nr_vectors) {
            vdev->nr_vectors = ret;
            goto retry;
        }
        vdev->nr_vectors = 0;

        /*
         * Failing to setup MSI doesn't really fall within any specification.
         * Let's try leaving interrupts disabled and hope the guest figures
         * out to fall back to INTx for this device.
         */
        error_report("vfio: Error: Failed to enable MSI");
        vdev->interrupt = VFIO_INT_NONE;

        return;
    }

    trace_vfio_msi_enable(vdev->vbasedev.name, vdev->nr_vectors);
}

static void vfio_msi_disable_common(VFIOPCIDevice *vdev)
{
    Error *err = NULL;
    int i;

    for (i = 0; i < vdev->nr_vectors; i++) {
        VFIOMSIVector *vector = &vdev->msi_vectors[i];
        if (vdev->msi_vectors[i].use) {
            if (vector->virq >= 0) {
                vfio_remove_kvm_msi_virq(vector);
            }
            qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                                NULL, NULL, NULL);
            event_notifier_cleanup(&vector->interrupt);
        }
    }

    g_free(vdev->msi_vectors);
    vdev->msi_vectors = NULL;
    vdev->nr_vectors = 0;
    vdev->interrupt = VFIO_INT_NONE;

    vfio_intx_enable(vdev, &err);
    if (err) {
        error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
    }
}

static void vfio_msix_disable(VFIOPCIDevice *vdev)
{
    int i;

    msix_unset_vector_notifiers(&vdev->pdev);

    /*
     * MSI-X will only release vectors if MSI-X is still enabled on the
     * device, check through the rest and release it ourselves if necessary.
     */
    for (i = 0; i < vdev->nr_vectors; i++) {
        if (vdev->msi_vectors[i].use) {
            vfio_msix_vector_release(&vdev->pdev, i);
            msix_vector_unuse(&vdev->pdev, i);
        }
    }

    if (vdev->nr_vectors) {
        vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX);
    }

    vfio_msi_disable_common(vdev);

    memset(vdev->msix->pending, 0,
           BITS_TO_LONGS(vdev->msix->entries) * sizeof(unsigned long));

    trace_vfio_msix_disable(vdev->vbasedev.name);
}

static void vfio_msi_disable(VFIOPCIDevice *vdev)
{
    vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSI_IRQ_INDEX);
    vfio_msi_disable_common(vdev);

    trace_vfio_msi_disable(vdev->vbasedev.name);
}

static void vfio_update_msi(VFIOPCIDevice *vdev)
{
    int i;

    for (i = 0; i < vdev->nr_vectors; i++) {
        VFIOMSIVector *vector = &vdev->msi_vectors[i];
        MSIMessage msg;

        if (!vector->use || vector->virq < 0) {
            continue;
        }

        msg = msi_get_message(&vdev->pdev, i);
        vfio_update_kvm_msi_virq(vector, msg, &vdev->pdev);
    }
}

static void vfio_pci_load_rom(VFIOPCIDevice *vdev)
{
    struct vfio_region_info *reg_info;
    uint64_t size;
    off_t off = 0;
    ssize_t bytes;

    if (vfio_get_region_info(&vdev->vbasedev,
                             VFIO_PCI_ROM_REGION_INDEX, &reg_info)) {
        error_report("vfio: Error getting ROM info: %m");
        return;
    }

    trace_vfio_pci_load_rom(vdev->vbasedev.name, (unsigned long)reg_info->size,
                            (unsigned long)reg_info->offset,
                            (unsigned long)reg_info->flags);

    vdev->rom_size = size = reg_info->size;
    vdev->rom_offset = reg_info->offset;

    g_free(reg_info);

    if (!vdev->rom_size) {
        vdev->rom_read_failed = true;
        error_report("vfio-pci: Cannot read device rom at "
                     "%s", vdev->vbasedev.name);
        error_printf("Device option ROM contents are probably invalid "
                     "(check dmesg).\nSkip option ROM probe with rombar=0, "
                     "or load from file with romfile=\n");
        return;
    }

    vdev->rom = g_malloc(size);
    memset(vdev->rom, 0xff, size);

    while (size) {
        bytes = pread(vdev->vbasedev.fd, vdev->rom + off,
                      size, vdev->rom_offset + off);
        if (bytes == 0) {
            break;
        } else if (bytes > 0) {
            off += bytes;
            size -= bytes;
        } else {
            if (errno == EINTR || errno == EAGAIN) {
                continue;
            }
            error_report("vfio: Error reading device ROM: %m");
            break;
        }
    }

    /*
     * Test the ROM signature against our device, if the vendor is correct
     * but the device ID doesn't match, store the correct device ID and
     * recompute the checksum.  Intel IGD devices need this and are known
     * to have bogus checksums so we can't simply adjust the checksum.
     */
    if (pci_get_word(vdev->rom) == 0xaa55 &&
        pci_get_word(vdev->rom + 0x18) + 8 < vdev->rom_size &&
        !memcmp(vdev->rom + pci_get_word(vdev->rom + 0x18), "PCIR", 4)) {
        uint16_t vid, did;

        vid = pci_get_word(vdev->rom + pci_get_word(vdev->rom + 0x18) + 4);
        did = pci_get_word(vdev->rom + pci_get_word(vdev->rom + 0x18) + 6);

        if (vid == vdev->vendor_id && did != vdev->device_id) {
            int i;
            uint8_t csum, *data = vdev->rom;

            pci_set_word(vdev->rom + pci_get_word(vdev->rom + 0x18) + 6,
                         vdev->device_id);
            data[6] = 0;

            for (csum = 0, i = 0; i < vdev->rom_size; i++) {
                csum += data[i];
            }

            data[6] = -csum;
        }
    }
}

static uint64_t vfio_rom_read(void *opaque, hwaddr addr, unsigned size)
{
    VFIOPCIDevice *vdev = opaque;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } val;
    uint64_t data = 0;

    /* Load the ROM lazily when the guest tries to read it */
    if (unlikely(!vdev->rom && !vdev->rom_read_failed)) {
        vfio_pci_load_rom(vdev);
    }

    memcpy(&val, vdev->rom + addr,
           (addr < vdev->rom_size) ? MIN(size, vdev->rom_size - addr) : 0);

    switch (size) {
    case 1:
        data = val.byte;
        break;
    case 2:
        data = le16_to_cpu(val.word);
        break;
    case 4:
        data = le32_to_cpu(val.dword);
        break;
    default:
        hw_error("vfio: unsupported read size, %d bytes\n", size);
        break;
    }

    trace_vfio_rom_read(vdev->vbasedev.name, addr, size, data);

    return data;
}

static void vfio_rom_write(void *opaque, hwaddr addr,
                           uint64_t data, unsigned size)
{
}

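/*
 * Writes to the ROM BAR are intentionally discarded; the guest may size
 * and probe the ROM, but nothing is ever written through to the device.
 */
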
static const MemoryRegionOps vfio_rom_ops = {
    .read = vfio_rom_read,
    .write = vfio_rom_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static void vfio_pci_size_rom(VFIOPCIDevice *vdev)
{
    uint32_t orig, size = cpu_to_le32((uint32_t)PCI_ROM_ADDRESS_MASK);
    off_t offset = vdev->config_offset + PCI_ROM_ADDRESS;
    DeviceState *dev = DEVICE(vdev);
    char *name;
    int fd = vdev->vbasedev.fd;

    if (vdev->pdev.romfile || !vdev->pdev.rom_bar) {
        /* Since pci handles romfile, just print a message and return */
        if (vfio_blacklist_opt_rom(vdev) && vdev->pdev.romfile) {
            warn_report("Device at %s is known to cause system instability"
                        " issues during option rom execution",
                        vdev->vbasedev.name);
            error_printf("Proceeding anyway since user specified romfile\n");
        }
        return;
    }

    /*
     * Use the same size ROM BAR as the physical device.  The contents
     * will get filled in later when the guest tries to read it.
     */
    if (pread(fd, &orig, 4, offset) != 4 ||
        pwrite(fd, &size, 4, offset) != 4 ||
        pread(fd, &size, 4, offset) != 4 ||
        pwrite(fd, &orig, 4, offset) != 4) {
        error_report("%s(%s) failed: %m", __func__, vdev->vbasedev.name);
        return;
    }

    size = ~(le32_to_cpu(size) & PCI_ROM_ADDRESS_MASK) + 1;

    if (!size) {
        return;
    }

    if (vfio_blacklist_opt_rom(vdev)) {
        if (dev->opts && qemu_opt_get(dev->opts, "rombar")) {
            warn_report("Device at %s is known to cause system instability"
                        " issues during option rom execution",
                        vdev->vbasedev.name);
            error_printf("Proceeding anyway since user specified"
                         " non zero value for rombar\n");
        } else {
            warn_report("Rom loading for device at %s has been disabled"
                        " due to system instability issues",
                        vdev->vbasedev.name);
            error_printf("Specify rombar=1 or romfile to force\n");
            return;
        }
    }

    trace_vfio_pci_size_rom(vdev->vbasedev.name, size);

    name = g_strdup_printf("vfio[%s].rom", vdev->vbasedev.name);

    memory_region_init_io(&vdev->pdev.rom, OBJECT(vdev),
                          &vfio_rom_ops, vdev, name, size);
    g_free(name);

    pci_register_bar(&vdev->pdev, PCI_ROM_SLOT,
                     PCI_BASE_ADDRESS_SPACE_MEMORY, &vdev->pdev.rom);

    vdev->rom_read_failed = false;
}

void vfio_vga_write(void *opaque, hwaddr addr,
                    uint64_t data, unsigned size)
{
    VFIOVGARegion *region = opaque;
    VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]);
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    off_t offset = vga->fd_offset + region->offset + addr;

    switch (size) {
    case 1:
        buf.byte = data;
        break;
    case 2:
        buf.word = cpu_to_le16(data);
        break;
    case 4:
        buf.dword = cpu_to_le32(data);
        break;
    default:
        hw_error("vfio: unsupported write size, %d bytes", size);
        break;
    }

    if (pwrite(vga->fd, &buf, size, offset) != size) {
        error_report("%s(,0x%"HWADDR_PRIx", 0x%"PRIx64", %d) failed: %m",
                     __func__, region->offset + addr, data, size);
    }

    trace_vfio_vga_write(region->offset + addr, data, size);
}

uint64_t vfio_vga_read(void *opaque, hwaddr addr, unsigned size)
{
    VFIOVGARegion *region = opaque;
    VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]);
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    uint64_t data = 0;
    off_t offset = vga->fd_offset + region->offset + addr;

    if (pread(vga->fd, &buf, size, offset) != size) {
        error_report("%s(,0x%"HWADDR_PRIx", %d) failed: %m",
                     __func__, region->offset + addr, size);
        return (uint64_t)-1;
    }

    switch (size) {
    case 1:
        data = buf.byte;
        break;
    case 2:
        data = le16_to_cpu(buf.word);
        break;
    case 4:
        data = le32_to_cpu(buf.dword);
        break;
    default:
        hw_error("vfio: unsupported read size, %d bytes", size);
        break;
    }

    trace_vfio_vga_read(region->offset + addr, size, data);

    return data;
}

static const MemoryRegionOps vfio_vga_ops = {
    .read = vfio_vga_read,
    .write = vfio_vga_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

/*
 * Expand the memory region of a sub-page (size < PAGE_SIZE) MMIO BAR to
 * page size if the BAR occupies an exclusive page on the host, so that we
 * can map the BAR into the guest.  The sub-page BAR may not occupy an
 * exclusive page in the guest, however, so the expanded memory region is
 * given priority zero in case it overlaps BARs that share the same guest
 * page.  We must also restore the original size of the sub-page BAR when
 * its base address is changed in the guest and is no longer page aligned.
 */
static void vfio_sub_page_bar_update_mapping(PCIDevice *pdev, int bar)
{
    VFIOPCIDevice *vdev = PCI_VFIO(pdev);
    VFIORegion *region = &vdev->bars[bar].region;
    MemoryRegion *mmap_mr, *region_mr, *base_mr;
    PCIIORegion *r;
    pcibus_t bar_addr;
    uint64_t size = region->size;

    /* Make sure that the whole region is allowed to be mmapped */
    if (region->nr_mmaps != 1 || !region->mmaps[0].mmap ||
        region->mmaps[0].size != region->size) {
        return;
    }

    r = &pdev->io_regions[bar];
    bar_addr = r->addr;
    base_mr = vdev->bars[bar].mr;
    region_mr = region->mem;
    mmap_mr = &region->mmaps[0].mem;

    /* If BAR is mapped and page aligned, update to fill PAGE_SIZE */
    if (bar_addr != PCI_BAR_UNMAPPED &&
        !(bar_addr & ~qemu_real_host_page_mask)) {
        size = qemu_real_host_page_size;
    }

    memory_region_transaction_begin();

    if (vdev->bars[bar].size < size) {
        memory_region_set_size(base_mr, size);
    }
    memory_region_set_size(region_mr, size);
    memory_region_set_size(mmap_mr, size);
    if (size != vdev->bars[bar].size && memory_region_is_mapped(base_mr)) {
        memory_region_del_subregion(r->address_space, base_mr);
        memory_region_add_subregion_overlap(r->address_space,
                                            bar_addr, base_mr, 0);
    }

    memory_region_transaction_commit();
}

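/*
 * Example (assuming 4KiB host pages): a 0x800-byte BAR that the guest
 * maps at a page-aligned address is grown to a full 0x1000 mapping;
 * priority 0 on the overlap lets any other BAR emulated in the same
 * guest page win, and a later non-aligned remap shrinks it back to 0x800.
 */
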
uint32_t vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len)
{
    VFIOPCIDevice *vdev = PCI_VFIO(pdev);
    uint32_t emu_bits = 0, emu_val = 0, phys_val = 0, val;

    memcpy(&emu_bits, vdev->emulated_config_bits + addr, len);
    emu_bits = le32_to_cpu(emu_bits);

    if (emu_bits) {
        emu_val = pci_default_read_config(pdev, addr, len);
    }

    if (~emu_bits & (0xffffffffU >> (32 - len * 8))) {
        ssize_t ret;

        ret = pread(vdev->vbasedev.fd, &phys_val, len,
                    vdev->config_offset + addr);
        if (ret != len) {
            error_report("%s(%s, 0x%x, 0x%x) failed: %m",
                         __func__, vdev->vbasedev.name, addr, len);
            return -errno;
        }
        phys_val = le32_to_cpu(phys_val);
    }

    val = (emu_val & emu_bits) | (phys_val & ~emu_bits);

    trace_vfio_pci_read_config(vdev->vbasedev.name, addr, len, val);

    return val;
}

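/*
 * Worked example of the merge above: with len = 2 and emu_bits = 0x00ff,
 * the low byte comes from QEMU's emulated config space and the high byte
 * from the pread() of the physical device, i.e.
 *   val = (emu_val & 0x00ff) | (phys_val & 0xff00);
 */
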
void vfio_pci_write_config(PCIDevice *pdev,
                           uint32_t addr, uint32_t val, int len)
{
    VFIOPCIDevice *vdev = PCI_VFIO(pdev);
    uint32_t val_le = cpu_to_le32(val);

    trace_vfio_pci_write_config(vdev->vbasedev.name, addr, val, len);

    /* Write everything to VFIO, let it filter out what we can't write */
    if (pwrite(vdev->vbasedev.fd, &val_le, len, vdev->config_offset + addr)
                != len) {
        error_report("%s(%s, 0x%x, 0x%x, 0x%x) failed: %m",
                     __func__, vdev->vbasedev.name, addr, val, len);
    }

    /* MSI/MSI-X Enabling/Disabling */
    if (pdev->cap_present & QEMU_PCI_CAP_MSI &&
        ranges_overlap(addr, len, pdev->msi_cap, vdev->msi_cap_size)) {
        int is_enabled, was_enabled = msi_enabled(pdev);

        pci_default_write_config(pdev, addr, val, len);

        is_enabled = msi_enabled(pdev);

        if (!was_enabled) {
            if (is_enabled) {
                vfio_msi_enable(vdev);
            }
        } else {
            if (!is_enabled) {
                vfio_msi_disable(vdev);
            } else {
                vfio_update_msi(vdev);
            }
        }
    } else if (pdev->cap_present & QEMU_PCI_CAP_MSIX &&
        ranges_overlap(addr, len, pdev->msix_cap, MSIX_CAP_LENGTH)) {
        int is_enabled, was_enabled = msix_enabled(pdev);

        pci_default_write_config(pdev, addr, val, len);

        is_enabled = msix_enabled(pdev);

        if (!was_enabled && is_enabled) {
            vfio_msix_enable(vdev);
        } else if (was_enabled && !is_enabled) {
            vfio_msix_disable(vdev);
        }
    } else if (ranges_overlap(addr, len, PCI_BASE_ADDRESS_0, 24) ||
        range_covers_byte(addr, len, PCI_COMMAND)) {
        pcibus_t old_addr[PCI_NUM_REGIONS - 1];
        int bar;

        for (bar = 0; bar < PCI_ROM_SLOT; bar++) {
            old_addr[bar] = pdev->io_regions[bar].addr;
        }

        pci_default_write_config(pdev, addr, val, len);

        for (bar = 0; bar < PCI_ROM_SLOT; bar++) {
            if (old_addr[bar] != pdev->io_regions[bar].addr &&
                vdev->bars[bar].region.size > 0 &&
                vdev->bars[bar].region.size < qemu_real_host_page_size) {
                vfio_sub_page_bar_update_mapping(pdev, bar);
            }
        }
    } else {
        /* Write everything to QEMU to keep emulated bits correct */
        pci_default_write_config(pdev, addr, val, len);
    }
}

static void vfio_disable_interrupts(VFIOPCIDevice *vdev)
{
    /*
     * More complicated than it looks.  Disabling MSI/X transitions the
     * device to INTx mode (if supported).  Therefore we need to first
     * disable MSI/X and then cleanup by disabling INTx.
     */
    if (vdev->interrupt == VFIO_INT_MSIX) {
        vfio_msix_disable(vdev);
    } else if (vdev->interrupt == VFIO_INT_MSI) {
        vfio_msi_disable(vdev);
    }

    if (vdev->interrupt == VFIO_INT_INTx) {
        vfio_intx_disable(vdev);
    }
}

static int vfio_msi_setup(VFIOPCIDevice *vdev, int pos, Error **errp)
{
    uint16_t ctrl;
    bool msi_64bit, msi_maskbit;
    int ret, entries;
    Error *err = NULL;

    if (pread(vdev->vbasedev.fd, &ctrl, sizeof(ctrl),
              vdev->config_offset + pos + PCI_CAP_FLAGS) != sizeof(ctrl)) {
        error_setg_errno(errp, errno, "failed reading MSI PCI_CAP_FLAGS");
        return -errno;
    }
    ctrl = le16_to_cpu(ctrl);

    msi_64bit = !!(ctrl & PCI_MSI_FLAGS_64BIT);
    msi_maskbit = !!(ctrl & PCI_MSI_FLAGS_MASKBIT);
    entries = 1 << ((ctrl & PCI_MSI_FLAGS_QMASK) >> 1);

    trace_vfio_msi_setup(vdev->vbasedev.name, pos);

    ret = msi_init(&vdev->pdev, pos, entries, msi_64bit, msi_maskbit, &err);
    if (ret < 0) {
        if (ret == -ENOTSUP) {
            return 0;
        }
        error_propagate_prepend(errp, err, "msi_init failed: ");
        return ret;
    }
    vdev->msi_cap_size = 0xa + (msi_maskbit ? 0xa : 0) + (msi_64bit ? 0x4 : 0);

    return 0;
}

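/*
 * Size check for the line above: a basic MSI capability is 0xa bytes;
 * a 64-bit address field adds 0x4 and per-vector masking adds 0xa, so a
 * 64-bit maskable function yields 0xa + 0xa + 0x4 = 0x18 bytes, matching
 * the sizes defined by the PCI spec.
 */
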
static void vfio_pci_fixup_msix_region(VFIOPCIDevice *vdev)
{
    off_t start, end;
    VFIORegion *region = &vdev->bars[vdev->msix->table_bar].region;

    /*
     * If the host driver allows mapping of the MSIX data, we are going to
     * map the entire BAR and emulate the MSIX table on top of that.
     */
    if (vfio_has_region_cap(&vdev->vbasedev, region->nr,
                            VFIO_REGION_INFO_CAP_MSIX_MAPPABLE)) {
        return;
    }

    /*
     * We expect to find a single mmap covering the whole BAR, anything else
     * means it's either unsupported or already setup.
     */
    if (region->nr_mmaps != 1 || region->mmaps[0].offset ||
        region->size != region->mmaps[0].size) {
        return;
    }

    /* MSI-X table start and end aligned to host page size */
    start = vdev->msix->table_offset & qemu_real_host_page_mask;
    end = REAL_HOST_PAGE_ALIGN((uint64_t)vdev->msix->table_offset +
                               (vdev->msix->entries * PCI_MSIX_ENTRY_SIZE));

    /*
     * Does the MSI-X table cover the beginning of the BAR?  The whole BAR?
     * NB - Host page size is necessarily a power of two and so is the PCI
     * BAR (not counting EA yet), therefore if we have host page aligned
     * @start and @end, then any remainder of the BAR before or after those
     * must be at least host page sized and therefore mmap'able.
     */
    if (!start) {
        if (end >= region->size) {
            region->nr_mmaps = 0;
            g_free(region->mmaps);
            region->mmaps = NULL;
            trace_vfio_msix_fixup(vdev->vbasedev.name,
                                  vdev->msix->table_bar, 0, 0);
        } else {
            region->mmaps[0].offset = end;
            region->mmaps[0].size = region->size - end;
            trace_vfio_msix_fixup(vdev->vbasedev.name,
                              vdev->msix->table_bar, region->mmaps[0].offset,
                              region->mmaps[0].offset + region->mmaps[0].size);
        }

    /* Maybe it's aligned at the end of the BAR */
    } else if (end >= region->size) {
        region->mmaps[0].size = start;
        trace_vfio_msix_fixup(vdev->vbasedev.name,
                              vdev->msix->table_bar, region->mmaps[0].offset,
                              region->mmaps[0].offset + region->mmaps[0].size);

    /* Otherwise it must split the BAR */
    } else {
        region->nr_mmaps = 2;
        region->mmaps = g_renew(VFIOMmap, region->mmaps, 2);

        memcpy(&region->mmaps[1], &region->mmaps[0], sizeof(VFIOMmap));

        region->mmaps[0].size = start;
        trace_vfio_msix_fixup(vdev->vbasedev.name,
                              vdev->msix->table_bar, region->mmaps[0].offset,
                              region->mmaps[0].offset + region->mmaps[0].size);

        region->mmaps[1].offset = end;
        region->mmaps[1].size = region->size - end;
        trace_vfio_msix_fixup(vdev->vbasedev.name,
                              vdev->msix->table_bar, region->mmaps[1].offset,
                              region->mmaps[1].offset + region->mmaps[1].size);
    }
}

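/*
 * Illustrative split (assuming 4KiB host pages): a 16KiB BAR holding a
 * 16-entry table at table_offset 0x2000 gives start = 0x2000 and
 * end = 0x3000, so the BAR ends up mmap'ed as [0x0, 0x2000) and
 * [0x3000, 0x4000) with only the table page trapped.
 */
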
static void vfio_pci_relocate_msix(VFIOPCIDevice *vdev, Error **errp)
{
    int target_bar = -1;
    size_t msix_sz;

    if (!vdev->msix || vdev->msix_relo == OFF_AUTOPCIBAR_OFF) {
        return;
    }

    /* The actual minimum size of MSI-X structures */
    msix_sz = (vdev->msix->entries * PCI_MSIX_ENTRY_SIZE) +
              (QEMU_ALIGN_UP(vdev->msix->entries, 64) / 8);
    /* Round up to host pages, we don't want to share a page */
    msix_sz = REAL_HOST_PAGE_ALIGN(msix_sz);
    /* PCI BARs must be a power of 2 */
    msix_sz = pow2ceil(msix_sz);

    if (vdev->msix_relo == OFF_AUTOPCIBAR_AUTO) {
        /*
         * TODO: Lookup table for known devices.
         *
         * Logically we might use an algorithm here to select the BAR adding
         * the least additional MMIO space, but we cannot programmatically
         * predict the driver dependency on BAR ordering or sizing, therefore
         * 'auto' becomes a lookup for combinations reported to work.
         */
        if (target_bar < 0) {
            error_setg(errp, "No automatic MSI-X relocation available for "
                       "device %04x:%04x", vdev->vendor_id, vdev->device_id);
            return;
        }
    } else {
        target_bar = (int)(vdev->msix_relo - OFF_AUTOPCIBAR_BAR0);
    }

    /* I/O port BARs cannot host MSI-X structures */
    if (vdev->bars[target_bar].ioport) {
        error_setg(errp, "Invalid MSI-X relocation BAR %d, "
                   "I/O port BAR", target_bar);
        return;
    }

    /* Cannot use a BAR in the "shadow" of a 64-bit BAR */
    if (!vdev->bars[target_bar].size &&
         target_bar > 0 && vdev->bars[target_bar - 1].mem64) {
        error_setg(errp, "Invalid MSI-X relocation BAR %d, "
                   "consumed by 64-bit BAR %d", target_bar, target_bar - 1);
        return;
    }

    /* 2GB max size for 32-bit BARs, cannot double if already > 1G */
    if (vdev->bars[target_bar].size > 1 * GiB &&
        !vdev->bars[target_bar].mem64) {
        error_setg(errp, "Invalid MSI-X relocation BAR %d, "
                   "no space to extend 32-bit BAR", target_bar);
        return;
    }

    /*
     * If adding a new BAR, test if we can make it 64bit.  We make it
     * prefetchable since QEMU MSI-X emulation has no read side effects
     * and doing so makes mapping more flexible.
     */
    if (!vdev->bars[target_bar].size) {
        if (target_bar < (PCI_ROM_SLOT - 1) &&
            !vdev->bars[target_bar + 1].size) {
            vdev->bars[target_bar].mem64 = true;
            vdev->bars[target_bar].type = PCI_BASE_ADDRESS_MEM_TYPE_64;
        }
        vdev->bars[target_bar].type |= PCI_BASE_ADDRESS_MEM_PREFETCH;
        vdev->bars[target_bar].size = msix_sz;
        vdev->msix->table_offset = 0;
    } else {
        vdev->bars[target_bar].size = MAX(vdev->bars[target_bar].size * 2,
                                          msix_sz * 2);
        /*
         * Due to above size calc, MSI-X always starts halfway into the BAR,
         * which will always be a separate host page.
         */
        vdev->msix->table_offset = vdev->bars[target_bar].size / 2;
    }

    vdev->msix->table_bar = target_bar;
    vdev->msix->pba_bar = target_bar;
    /* Requires 8-byte alignment, but PCI_MSIX_ENTRY_SIZE guarantees that */
    vdev->msix->pba_offset = vdev->msix->table_offset +
                                  (vdev->msix->entries * PCI_MSIX_ENTRY_SIZE);

    trace_vfio_msix_relo(vdev->vbasedev.name,
                         vdev->msix->table_bar, vdev->msix->table_offset);
}

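/*
 * Sizing example (assuming 4KiB host pages): 8 MSI-X entries need
 * 8 * PCI_MSIX_ENTRY_SIZE = 128 bytes plus 8 bytes of PBA; page alignment
 * brings that to 0x1000, already a power of two.  Relocating into an
 * existing 64KiB BAR therefore doubles it to 128KiB with the table at
 * offset 64KiB.
 */
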
/*
 * We don't have any control over how pci_add_capability() inserts
 * capabilities into the chain.  In order to setup MSI-X we need a
 * MemoryRegion for the BAR.  In order to setup the BAR and not
 * attempt to mmap the MSI-X table area, which VFIO won't allow, we
 * need to first look for where the MSI-X table lives.  So we
 * unfortunately split MSI-X setup across two functions.
 */
static void vfio_msix_early_setup(VFIOPCIDevice *vdev, Error **errp)
{
    uint8_t pos;
    uint16_t ctrl;
    uint32_t table, pba;
    int fd = vdev->vbasedev.fd;
    VFIOMSIXInfo *msix;

    pos = pci_find_capability(&vdev->pdev, PCI_CAP_ID_MSIX);
    if (!pos) {
        return;
    }

    if (pread(fd, &ctrl, sizeof(ctrl),
              vdev->config_offset + pos + PCI_MSIX_FLAGS) != sizeof(ctrl)) {
        error_setg_errno(errp, errno, "failed to read PCI MSIX FLAGS");
        return;
    }

    if (pread(fd, &table, sizeof(table),
              vdev->config_offset + pos + PCI_MSIX_TABLE) != sizeof(table)) {
        error_setg_errno(errp, errno, "failed to read PCI MSIX TABLE");
        return;
    }

    if (pread(fd, &pba, sizeof(pba),
              vdev->config_offset + pos + PCI_MSIX_PBA) != sizeof(pba)) {
        error_setg_errno(errp, errno, "failed to read PCI MSIX PBA");
        return;
    }

    ctrl = le16_to_cpu(ctrl);
    table = le32_to_cpu(table);
    pba = le32_to_cpu(pba);

    msix = g_malloc0(sizeof(*msix));
    msix->table_bar = table & PCI_MSIX_FLAGS_BIRMASK;
    msix->table_offset = table & ~PCI_MSIX_FLAGS_BIRMASK;
    msix->pba_bar = pba & PCI_MSIX_FLAGS_BIRMASK;
    msix->pba_offset = pba & ~PCI_MSIX_FLAGS_BIRMASK;
    msix->entries = (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;

    /*
     * Test the size of the pba_offset variable and catch if it extends outside
     * of the specified BAR. If it is the case, we need to apply a hardware
     * specific quirk if the device is known or we have a broken configuration.
     */
    if (msix->pba_offset >= vdev->bars[msix->pba_bar].region.size) {
        /*
         * Chelsio T5 Virtual Function devices are encoded as 0x58xx for T5
         * adapters. The T5 hardware returns an incorrect value of 0x8000 for
         * the VF PBA offset while the BAR itself is only 8k. The correct value
         * is 0x1000, so we hard code that here.
         */
        if (vdev->vendor_id == PCI_VENDOR_ID_CHELSIO &&
            (vdev->device_id & 0xff00) == 0x5800) {
            msix->pba_offset = 0x1000;
        } else if (vdev->msix_relo == OFF_AUTOPCIBAR_OFF) {
            error_setg(errp, "hardware reports invalid configuration, "
                       "MSIX PBA outside of specified BAR");
            g_free(msix);
            return;
        }
    }

    trace_vfio_msix_early_setup(vdev->vbasedev.name, pos, msix->table_bar,
                                msix->table_offset, msix->entries);
    vdev->msix = msix;

    vfio_pci_fixup_msix_region(vdev);

    vfio_pci_relocate_msix(vdev, errp);
}

static int vfio_msix_setup(VFIOPCIDevice *vdev, int pos, Error **errp)
{
    int ret;
    Error *err = NULL;

    vdev->msix->pending = g_malloc0(BITS_TO_LONGS(vdev->msix->entries) *
                                    sizeof(unsigned long));
    ret = msix_init(&vdev->pdev, vdev->msix->entries,
                    vdev->bars[vdev->msix->table_bar].mr,
                    vdev->msix->table_bar, vdev->msix->table_offset,
                    vdev->bars[vdev->msix->pba_bar].mr,
                    vdev->msix->pba_bar, vdev->msix->pba_offset, pos,
                    &err);
    if (ret < 0) {
        if (ret == -ENOTSUP) {
            warn_report_err(err);
            return 0;
        }

        error_propagate(errp, err);
        return ret;
    }

    /*
     * The PCI spec suggests that devices provide additional alignment for
     * MSI-X structures and avoid overlapping non-MSI-X related registers.
     * For an assigned device, this hopefully means that emulation of MSI-X
     * structures does not affect the performance of the device.  If devices
     * fail to provide that alignment, a significant performance penalty may
     * result, for instance Mellanox MT27500 VFs:
     * http://www.spinics.net/lists/kvm/msg125881.html
     *
     * The PBA is simply not that important for such a serious regression and
     * most drivers do not appear to look at it.  The solution for this is to
     * disable the PBA MemoryRegion unless it's being used.  We disable it
     * here and only enable it if a masked vector fires through QEMU.  As the
     * vector-use notifier is called, which occurs on unmask, we test whether
     * PBA emulation is needed and again disable if not.
     */
    memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, false);

    /*
     * The emulated machine may provide a paravirt interface for MSIX setup
     * so it is not strictly necessary to emulate MSIX here. This becomes
     * helpful when frequently accessed MMIO registers are located in
     * subpages adjacent to the MSIX table but the MSIX data containing page
     * cannot be mapped because of a host page size bigger than the MSIX table
     * alignment.
     */
    if (object_property_get_bool(OBJECT(qdev_get_machine()),
                                 "vfio-no-msix-emulation", NULL)) {
        memory_region_set_enabled(&vdev->pdev.msix_table_mmio, false);
    }

    return 0;
}

static void vfio_teardown_msi(VFIOPCIDevice *vdev)
{
    msi_uninit(&vdev->pdev);

    if (vdev->msix) {
        msix_uninit(&vdev->pdev,
                    vdev->bars[vdev->msix->table_bar].mr,
                    vdev->bars[vdev->msix->pba_bar].mr);
        g_free(vdev->msix->pending);
    }
}

static void vfio_mmap_set_enabled(VFIOPCIDevice *vdev, bool enabled)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        vfio_region_mmaps_set_enabled(&vdev->bars[i].region, enabled);
    }
}

static void vfio_bar_prepare(VFIOPCIDevice *vdev, int nr)
{
    VFIOBAR *bar = &vdev->bars[nr];
    uint32_t pci_bar;
    int ret;

    /* Skip both unimplemented BARs and the upper half of 64bit BARS. */
    if (!bar->region.size) {
        return;
    }

    /* Determine what type of BAR this is for registration */
    ret = pread(vdev->vbasedev.fd, &pci_bar, sizeof(pci_bar),
                vdev->config_offset + PCI_BASE_ADDRESS_0 + (4 * nr));
    if (ret != sizeof(pci_bar)) {
        error_report("vfio: Failed to read BAR %d (%m)", nr);
        return;
    }

    pci_bar = le32_to_cpu(pci_bar);
    bar->ioport = (pci_bar & PCI_BASE_ADDRESS_SPACE_IO);
    bar->mem64 = bar->ioport ? 0 : (pci_bar & PCI_BASE_ADDRESS_MEM_TYPE_64);
    bar->type = pci_bar & (bar->ioport ? ~PCI_BASE_ADDRESS_IO_MASK :
                                         ~PCI_BASE_ADDRESS_MEM_MASK);
    bar->size = bar->region.size;
}

static void vfio_bars_prepare(VFIOPCIDevice *vdev)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        vfio_bar_prepare(vdev, i);
    }
}

static void vfio_bar_register(VFIOPCIDevice *vdev, int nr)
{
    VFIOBAR *bar = &vdev->bars[nr];
    char *name;

    if (!bar->size) {
        return;
    }

    bar->mr = g_new0(MemoryRegion, 1);
    name = g_strdup_printf("%s base BAR %d", vdev->vbasedev.name, nr);
    memory_region_init_io(bar->mr, OBJECT(vdev), NULL, NULL, name, bar->size);
    g_free(name);

    if (bar->region.size) {
        memory_region_add_subregion(bar->mr, 0, bar->region.mem);

        if (vfio_region_mmap(&bar->region)) {
            error_report("Failed to mmap %s BAR %d. Performance may be slow",
                         vdev->vbasedev.name, nr);
        }
    }

    pci_register_bar(&vdev->pdev, nr, bar->type, bar->mr);
}

static void vfio_bars_register(VFIOPCIDevice *vdev)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        vfio_bar_register(vdev, i);
    }
}

static void vfio_bars_exit(VFIOPCIDevice *vdev)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        VFIOBAR *bar = &vdev->bars[i];

        vfio_bar_quirk_exit(vdev, i);
        vfio_region_exit(&bar->region);
        if (bar->region.size) {
            memory_region_del_subregion(bar->mr, bar->region.mem);
        }
    }

    if (vdev->vga) {
        pci_unregister_vga(&vdev->pdev);
        vfio_vga_quirk_exit(vdev);
    }
}

static void vfio_bars_finalize(VFIOPCIDevice *vdev)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        VFIOBAR *bar = &vdev->bars[i];

        vfio_bar_quirk_finalize(vdev, i);
        vfio_region_finalize(&bar->region);
        if (bar->size) {
            object_unparent(OBJECT(bar->mr));
            g_free(bar->mr);
        }
    }

    if (vdev->vga) {
        vfio_vga_quirk_finalize(vdev);
        for (i = 0; i < ARRAY_SIZE(vdev->vga->region); i++) {
            object_unparent(OBJECT(&vdev->vga->region[i].mem));
        }
        g_free(vdev->vga);
    }
}

static uint8_t vfio_std_cap_max_size(PCIDevice *pdev, uint8_t pos)
{
    uint8_t tmp;
    uint16_t next = PCI_CONFIG_SPACE_SIZE;

    for (tmp = pdev->config[PCI_CAPABILITY_LIST]; tmp;
         tmp = pdev->config[tmp + PCI_CAP_LIST_NEXT]) {
        if (tmp > pos && tmp < next) {
            next = tmp;
        }
    }

    return next - pos;
}

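/*
 * The walk above sizes a capability as the distance to the nearest
 * following capability (or the end of standard config space), e.g. caps
 * at 0x40 and 0x50 give vfio_std_cap_max_size(pdev, 0x40) == 0x10.
 */
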
static uint16_t vfio_ext_cap_max_size(const uint8_t *config, uint16_t pos)
{
    uint16_t tmp, next = PCIE_CONFIG_SPACE_SIZE;

    for (tmp = PCI_CONFIG_SPACE_SIZE; tmp;
         tmp = PCI_EXT_CAP_NEXT(pci_get_long(config + tmp))) {
        if (tmp > pos && tmp < next) {
            next = tmp;
        }
    }

    return next - pos;
}

static void vfio_set_word_bits(uint8_t *buf, uint16_t val, uint16_t mask)
{
    pci_set_word(buf, (pci_get_word(buf) & ~mask) | val);
}

static void vfio_add_emulated_word(VFIOPCIDevice *vdev, int pos,
                                   uint16_t val, uint16_t mask)
{
    vfio_set_word_bits(vdev->pdev.config + pos, val, mask);
    vfio_set_word_bits(vdev->pdev.wmask + pos, ~mask, mask);
    vfio_set_word_bits(vdev->emulated_config_bits + pos, mask, mask);
}

static void vfio_set_long_bits(uint8_t *buf, uint32_t val, uint32_t mask)
{
    pci_set_long(buf, (pci_get_long(buf) & ~mask) | val);
}

static void vfio_add_emulated_long(VFIOPCIDevice *vdev, int pos,
                                   uint32_t val, uint32_t mask)
{
    vfio_set_long_bits(vdev->pdev.config + pos, val, mask);
    vfio_set_long_bits(vdev->pdev.wmask + pos, ~mask, mask);
    vfio_set_long_bits(vdev->emulated_config_bits + pos, mask, mask);
}

static int vfio_setup_pcie_cap(VFIOPCIDevice *vdev, int pos, uint8_t size,
                               Error **errp)
{
    uint16_t flags;
    uint8_t type;

    flags = pci_get_word(vdev->pdev.config + pos + PCI_CAP_FLAGS);
    type = (flags & PCI_EXP_FLAGS_TYPE) >> 4;

    if (type != PCI_EXP_TYPE_ENDPOINT &&
        type != PCI_EXP_TYPE_LEG_END &&
        type != PCI_EXP_TYPE_RC_END) {

        error_setg(errp, "assignment of PCIe type 0x%x "
                   "devices is not currently supported", type);
        return -EINVAL;
    }

    if (!pci_bus_is_express(pci_get_bus(&vdev->pdev))) {
        PCIBus *bus = pci_get_bus(&vdev->pdev);
        PCIDevice *bridge;

        /*
         * Traditionally PCI device assignment exposes the PCIe capability
         * as-is on non-express buses.  The reason being that some drivers
         * simply assume that it's there, for example tg3.  However when
         * we're running on a native PCIe machine type, like Q35, we need
         * to hide the PCIe capability.  The reason for this is twofold;
         * first Windows guests get a Code 10 error when the PCIe capability
         * is exposed in this configuration.  Therefore express devices won't
         * work at all unless they're attached to express buses in the VM.
         * Second, a native PCIe machine introduces the possibility of fine
         * granularity IOMMUs supporting both translation and isolation.
         * Guest code to discover the IOMMU visibility of a device, such as
         * IOMMU grouping code on Linux, is very aware of device types and
         * valid transitions between bus types.  An express device on a non-
         * express bus is not a valid combination on bare metal systems.
         *
         * Drivers that require a PCIe capability to make the device
         * functional are simply going to need to have their devices placed
         * on a PCIe bus in the VM.
         */
        while (!pci_bus_is_root(bus)) {
            bridge = pci_bridge_get_device(bus);
            bus = pci_get_bus(bridge);
        }

        if (pci_bus_is_express(bus)) {
            return 0;
        }

    } else if (pci_bus_is_root(pci_get_bus(&vdev->pdev))) {
        /*
         * On a Root Complex bus Endpoints become Root Complex Integrated
         * Endpoints, which changes the type and clears the LNK & LNK2 fields.
         */
        if (type == PCI_EXP_TYPE_ENDPOINT) {
            vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
                                   PCI_EXP_TYPE_RC_END << 4,
                                   PCI_EXP_FLAGS_TYPE);

            /* Link Capabilities, Status, and Control goes away */
            if (size > PCI_EXP_LNKCTL) {
                vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP, 0, ~0);
                vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0);
                vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA, 0, ~0);

#ifndef PCI_EXP_LNKCAP2
#define PCI_EXP_LNKCAP2 44
#endif
#ifndef PCI_EXP_LNKSTA2
#define PCI_EXP_LNKSTA2 50
#endif
                /* Link 2 Capabilities, Status, and Control goes away */
                if (size > PCI_EXP_LNKCAP2) {
                    vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP2, 0, ~0);
                    vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL2, 0, ~0);
                    vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA2, 0, ~0);
                }
            }

        } else if (type == PCI_EXP_TYPE_LEG_END) {
            /*
             * Legacy endpoints don't belong on the root complex.  Windows
             * seems to be happier with devices if we skip the capability.
             */
            return 0;
        }

    } else {
        /*
         * Convert Root Complex Integrated Endpoints to regular endpoints.
         * These devices don't support LNK/LNK2 capabilities, so make them up.
         */
        if (type == PCI_EXP_TYPE_RC_END) {
            vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
                                   PCI_EXP_TYPE_ENDPOINT << 4,
                                   PCI_EXP_FLAGS_TYPE);
            vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP,
                           QEMU_PCI_EXP_LNKCAP_MLW(QEMU_PCI_EXP_LNK_X1) |
                           QEMU_PCI_EXP_LNKCAP_MLS(QEMU_PCI_EXP_LNK_2_5GT), ~0);
            vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0);
        }
    }

    /*
     * Intel 82599 SR-IOV VFs report an invalid PCIe capability version 0
     * (Niantic errata #35) causing Windows to error with a Code 10 for the
     * device on Q35.  Fixup any such devices to report version 1.  If we
     * were to remove the capability entirely the guest would lose extended
     * config space.
     */
    if ((flags & PCI_EXP_FLAGS_VERS) == 0) {
        vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
                               1, PCI_EXP_FLAGS_VERS);
    }

    pos = pci_add_capability(&vdev->pdev, PCI_CAP_ID_EXP, pos, size,
                             errp);
    if (pos < 0) {
        return pos;
    }

    vdev->pdev.exp.exp_cap = pos;

    return pos;
}

static void vfio_check_pcie_flr(VFIOPCIDevice *vdev, uint8_t pos)
{
    uint32_t cap = pci_get_long(vdev->pdev.config + pos + PCI_EXP_DEVCAP);

    if (cap & PCI_EXP_DEVCAP_FLR) {
        trace_vfio_check_pcie_flr(vdev->vbasedev.name);
        vdev->has_flr = true;
    }
}

static void vfio_check_pm_reset(VFIOPCIDevice *vdev, uint8_t pos)
{
    uint16_t csr = pci_get_word(vdev->pdev.config + pos + PCI_PM_CTRL);

    if (!(csr & PCI_PM_CTRL_NO_SOFT_RESET)) {
        trace_vfio_check_pm_reset(vdev->vbasedev.name);
        vdev->has_pm_reset = true;
    }
}

static void vfio_check_af_flr(VFIOPCIDevice *vdev, uint8_t pos)
{
    uint8_t cap = pci_get_byte(vdev->pdev.config + pos + PCI_AF_CAP);

    if ((cap & PCI_AF_CAP_TP) && (cap & PCI_AF_CAP_FLR)) {
        trace_vfio_check_af_flr(vdev->vbasedev.name);
        vdev->has_flr = true;
    }
}

static int vfio_add_std_cap(VFIOPCIDevice *vdev, uint8_t pos, Error **errp)
{
    PCIDevice *pdev = &vdev->pdev;
    uint8_t cap_id, next, size;
    int ret;

    cap_id = pdev->config[pos];
    next = pdev->config[pos + PCI_CAP_LIST_NEXT];

    /*
     * If it becomes important to configure capabilities to their actual
     * size, use this as the default when it's something we don't recognize.
     * Since QEMU doesn't actually handle many of the config accesses,
     * exact size doesn't seem worthwhile.
     */
    size = vfio_std_cap_max_size(pdev, pos);

    /*
     * pci_add_capability always inserts the new capability at the head
     * of the chain.  Therefore to end up with a chain that matches the
     * physical device, we insert from the end by making this recursive.
     * This is also why we pre-calculate size above as cached config space
     * will be changed as we unwind the stack.
     */
    if (next) {
        ret = vfio_add_std_cap(vdev, next, errp);
        if (ret) {
            return ret;
        }
    } else {
        /* Begin the rebuild, use QEMU emulated list bits */
        pdev->config[PCI_CAPABILITY_LIST] = 0;
        vdev->emulated_config_bits[PCI_CAPABILITY_LIST] = 0xff;
        vdev->emulated_config_bits[PCI_STATUS] |= PCI_STATUS_CAP_LIST;

        ret = vfio_add_virt_caps(vdev, errp);
        if (ret) {
            return ret;
        }
    }

    /* Scale down size, esp in case virt caps were added above */
    size = MIN(size, vfio_std_cap_max_size(pdev, pos));

    /* Use emulated next pointer to allow dropping caps */
    pci_set_byte(vdev->emulated_config_bits + pos + PCI_CAP_LIST_NEXT, 0xff);

    switch (cap_id) {
    case PCI_CAP_ID_MSI:
        ret = vfio_msi_setup(vdev, pos, errp);
        break;
    case PCI_CAP_ID_EXP:
        vfio_check_pcie_flr(vdev, pos);
        ret = vfio_setup_pcie_cap(vdev, pos, size, errp);
        break;
    case PCI_CAP_ID_MSIX:
        ret = vfio_msix_setup(vdev, pos, errp);
        break;
    case PCI_CAP_ID_PM:
        vfio_check_pm_reset(vdev, pos);
        vdev->pm_cap = pos;
        ret = pci_add_capability(pdev, cap_id, pos, size, errp);
        break;
    case PCI_CAP_ID_AF:
        vfio_check_af_flr(vdev, pos);
        ret = pci_add_capability(pdev, cap_id, pos, size, errp);
        break;
    default:
        ret = pci_add_capability(pdev, cap_id, pos, size, errp);
        break;
    }

    if (ret < 0) {
        error_prepend(errp,
                      "failed to add PCI capability 0x%x[0x%x]@0x%x: ",
                      cap_id, size, pos);
        return ret;
    }

    return 0;
}

static void vfio_add_ext_cap(VFIOPCIDevice *vdev)
{
    PCIDevice *pdev = &vdev->pdev;
    uint32_t header;
    uint16_t cap_id, next, size;
    uint8_t cap_ver;
    uint8_t *config;

    /* Only add extended caps if we have them and the guest can see them */
    if (!pci_is_express(pdev) || !pci_bus_is_express(pci_get_bus(pdev)) ||
        !pci_get_long(pdev->config + PCI_CONFIG_SPACE_SIZE)) {
        return;
    }

    /*
     * pcie_add_capability always inserts the new capability at the tail
     * of the chain.  Therefore to end up with a chain that matches the
     * physical device, we cache the config space to avoid overwriting
     * the original config space when we parse the extended capabilities.
     */
    config = g_memdup(pdev->config, vdev->config_size);

    /*
     * Extended capabilities are chained with each pointing to the next, so we
     * can drop anything other than the head of the chain simply by modifying
     * the previous next pointer.  Seed the head of the chain here such that
     * we can simply skip any capabilities we want to drop below, regardless
     * of their position in the chain.  If this stub capability still exists
     * after we add the capabilities we want to expose, update the capability
     * ID to zero.  Note that we cannot seed with the capability header being
     * zero as this conflicts with the definition of an absent capability
     * chain and prevents capabilities beyond the head of the list from being
     * added.  By replacing the dummy capability ID with zero after walking
     * the device chain, we also transparently mark extended capabilities as
     * absent if no capabilities were added.  Note that the PCIe spec defines
     * an absence of extended capabilities to be determined by a value of
     * zero for the capability ID, version, AND next pointer.  A non-zero
     * next pointer should be sufficient to indicate additional capabilities
     * are present, which will occur if we call pcie_add_capability() below.
     * The entire first dword is emulated to support this.
     *
     * NB. The kernel side does similar masking, so be prepared that our
     * view of the device may also contain a capability ID zero in the head
     * of the chain.  Skip it for the same reason that we cannot seed the
     * chain with a zero capability.
     */
    pci_set_long(pdev->config + PCI_CONFIG_SPACE_SIZE,
                 PCI_EXT_CAP(0xFFFF, 0, 0));
    pci_set_long(pdev->wmask + PCI_CONFIG_SPACE_SIZE, 0);
    pci_set_long(vdev->emulated_config_bits + PCI_CONFIG_SPACE_SIZE, ~0);

    for (next = PCI_CONFIG_SPACE_SIZE; next;
         next = PCI_EXT_CAP_NEXT(pci_get_long(config + next))) {
        header = pci_get_long(config + next);
        cap_id = PCI_EXT_CAP_ID(header);
        cap_ver = PCI_EXT_CAP_VER(header);

        /*
         * If it becomes important to configure extended capabilities to their
         * actual size, use this as the default when it's something we don't
         * recognize.  Since QEMU doesn't actually handle many of the config
         * accesses, exact size doesn't seem worthwhile.
         */
        size = vfio_ext_cap_max_size(config, next);

        /* Use emulated next pointer to allow dropping extended caps */
        pci_long_test_and_set_mask(vdev->emulated_config_bits + next,
                                   PCI_EXT_CAP_NEXT_MASK);

        switch (cap_id) {
        case 0: /* kernel masked capability */
        case PCI_EXT_CAP_ID_SRIOV: /* Read-only VF BARs confuse OVMF */
        case PCI_EXT_CAP_ID_ARI: /* XXX Needs next function virtualization */
        case PCI_EXT_CAP_ID_REBAR: /* Can't expose read-only */
            trace_vfio_add_ext_cap_dropped(vdev->vbasedev.name, cap_id, next);
            break;
        default:
            pcie_add_capability(pdev, cap_id, cap_ver, next, size);
        }
    }

    /* Cleanup chain head ID if necessary */
    if (pci_get_word(pdev->config + PCI_CONFIG_SPACE_SIZE) == 0xFFFF) {
        pci_set_word(pdev->config + PCI_CONFIG_SPACE_SIZE, 0);
    }

    g_free(config);
}
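/*
 * For reference, the extended capability header layout the walk above
 * relies on (per the PCIe spec): each capability begins with a 32-bit
 * header holding the capability ID in bits 15:0, the version in bits
 * 19:16, and the next pointer in bits 31:20.  For example, a header
 * dword of 0x14010001 decodes to cap_id 0x0001 (AER), version 1, and
 * next offset 0x140.
 */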
static int vfio_add_capabilities(VFIOPCIDevice *vdev, Error **errp)
{
    PCIDevice *pdev = &vdev->pdev;
    int ret;

    if (!(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST) ||
        !pdev->config[PCI_CAPABILITY_LIST]) {
        return 0; /* Nothing to add */
    }

    ret = vfio_add_std_cap(vdev, pdev->config[PCI_CAPABILITY_LIST], errp);
    if (ret) {
        return ret;
    }

    vfio_add_ext_cap(vdev);
    return 0;
}
static void vfio_pci_pre_reset(VFIOPCIDevice *vdev)
{
    PCIDevice *pdev = &vdev->pdev;
    uint16_t cmd;

    vfio_disable_interrupts(vdev);

    /* Make sure the device is in D0 */
    if (vdev->pm_cap) {
        uint16_t pmcsr;
        uint8_t state;

        pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2);
        state = pmcsr & PCI_PM_CTRL_STATE_MASK;
        if (state) {
            pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
            vfio_pci_write_config(pdev, vdev->pm_cap + PCI_PM_CTRL, pmcsr, 2);
            /* vfio handles the necessary delay here */
            pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2);
            state = pmcsr & PCI_PM_CTRL_STATE_MASK;
            if (state) {
                error_report("vfio: Unable to power on device, stuck in D%d",
                             state);
            }
        }
    }

    /*
     * Stop any ongoing DMA by disconnecting I/O, MMIO, and bus master.
     * Also put INTx Disable in a known state.
     */
    cmd = vfio_pci_read_config(pdev, PCI_COMMAND, 2);
    cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
             PCI_COMMAND_INTX_DISABLE);
    vfio_pci_write_config(pdev, PCI_COMMAND, cmd, 2);
}
static void vfio_pci_post_reset(VFIOPCIDevice *vdev)
{
    Error *err = NULL;
    int nr;

    vfio_intx_enable(vdev, &err);
    if (err) {
        error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
    }

    for (nr = 0; nr < PCI_NUM_REGIONS - 1; ++nr) {
        off_t addr = vdev->config_offset + PCI_BASE_ADDRESS_0 + (4 * nr);
        uint32_t val = 0;
        uint32_t len = sizeof(val);

        if (pwrite(vdev->vbasedev.fd, &val, len, addr) != len) {
            error_report("%s(%s) reset bar %d failed: %m", __func__,
                         vdev->vbasedev.name, nr);
        }
    }

    vfio_quirk_reset(vdev);
}
static bool vfio_pci_host_match(PCIHostDeviceAddress *addr, const char *name)
{
    char tmp[13];

    sprintf(tmp, "%04x:%02x:%02x.%1x", addr->domain,
            addr->bus, addr->slot, addr->function);

    return (strcmp(tmp, name) == 0);
}
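/*
 * Illustrative example: a dependent device at segment 0x0000, bus 0x01,
 * slot 0x00, function 0 formats as "0000:01:00.0", matching the
 * vbasedev.name derived from a sysfs path such as
 * /sys/bus/pci/devices/0000:01:00.0 in vfio_realize() below.
 */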
static int vfio_pci_hot_reset(VFIOPCIDevice *vdev, bool single)
{
    VFIOGroup *group;
    struct vfio_pci_hot_reset_info *info;
    struct vfio_pci_dependent_device *devices;
    struct vfio_pci_hot_reset *reset;
    int32_t *fds;
    int ret, i, count;
    bool multi = false;

    trace_vfio_pci_hot_reset(vdev->vbasedev.name, single ? "one" : "multi");

    if (!single) {
        vfio_pci_pre_reset(vdev);
    }
    vdev->vbasedev.needs_reset = false;

    info = g_malloc0(sizeof(*info));
    info->argsz = sizeof(*info);

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info);
    if (ret && errno != ENOSPC) {
        ret = -errno;
        if (!vdev->has_pm_reset) {
            error_report("vfio: Cannot reset device %s, "
                         "no available reset mechanism.", vdev->vbasedev.name);
        }
        goto out_single;
    }

    count = info->count;
    info = g_realloc(info, sizeof(*info) + (count * sizeof(*devices)));
    info->argsz = sizeof(*info) + (count * sizeof(*devices));
    devices = &info->devices[0];

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info);
    if (ret) {
        ret = -errno;
        error_report("vfio: hot reset info failed: %m");
        goto out_single;
    }

    trace_vfio_pci_hot_reset_has_dep_devices(vdev->vbasedev.name);

    /* Verify that we have all the groups required */
    for (i = 0; i < info->count; i++) {
        PCIHostDeviceAddress host;
        VFIOPCIDevice *tmp;
        VFIODevice *vbasedev_iter;

        host.domain = devices[i].segment;
        host.bus = devices[i].bus;
        host.slot = PCI_SLOT(devices[i].devfn);
        host.function = PCI_FUNC(devices[i].devfn);

        trace_vfio_pci_hot_reset_dep_devices(host.domain,
                host.bus, host.slot, host.function, devices[i].group_id);

        /* Skip the current device */
        if (vfio_pci_host_match(&host, vdev->vbasedev.name)) {
            continue;
        }

        QLIST_FOREACH(group, &vfio_group_list, next) {
            if (group->groupid == devices[i].group_id) {
                break;
            }
        }

        if (!group) {
            if (!vdev->has_pm_reset) {
                error_report("vfio: Cannot reset device %s, "
                             "depends on group %d which is not owned.",
                             vdev->vbasedev.name, devices[i].group_id);
            }
            ret = -EPERM;
            goto out;
        }

        /* Prep dependent devices for reset and clear our marker. */
        QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
            if (!vbasedev_iter->dev->realized ||
                vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) {
                continue;
            }
            tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev);
            if (vfio_pci_host_match(&host, tmp->vbasedev.name)) {
                if (single) {
                    ret = -EINVAL;
                    goto out_single;
                }
                vfio_pci_pre_reset(tmp);
                tmp->vbasedev.needs_reset = false;
                multi = true;
                break;
            }
        }
    }

    if (!single && !multi) {
        ret = -EINVAL;
        goto out_single;
    }

    /* Determine how many group fds need to be passed */
    count = 0;
    QLIST_FOREACH(group, &vfio_group_list, next) {
        for (i = 0; i < info->count; i++) {
            if (group->groupid == devices[i].group_id) {
                count++;
                break;
            }
        }
    }

    reset = g_malloc0(sizeof(*reset) + (count * sizeof(*fds)));
    reset->argsz = sizeof(*reset) + (count * sizeof(*fds));
    fds = &reset->group_fds[0];

    /* Fill in group fds */
    QLIST_FOREACH(group, &vfio_group_list, next) {
        for (i = 0; i < info->count; i++) {
            if (group->groupid == devices[i].group_id) {
                fds[reset->count++] = group->fd;
                break;
            }
        }
    }

    /* Bus reset! */
    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_PCI_HOT_RESET, reset);
    g_free(reset);

    trace_vfio_pci_hot_reset_result(vdev->vbasedev.name,
                                    ret ? "%m" : "Success");

out:
    /* Re-enable INTx on affected devices */
    for (i = 0; i < info->count; i++) {
        PCIHostDeviceAddress host;
        VFIOPCIDevice *tmp;
        VFIODevice *vbasedev_iter;

        host.domain = devices[i].segment;
        host.bus = devices[i].bus;
        host.slot = PCI_SLOT(devices[i].devfn);
        host.function = PCI_FUNC(devices[i].devfn);

        if (vfio_pci_host_match(&host, vdev->vbasedev.name)) {
            continue;
        }

        QLIST_FOREACH(group, &vfio_group_list, next) {
            if (group->groupid == devices[i].group_id) {
                break;
            }
        }

        if (!group) {
            break;
        }

        QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
            if (!vbasedev_iter->dev->realized ||
                vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) {
                continue;
            }
            tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev);
            if (vfio_pci_host_match(&host, tmp->vbasedev.name)) {
                vfio_pci_post_reset(tmp);
                break;
            }
        }
    }
out_single:
    if (!single) {
        vfio_pci_post_reset(vdev);
    }
    g_free(info);

    return ret;
}
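/*
 * Note the two-call pattern above for VFIO_DEVICE_GET_PCI_HOT_RESET_INFO:
 * the first call with a bare struct is expected to fail with ENOSPC while
 * still filling in info->count, which then sizes the reallocated buffer
 * for the second call that retrieves the dependent device array.
 */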
/*
 * We want to differentiate hot reset of multiple in-use devices vs hot reset
 * of a single in-use device.  VFIO_DEVICE_RESET will already handle the case
 * of doing hot resets when there is only a single device per bus.  The in-use
 * here refers to how many VFIODevices are affected.  A hot reset that affects
 * multiple devices, but only a single in-use device, means that we can call
 * it from our bus ->reset() callback since the extent is effectively a single
 * device.  This allows us to make use of it in the hotplug path.  When there
 * are multiple in-use devices, we can only trigger the hot reset during a
 * system reset and thus from our reset handler.  We separate _one vs _multi
 * here so that we don't overlap and do a double reset on the system reset
 * path where both our reset handler and ->reset() callback are used.  Calling
 * _one() will only do a hot reset for the one in-use device case, calling
 * _multi() will do nothing if a _one() would have been sufficient.
 */
static int vfio_pci_hot_reset_one(VFIOPCIDevice *vdev)
{
    return vfio_pci_hot_reset(vdev, true);
}

static int vfio_pci_hot_reset_multi(VFIODevice *vbasedev)
{
    VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);

    return vfio_pci_hot_reset(vdev, false);
}
static void vfio_pci_compute_needs_reset(VFIODevice *vbasedev)
{
    VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);

    if (!vbasedev->reset_works || (!vdev->has_flr && vdev->has_pm_reset)) {
        vbasedev->needs_reset = true;
    }
}

static VFIODeviceOps vfio_pci_ops = {
    .vfio_compute_needs_reset = vfio_pci_compute_needs_reset,
    .vfio_hot_reset_multi = vfio_pci_hot_reset_multi,
    .vfio_eoi = vfio_intx_eoi,
};
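/*
 * These callbacks are invoked from the generic VFIO code:
 * compute_needs_reset marks devices that must rely on a bus reset,
 * hot_reset_multi performs that reset across in-use devices, and
 * vfio_eoi allows INTx to be unmasked once the guest acknowledges
 * the interrupt.
 */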
int vfio_populate_vga(VFIOPCIDevice *vdev, Error **errp)
{
    VFIODevice *vbasedev = &vdev->vbasedev;
    struct vfio_region_info *reg_info;
    int ret;

    ret = vfio_get_region_info(vbasedev, VFIO_PCI_VGA_REGION_INDEX, &reg_info);
    if (ret) {
        error_setg_errno(errp, -ret,
                         "failed getting region info for VGA region index %d",
                         VFIO_PCI_VGA_REGION_INDEX);
        return ret;
    }

    if (!(reg_info->flags & VFIO_REGION_INFO_FLAG_READ) ||
        !(reg_info->flags & VFIO_REGION_INFO_FLAG_WRITE) ||
        reg_info->size < 0xbffff + 1) {
        error_setg(errp, "unexpected VGA info, flags 0x%lx, size 0x%lx",
                   (unsigned long)reg_info->flags,
                   (unsigned long)reg_info->size);
        g_free(reg_info);
        return -EINVAL;
    }

    vdev->vga = g_new0(VFIOVGA, 1);

    vdev->vga->fd_offset = reg_info->offset;
    vdev->vga->fd = vdev->vbasedev.fd;

    g_free(reg_info);

    vdev->vga->region[QEMU_PCI_VGA_MEM].offset = QEMU_PCI_VGA_MEM_BASE;
    vdev->vga->region[QEMU_PCI_VGA_MEM].nr = QEMU_PCI_VGA_MEM;
    QLIST_INIT(&vdev->vga->region[QEMU_PCI_VGA_MEM].quirks);

    memory_region_init_io(&vdev->vga->region[QEMU_PCI_VGA_MEM].mem,
                          OBJECT(vdev), &vfio_vga_ops,
                          &vdev->vga->region[QEMU_PCI_VGA_MEM],
                          "vfio-vga-mmio@0xa0000",
                          QEMU_PCI_VGA_MEM_SIZE);

    vdev->vga->region[QEMU_PCI_VGA_IO_LO].offset = QEMU_PCI_VGA_IO_LO_BASE;
    vdev->vga->region[QEMU_PCI_VGA_IO_LO].nr = QEMU_PCI_VGA_IO_LO;
    QLIST_INIT(&vdev->vga->region[QEMU_PCI_VGA_IO_LO].quirks);

    memory_region_init_io(&vdev->vga->region[QEMU_PCI_VGA_IO_LO].mem,
                          OBJECT(vdev), &vfio_vga_ops,
                          &vdev->vga->region[QEMU_PCI_VGA_IO_LO],
                          "vfio-vga-io@0x3b0",
                          QEMU_PCI_VGA_IO_LO_SIZE);

    vdev->vga->region[QEMU_PCI_VGA_IO_HI].offset = QEMU_PCI_VGA_IO_HI_BASE;
    vdev->vga->region[QEMU_PCI_VGA_IO_HI].nr = QEMU_PCI_VGA_IO_HI;
    QLIST_INIT(&vdev->vga->region[QEMU_PCI_VGA_IO_HI].quirks);

    memory_region_init_io(&vdev->vga->region[QEMU_PCI_VGA_IO_HI].mem,
                          OBJECT(vdev), &vfio_vga_ops,
                          &vdev->vga->region[QEMU_PCI_VGA_IO_HI],
                          "vfio-vga-io@0x3c0",
                          QEMU_PCI_VGA_IO_HI_SIZE);

    pci_register_vga(&vdev->pdev, &vdev->vga->region[QEMU_PCI_VGA_MEM].mem,
                     &vdev->vga->region[QEMU_PCI_VGA_IO_LO].mem,
                     &vdev->vga->region[QEMU_PCI_VGA_IO_HI].mem);

    return 0;
}
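/*
 * The size check above (0xbffff + 1) ensures the host VGA region spans
 * all the legacy ranges, which VFIO exposes at their legacy offsets:
 * the 0x3b0 and 0x3c0 I/O port ranges and the 0xa0000-0xbffff memory
 * window mapped by the three sub-regions.
 */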
static void vfio_populate_device(VFIOPCIDevice *vdev, Error **errp)
{
    VFIODevice *vbasedev = &vdev->vbasedev;
    struct vfio_region_info *reg_info;
    struct vfio_irq_info irq_info = { .argsz = sizeof(irq_info) };
    int i, ret = -1;

    /* Sanity check device */
    if (!(vbasedev->flags & VFIO_DEVICE_FLAGS_PCI)) {
        error_setg(errp, "this isn't a PCI device");
        return;
    }

    if (vbasedev->num_regions < VFIO_PCI_CONFIG_REGION_INDEX + 1) {
        error_setg(errp, "unexpected number of io regions %u",
                   vbasedev->num_regions);
        return;
    }

    if (vbasedev->num_irqs < VFIO_PCI_MSIX_IRQ_INDEX + 1) {
        error_setg(errp, "unexpected number of irqs %u", vbasedev->num_irqs);
        return;
    }

    for (i = VFIO_PCI_BAR0_REGION_INDEX; i < VFIO_PCI_ROM_REGION_INDEX; i++) {
        char *name = g_strdup_printf("%s BAR %d", vbasedev->name, i);

        ret = vfio_region_setup(OBJECT(vdev), vbasedev,
                                &vdev->bars[i].region, i, name);
        g_free(name);

        if (ret) {
            error_setg_errno(errp, -ret, "failed to get region %d info", i);
            return;
        }

        QLIST_INIT(&vdev->bars[i].quirks);
    }

    ret = vfio_get_region_info(vbasedev,
                               VFIO_PCI_CONFIG_REGION_INDEX, &reg_info);
    if (ret) {
        error_setg_errno(errp, -ret, "failed to get config info");
        return;
    }

    trace_vfio_populate_device_config(vdev->vbasedev.name,
                                      (unsigned long)reg_info->size,
                                      (unsigned long)reg_info->offset,
                                      (unsigned long)reg_info->flags);

    vdev->config_size = reg_info->size;
    if (vdev->config_size == PCI_CONFIG_SPACE_SIZE) {
        vdev->pdev.cap_present &= ~QEMU_PCI_CAP_EXPRESS;
    }
    vdev->config_offset = reg_info->offset;

    g_free(reg_info);

    if (vdev->features & VFIO_FEATURE_ENABLE_VGA) {
        ret = vfio_populate_vga(vdev, errp);
        if (ret) {
            error_append_hint(errp, "device does not support "
                              "requested feature x-vga\n");
            return;
        }
    }

    irq_info.index = VFIO_PCI_ERR_IRQ_INDEX;

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_IRQ_INFO, &irq_info);
    if (ret) {
        /* This can fail for an old kernel or legacy PCI dev */
        trace_vfio_populate_device_get_irq_info_failure(strerror(errno));
    } else if (irq_info.count == 1) {
        vdev->pci_aer = true;
    } else {
        warn_report(VFIO_MSG_PREFIX
                    "Could not enable error recovery for the device",
                    vbasedev->name);
    }
}
static void vfio_put_device(VFIOPCIDevice *vdev)
{
    g_free(vdev->vbasedev.name);
    g_free(vdev->msix);

    vfio_put_base_device(&vdev->vbasedev);
}
static void vfio_err_notifier_handler(void *opaque)
{
    VFIOPCIDevice *vdev = opaque;

    if (!event_notifier_test_and_clear(&vdev->err_notifier)) {
        return;
    }

    /*
     * TBD. Retrieve the error details and decide what action
     * needs to be taken. One of the actions could be to pass
     * the error to the guest and have the guest driver recover
     * from the error. This requires that PCIe capabilities be
     * exposed to the guest. For now, we just terminate the
     * guest to contain the error.
     */

    error_report("%s(%s) Unrecoverable error detected. Please collect any data"
                 " possible and then kill the guest", __func__,
                 vdev->vbasedev.name);

    vm_stop(RUN_STATE_INTERNAL_ERROR);
}
/*
 * Registers error notifier for devices supporting error recovery.
 * If we encounter a failure in this function, we report an error
 * and continue after disabling error recovery support for the
 * device.
 */
static void vfio_register_err_notifier(VFIOPCIDevice *vdev)
{
    Error *err = NULL;
    int32_t fd;

    if (!vdev->pci_aer) {
        return;
    }

    if (event_notifier_init(&vdev->err_notifier, 0)) {
        error_report("vfio: Unable to init event notifier for error detection");
        vdev->pci_aer = false;
        return;
    }

    fd = event_notifier_get_fd(&vdev->err_notifier);
    qemu_set_fd_handler(fd, vfio_err_notifier_handler, NULL, vdev);

    if (vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_ERR_IRQ_INDEX, 0,
                               VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err)) {
        error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
        qemu_set_fd_handler(fd, NULL, NULL, vdev);
        event_notifier_cleanup(&vdev->err_notifier);
        vdev->pci_aer = false;
    }
}
static void vfio_unregister_err_notifier(VFIOPCIDevice *vdev)
{
    Error *err = NULL;

    if (!vdev->pci_aer) {
        return;
    }

    if (vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_ERR_IRQ_INDEX, 0,
                               VFIO_IRQ_SET_ACTION_TRIGGER, -1, &err)) {
        error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
    }
    qemu_set_fd_handler(event_notifier_get_fd(&vdev->err_notifier),
                        NULL, NULL, vdev);
    event_notifier_cleanup(&vdev->err_notifier);
}
static void vfio_req_notifier_handler(void *opaque)
{
    VFIOPCIDevice *vdev = opaque;
    Error *err = NULL;

    if (!event_notifier_test_and_clear(&vdev->req_notifier)) {
        return;
    }

    qdev_unplug(DEVICE(vdev), &err);
    if (err) {
        warn_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
    }
}
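/*
 * The request IRQ fires when the host wants the device back, e.g. when
 * the device is being unbound from the vfio driver on the host; the
 * handler above responds by hot-unplugging the device from the guest.
 */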
static void vfio_register_req_notifier(VFIOPCIDevice *vdev)
{
    struct vfio_irq_info irq_info = { .argsz = sizeof(irq_info),
                                      .index = VFIO_PCI_REQ_IRQ_INDEX };
    Error *err = NULL;
    int32_t fd;

    if (!(vdev->features & VFIO_FEATURE_ENABLE_REQ)) {
        return;
    }

    if (ioctl(vdev->vbasedev.fd,
              VFIO_DEVICE_GET_IRQ_INFO, &irq_info) < 0 || irq_info.count < 1) {
        return;
    }

    if (event_notifier_init(&vdev->req_notifier, 0)) {
        error_report("vfio: Unable to init event notifier for device request");
        return;
    }

    fd = event_notifier_get_fd(&vdev->req_notifier);
    qemu_set_fd_handler(fd, vfio_req_notifier_handler, NULL, vdev);

    if (vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_REQ_IRQ_INDEX, 0,
                               VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err)) {
        error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
        qemu_set_fd_handler(fd, NULL, NULL, vdev);
        event_notifier_cleanup(&vdev->req_notifier);
    } else {
        vdev->req_enabled = true;
    }
}
static void vfio_unregister_req_notifier(VFIOPCIDevice *vdev)
{
    Error *err = NULL;

    if (!vdev->req_enabled) {
        return;
    }

    if (vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_REQ_IRQ_INDEX, 0,
                               VFIO_IRQ_SET_ACTION_TRIGGER, -1, &err)) {
        error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
    }
    qemu_set_fd_handler(event_notifier_get_fd(&vdev->req_notifier),
                        NULL, NULL, vdev);
    event_notifier_cleanup(&vdev->req_notifier);

    vdev->req_enabled = false;
}
static void vfio_realize(PCIDevice *pdev, Error **errp)
{
    VFIOPCIDevice *vdev = PCI_VFIO(pdev);
    VFIODevice *vbasedev_iter;
    VFIOGroup *group;
    char *tmp, *subsys, group_path[PATH_MAX], *group_name;
    Error *err = NULL;
    ssize_t len;
    struct stat st;
    int groupid;
    int i, ret;
    bool is_mdev;

    if (!vdev->vbasedev.sysfsdev) {
        if (!(~vdev->host.domain || ~vdev->host.bus ||
              ~vdev->host.slot || ~vdev->host.function)) {
            error_setg(errp, "No provided host device");
            error_append_hint(errp, "Use -device vfio-pci,host=DDDD:BB:DD.F "
                              "or -device vfio-pci,sysfsdev=PATH_TO_DEVICE\n");
            return;
        }
        vdev->vbasedev.sysfsdev =
            g_strdup_printf("/sys/bus/pci/devices/%04x:%02x:%02x.%01x",
                            vdev->host.domain, vdev->host.bus,
                            vdev->host.slot, vdev->host.function);
    }

    if (stat(vdev->vbasedev.sysfsdev, &st) < 0) {
        error_setg_errno(errp, errno, "no such host device");
        error_prepend(errp, VFIO_MSG_PREFIX, vdev->vbasedev.sysfsdev);
        return;
    }

    vdev->vbasedev.name = g_path_get_basename(vdev->vbasedev.sysfsdev);
    vdev->vbasedev.ops = &vfio_pci_ops;
    vdev->vbasedev.type = VFIO_DEVICE_TYPE_PCI;
    vdev->vbasedev.dev = DEVICE(vdev);

    tmp = g_strdup_printf("%s/iommu_group", vdev->vbasedev.sysfsdev);
    len = readlink(tmp, group_path, sizeof(group_path));
    g_free(tmp);

    if (len <= 0 || len >= sizeof(group_path)) {
        error_setg_errno(errp, len < 0 ? errno : ENAMETOOLONG,
                         "no iommu_group found");
        goto error;
    }

    group_path[len] = 0;

    group_name = basename(group_path);
    if (sscanf(group_name, "%d", &groupid) != 1) {
        error_setg_errno(errp, errno, "failed to read %s", group_path);
        goto error;
    }

    trace_vfio_realize(vdev->vbasedev.name, groupid);

    group = vfio_get_group(groupid, pci_device_iommu_address_space(pdev), errp);
    if (!group) {
        goto error;
    }

    QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
        if (strcmp(vbasedev_iter->name, vdev->vbasedev.name) == 0) {
            error_setg(errp, "device is already attached");
            vfio_put_group(group);
            goto error;
        }
    }

    /*
     * Mediated devices *might* operate compatibly with memory ballooning, but
     * we cannot know for certain, it depends on whether the mdev vendor driver
     * stays in sync with the active working set of the guest driver.  Prevent
     * the x-balloon-allowed option unless this is minimally an mdev device.
     */
    tmp = g_strdup_printf("%s/subsystem", vdev->vbasedev.sysfsdev);
    subsys = realpath(tmp, NULL);
    g_free(tmp);
    is_mdev = subsys && (strcmp(subsys, "/sys/bus/mdev") == 0);
    free(subsys);

    trace_vfio_mdev(vdev->vbasedev.name, is_mdev);

    if (vdev->vbasedev.balloon_allowed && !is_mdev) {
        error_setg(errp, "x-balloon-allowed only potentially compatible "
                   "with mdev devices");
        vfio_put_group(group);
        goto error;
    }

    ret = vfio_get_device(group, vdev->vbasedev.name, &vdev->vbasedev, errp);
    if (ret) {
        vfio_put_group(group);
        goto error;
    }

    vfio_populate_device(vdev, &err);
    if (err) {
        error_propagate(errp, err);
        goto error;
    }

    /* Get a copy of config space */
    ret = pread(vdev->vbasedev.fd, vdev->pdev.config,
                MIN(pci_config_size(&vdev->pdev), vdev->config_size),
                vdev->config_offset);
    if (ret < (int)MIN(pci_config_size(&vdev->pdev), vdev->config_size)) {
        ret = ret < 0 ? -errno : -EFAULT;
        error_setg_errno(errp, -ret, "failed to read device config space");
        goto error;
    }

    /* vfio emulates a lot for us, but some bits need extra love */
    vdev->emulated_config_bits = g_malloc0(vdev->config_size);

    /* QEMU can choose to expose the ROM or not */
    memset(vdev->emulated_config_bits + PCI_ROM_ADDRESS, 0xff, 4);
    /* QEMU can also add or extend BARs */
    memset(vdev->emulated_config_bits + PCI_BASE_ADDRESS_0, 0xff, 6 * 4);

    /*
     * The PCI spec reserves vendor ID 0xffff as an invalid value.  The
     * device ID is managed by the vendor and need only be a 16-bit value.
     * Allow any 16-bit value for subsystem so they can be hidden or changed.
     */
    if (vdev->vendor_id != PCI_ANY_ID) {
        if (vdev->vendor_id >= 0xffff) {
            error_setg(errp, "invalid PCI vendor ID provided");
            goto error;
        }
        vfio_add_emulated_word(vdev, PCI_VENDOR_ID, vdev->vendor_id, ~0);
        trace_vfio_pci_emulated_vendor_id(vdev->vbasedev.name, vdev->vendor_id);
    } else {
        vdev->vendor_id = pci_get_word(pdev->config + PCI_VENDOR_ID);
    }

    if (vdev->device_id != PCI_ANY_ID) {
        if (vdev->device_id > 0xffff) {
            error_setg(errp, "invalid PCI device ID provided");
            goto error;
        }
        vfio_add_emulated_word(vdev, PCI_DEVICE_ID, vdev->device_id, ~0);
        trace_vfio_pci_emulated_device_id(vdev->vbasedev.name, vdev->device_id);
    } else {
        vdev->device_id = pci_get_word(pdev->config + PCI_DEVICE_ID);
    }

    if (vdev->sub_vendor_id != PCI_ANY_ID) {
        if (vdev->sub_vendor_id > 0xffff) {
            error_setg(errp, "invalid PCI subsystem vendor ID provided");
            goto error;
        }
        vfio_add_emulated_word(vdev, PCI_SUBSYSTEM_VENDOR_ID,
                               vdev->sub_vendor_id, ~0);
        trace_vfio_pci_emulated_sub_vendor_id(vdev->vbasedev.name,
                                              vdev->sub_vendor_id);
    }

    if (vdev->sub_device_id != PCI_ANY_ID) {
        if (vdev->sub_device_id > 0xffff) {
            error_setg(errp, "invalid PCI subsystem device ID provided");
            goto error;
        }
        vfio_add_emulated_word(vdev, PCI_SUBSYSTEM_ID, vdev->sub_device_id, ~0);
        trace_vfio_pci_emulated_sub_device_id(vdev->vbasedev.name,
                                              vdev->sub_device_id);
    }

    /* QEMU can change multi-function devices to single function, or reverse */
    vdev->emulated_config_bits[PCI_HEADER_TYPE] =
                                              PCI_HEADER_TYPE_MULTI_FUNCTION;

    /* Restore or clear multifunction, this is always controlled by QEMU */
    if (vdev->pdev.cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
        vdev->pdev.config[PCI_HEADER_TYPE] |= PCI_HEADER_TYPE_MULTI_FUNCTION;
    } else {
        vdev->pdev.config[PCI_HEADER_TYPE] &= ~PCI_HEADER_TYPE_MULTI_FUNCTION;
    }

    /*
     * Clear host resource mapping info.  If we choose not to register a
     * BAR, such as might be the case with the option ROM, we can get
     * confusing, unwritable, residual addresses from the host here.
     */
    memset(&vdev->pdev.config[PCI_BASE_ADDRESS_0], 0, 24);
    memset(&vdev->pdev.config[PCI_ROM_ADDRESS], 0, 4);

    vfio_pci_size_rom(vdev);

    vfio_bars_prepare(vdev);

    vfio_msix_early_setup(vdev, &err);
    if (err) {
        error_propagate(errp, err);
        goto error;
    }

    vfio_bars_register(vdev);

    ret = vfio_add_capabilities(vdev, errp);
    if (ret) {
        goto out_teardown;
    }

    if (vdev->vga) {
        vfio_vga_quirk_setup(vdev);
    }

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        vfio_bar_quirk_setup(vdev, i);
    }

    if (!vdev->igd_opregion &&
        vdev->features & VFIO_FEATURE_ENABLE_IGD_OPREGION) {
        struct vfio_region_info *opregion;

        if (vdev->pdev.qdev.hotplugged) {
            error_setg(errp,
                       "cannot support IGD OpRegion feature on hotplugged "
                       "device");
            goto out_teardown;
        }

        ret = vfio_get_dev_region_info(&vdev->vbasedev,
                        VFIO_REGION_TYPE_PCI_VENDOR_TYPE | PCI_VENDOR_ID_INTEL,
                        VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION, &opregion);
        if (ret) {
            error_setg_errno(errp, -ret,
                             "does not support requested IGD OpRegion feature");
            goto out_teardown;
        }

        ret = vfio_pci_igd_opregion_init(vdev, opregion, errp);
        g_free(opregion);
        if (ret) {
            goto out_teardown;
        }
    }

    /* QEMU emulates all of MSI & MSIX */
    if (pdev->cap_present & QEMU_PCI_CAP_MSIX) {
        memset(vdev->emulated_config_bits + pdev->msix_cap, 0xff,
               MSIX_CAP_LENGTH);
    }

    if (pdev->cap_present & QEMU_PCI_CAP_MSI) {
        memset(vdev->emulated_config_bits + pdev->msi_cap, 0xff,
               vdev->msi_cap_size);
    }

    if (vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1)) {
        vdev->intx.mmap_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                             vfio_intx_mmap_enable, vdev);
        pci_device_set_intx_routing_notifier(&vdev->pdev, vfio_intx_update);
        ret = vfio_intx_enable(vdev, errp);
        if (ret) {
            goto out_teardown;
        }
    }

    if (vdev->display != ON_OFF_AUTO_OFF) {
        ret = vfio_display_probe(vdev, errp);
        if (ret) {
            goto out_teardown;
        }
    }
    if (vdev->enable_ramfb && vdev->dpy == NULL) {
        error_setg(errp, "ramfb=on requires display=on");
        goto out_teardown;
    }
    if (vdev->display_xres || vdev->display_yres) {
        if (vdev->dpy == NULL) {
            error_setg(errp, "xres and yres properties require display=on");
            goto out_teardown;
        }
        if (vdev->dpy->edid_regs == NULL) {
            error_setg(errp, "xres and yres properties need edid support");
            goto out_teardown;
        }
    }

    if (vdev->vendor_id == PCI_VENDOR_ID_NVIDIA) {
        ret = vfio_pci_nvidia_v100_ram_init(vdev, errp);
        if (ret && ret != -ENODEV) {
            error_report("Failed to setup NVIDIA V100 GPU RAM");
        }
    }

    if (vdev->vendor_id == PCI_VENDOR_ID_IBM) {
        ret = vfio_pci_nvlink2_init(vdev, errp);
        if (ret && ret != -ENODEV) {
            error_report("Failed to setup NVlink2 bridge");
        }
    }

    vfio_register_err_notifier(vdev);
    vfio_register_req_notifier(vdev);
    vfio_setup_resetfn_quirk(vdev);

    return;

out_teardown:
    pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
    vfio_teardown_msi(vdev);
    vfio_bars_exit(vdev);
error:
    error_prepend(errp, VFIO_MSG_PREFIX, vdev->vbasedev.name);
}
static void vfio_instance_finalize(Object *obj)
{
    VFIOPCIDevice *vdev = PCI_VFIO(obj);
    VFIOGroup *group = vdev->vbasedev.group;

    vfio_display_finalize(vdev);
    vfio_bars_finalize(vdev);
    g_free(vdev->emulated_config_bits);
    g_free(vdev->rom);
    /*
     * XXX Leaking igd_opregion is not an oversight, we can't remove the
     * fw_cfg entry therefore leaking this allocation seems like the safest
     * option.
     *
     * g_free(vdev->igd_opregion);
     */
    vfio_put_device(vdev);
    vfio_put_group(group);
}
static void vfio_exitfn(PCIDevice *pdev)
{
    VFIOPCIDevice *vdev = PCI_VFIO(pdev);

    vfio_unregister_req_notifier(vdev);
    vfio_unregister_err_notifier(vdev);
    pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
    vfio_disable_interrupts(vdev);
    if (vdev->intx.mmap_timer) {
        timer_free(vdev->intx.mmap_timer);
    }
    vfio_teardown_msi(vdev);
    vfio_bars_exit(vdev);
}
static void vfio_pci_reset(DeviceState *dev)
{
    VFIOPCIDevice *vdev = PCI_VFIO(dev);

    trace_vfio_pci_reset(vdev->vbasedev.name);

    vfio_pci_pre_reset(vdev);

    if (vdev->display != ON_OFF_AUTO_OFF) {
        vfio_display_reset(vdev);
    }

    if (vdev->resetfn && !vdev->resetfn(vdev)) {
        goto post_reset;
    }

    if (vdev->vbasedev.reset_works &&
        (vdev->has_flr || !vdev->has_pm_reset) &&
        !ioctl(vdev->vbasedev.fd, VFIO_DEVICE_RESET)) {
        trace_vfio_pci_reset_flr(vdev->vbasedev.name);
        goto post_reset;
    }

    /* See if we can do our own bus reset */
    if (!vfio_pci_hot_reset_one(vdev)) {
        goto post_reset;
    }

    /* If nothing else works and the device supports PM reset, use it */
    if (vdev->vbasedev.reset_works && vdev->has_pm_reset &&
        !ioctl(vdev->vbasedev.fd, VFIO_DEVICE_RESET)) {
        trace_vfio_pci_reset_pm(vdev->vbasedev.name);
        goto post_reset;
    }

post_reset:
    vfio_pci_post_reset(vdev);
}
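/*
 * The reset paths above escalate from least to most invasive: a
 * device-specific quirk resetfn, a function-level VFIO_DEVICE_RESET
 * (preferred when FLR is available or PM reset is not), a bus hot
 * reset, and finally a PM-based reset as the last resort.
 */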
static void vfio_instance_init(Object *obj)
{
    PCIDevice *pci_dev = PCI_DEVICE(obj);
    VFIOPCIDevice *vdev = PCI_VFIO(obj);

    device_add_bootindex_property(obj, &vdev->bootindex,
                                  "bootindex", NULL,
                                  &pci_dev->qdev, NULL);
    vdev->host.domain = ~0U;
    vdev->host.bus = ~0U;
    vdev->host.slot = ~0U;
    vdev->host.function = ~0U;

    vdev->nv_gpudirect_clique = 0xFF;

    /* QEMU_PCI_CAP_EXPRESS initialization does not depend on QEMU command
     * line, therefore, no need to wait to realize like other devices */
    pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS;
}
static Property vfio_pci_dev_properties[] = {
    DEFINE_PROP_PCI_HOST_DEVADDR("host", VFIOPCIDevice, host),
    DEFINE_PROP_STRING("sysfsdev", VFIOPCIDevice, vbasedev.sysfsdev),
    DEFINE_PROP_ON_OFF_AUTO("display", VFIOPCIDevice,
                            display, ON_OFF_AUTO_OFF),
    DEFINE_PROP_UINT32("xres", VFIOPCIDevice, display_xres, 0),
    DEFINE_PROP_UINT32("yres", VFIOPCIDevice, display_yres, 0),
    DEFINE_PROP_UINT32("x-intx-mmap-timeout-ms", VFIOPCIDevice,
                       intx.mmap_timeout, 1100),
    DEFINE_PROP_BIT("x-vga", VFIOPCIDevice, features,
                    VFIO_FEATURE_ENABLE_VGA_BIT, false),
    DEFINE_PROP_BIT("x-req", VFIOPCIDevice, features,
                    VFIO_FEATURE_ENABLE_REQ_BIT, true),
    DEFINE_PROP_BIT("x-igd-opregion", VFIOPCIDevice, features,
                    VFIO_FEATURE_ENABLE_IGD_OPREGION_BIT, false),
    DEFINE_PROP_BOOL("x-no-mmap", VFIOPCIDevice, vbasedev.no_mmap, false),
    DEFINE_PROP_BOOL("x-balloon-allowed", VFIOPCIDevice,
                     vbasedev.balloon_allowed, false),
    DEFINE_PROP_BOOL("x-no-kvm-intx", VFIOPCIDevice, no_kvm_intx, false),
    DEFINE_PROP_BOOL("x-no-kvm-msi", VFIOPCIDevice, no_kvm_msi, false),
    DEFINE_PROP_BOOL("x-no-kvm-msix", VFIOPCIDevice, no_kvm_msix, false),
    DEFINE_PROP_BOOL("x-no-geforce-quirks", VFIOPCIDevice,
                     no_geforce_quirks, false),
    DEFINE_PROP_BOOL("x-no-kvm-ioeventfd", VFIOPCIDevice, no_kvm_ioeventfd,
                     false),
    DEFINE_PROP_BOOL("x-no-vfio-ioeventfd", VFIOPCIDevice, no_vfio_ioeventfd,
                     false),
    DEFINE_PROP_UINT32("x-pci-vendor-id", VFIOPCIDevice, vendor_id, PCI_ANY_ID),
    DEFINE_PROP_UINT32("x-pci-device-id", VFIOPCIDevice, device_id, PCI_ANY_ID),
    DEFINE_PROP_UINT32("x-pci-sub-vendor-id", VFIOPCIDevice,
                       sub_vendor_id, PCI_ANY_ID),
    DEFINE_PROP_UINT32("x-pci-sub-device-id", VFIOPCIDevice,
                       sub_device_id, PCI_ANY_ID),
    DEFINE_PROP_UINT32("x-igd-gms", VFIOPCIDevice, igd_gms, 0),
    DEFINE_PROP_UNSIGNED_NODEFAULT("x-nv-gpudirect-clique", VFIOPCIDevice,
                                   nv_gpudirect_clique,
                                   qdev_prop_nv_gpudirect_clique, uint8_t),
    DEFINE_PROP_OFF_AUTO_PCIBAR("x-msix-relocation", VFIOPCIDevice, msix_relo,
                                OFF_AUTOPCIBAR_OFF),
    /*
     * TODO - support passed fds... is this necessary?
     * DEFINE_PROP_STRING("vfiofd", VFIOPCIDevice, vfiofd_name),
     * DEFINE_PROP_STRING("vfiogroupfd", VFIOPCIDevice, vfiogroupfd_name),
     */
    DEFINE_PROP_END_OF_LIST(),
};
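/*
 * Typical usage (illustrative, matching the hint in vfio_realize()):
 *
 *   -device vfio-pci,host=0000:01:00.0
 *
 * or, equivalently, by sysfs path:
 *
 *   -device vfio-pci,sysfsdev=/sys/bus/pci/devices/0000:01:00.0
 */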
static const VMStateDescription vfio_pci_vmstate = {
    .name = "vfio-pci",
    .unmigratable = 1,
};
static void vfio_pci_dev_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *pdc = PCI_DEVICE_CLASS(klass);

    dc->reset = vfio_pci_reset;
    dc->props = vfio_pci_dev_properties;
    dc->vmsd = &vfio_pci_vmstate;
    dc->desc = "VFIO-based PCI device assignment";
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    pdc->realize = vfio_realize;
    pdc->exit = vfio_exitfn;
    pdc->config_read = vfio_pci_read_config;
    pdc->config_write = vfio_pci_write_config;
}
static const TypeInfo vfio_pci_dev_info = {
    .name = TYPE_VFIO_PCI,
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VFIOPCIDevice),
    .class_init = vfio_pci_dev_class_init,
    .instance_init = vfio_instance_init,
    .instance_finalize = vfio_instance_finalize,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_PCIE_DEVICE },
        { INTERFACE_CONVENTIONAL_PCI_DEVICE },
        { }
    },
};
static Property vfio_pci_dev_nohotplug_properties[] = {
    DEFINE_PROP_BOOL("ramfb", VFIOPCIDevice, enable_ramfb, false),
    DEFINE_PROP_END_OF_LIST(),
};

static void vfio_pci_nohotplug_dev_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->props = vfio_pci_dev_nohotplug_properties;
    dc->hotpluggable = false;
}

static const TypeInfo vfio_pci_nohotplug_dev_info = {
    .name = TYPE_VIFO_PCI_NOHOTPLUG,
    .parent = TYPE_VFIO_PCI,
    .instance_size = sizeof(VFIOPCIDevice),
    .class_init = vfio_pci_nohotplug_dev_class_init,
};

static void register_vfio_pci_dev_type(void)
{
    type_register_static(&vfio_pci_dev_info);
    type_register_static(&vfio_pci_nohotplug_dev_info);
}

type_init(register_vfio_pci_dev_type)