/*
 * vfio based device assignment support
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Alex Williamson <alex.williamson@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Based on qemu-kvm device-assignment:
 *  Adapted for KVM by Qumranet.
 *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
 *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
 *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
 *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
 *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
 */

#include "qemu/osdep.h"
#include <linux/vfio.h>
#include <sys/ioctl.h>

#include "hw/hw.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/pci/pci_bridge.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qemu/option.h"
#include "qemu/range.h"
#include "qemu/units.h"
#include "sysemu/kvm.h"
#include "sysemu/runstate.h"
#include "sysemu/sysemu.h"
#include "pci.h"
#include "trace.h"
#include "qapi/error.h"
#include "migration/blocker.h"

#define TYPE_VFIO_PCI "vfio-pci"
#define PCI_VFIO(obj)    OBJECT_CHECK(VFIOPCIDevice, obj, TYPE_VFIO_PCI)

#define TYPE_VFIO_PCI_NOHOTPLUG "vfio-pci-nohotplug"

static void vfio_disable_interrupts(VFIOPCIDevice *vdev);
static void vfio_mmap_set_enabled(VFIOPCIDevice *vdev, bool enabled);

/*
 * Disabling BAR mmapping can be slow, but toggling it around INTx can
 * also be a huge overhead.  We try to get the best of both worlds by
 * waiting until an interrupt to disable mmaps (subsequent transitions
 * to the same state are effectively no overhead).  If the interrupt has
 * been serviced and the time gap is long enough, we re-enable mmaps for
 * performance.  This works well for things like graphics cards, which
 * may not use their interrupt at all and are penalized to an unusable
 * level by read/write BAR traps.  Other devices, like NICs, have more
 * regular interrupts and see much better latency by staying in non-mmap
 * mode.  We therefore set the default mmap_timeout such that a ping
 * is just enough to keep the mmap disabled.  Users can experiment with
 * other options with the x-intx-mmap-timeout-ms parameter (a value of
 * zero disables the timer).
 */
static void vfio_intx_mmap_enable(void *opaque)
{
    VFIOPCIDevice *vdev = opaque;

    if (vdev->intx.pending) {
        timer_mod(vdev->intx.mmap_timer,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vdev->intx.mmap_timeout);
        return;
    }

    vfio_mmap_set_enabled(vdev, true);
}

static void vfio_intx_interrupt(void *opaque)
{
    VFIOPCIDevice *vdev = opaque;

    if (!event_notifier_test_and_clear(&vdev->intx.interrupt)) {
        return;
    }

    trace_vfio_intx_interrupt(vdev->vbasedev.name, 'A' + vdev->intx.pin);

    vdev->intx.pending = true;
    pci_irq_assert(&vdev->pdev);
    vfio_mmap_set_enabled(vdev, false);
    if (vdev->intx.mmap_timeout) {
        timer_mod(vdev->intx.mmap_timer,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vdev->intx.mmap_timeout);
    }
}

static void vfio_intx_eoi(VFIODevice *vbasedev)
{
    VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);

    if (!vdev->intx.pending) {
        return;
    }

    trace_vfio_intx_eoi(vbasedev->name);

    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);
    vfio_unmask_single_irqindex(vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
}

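/*
 * Route INTx directly through KVM when possible: the interrupt eventfd is
 * handed to the in-kernel irqchip as an irqfd, and a second "unmask"
 * eventfd acts as the resamplefd, so level-triggered EOIs are serviced
 * without bouncing through QEMU.
 */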
static void vfio_intx_enable_kvm(VFIOPCIDevice *vdev, Error **errp)
{
#ifdef CONFIG_KVM
    int irq_fd = event_notifier_get_fd(&vdev->intx.interrupt);
    Error *err = NULL;

    if (vdev->no_kvm_intx || !kvm_irqfds_enabled() ||
        vdev->intx.route.mode != PCI_INTX_ENABLED ||
        !kvm_resamplefds_enabled()) {
        return;
    }

    /* Get to a known interrupt state */
    qemu_set_fd_handler(irq_fd, NULL, NULL, vdev);
    vfio_mask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);

    /* Get an eventfd for resample/unmask */
    if (event_notifier_init(&vdev->intx.unmask, 0)) {
        error_setg(errp, "event_notifier_init failed eoi");
        goto fail;
    }

    if (kvm_irqchip_add_irqfd_notifier_gsi(kvm_state,
                                           &vdev->intx.interrupt,
                                           &vdev->intx.unmask,
                                           vdev->intx.route.irq)) {
        error_setg_errno(errp, errno, "failed to setup resample irqfd");
        goto fail_irqfd;
    }

    if (vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX, 0,
                               VFIO_IRQ_SET_ACTION_UNMASK,
                               event_notifier_get_fd(&vdev->intx.unmask),
                               &err)) {
        error_propagate(errp, err);
        goto fail_vfio;
    }

    /* Let'em rip */
    vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);

    vdev->intx.kvm_accel = true;

    trace_vfio_intx_enable_kvm(vdev->vbasedev.name);

    return;

fail_vfio:
    kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, &vdev->intx.interrupt,
                                          vdev->intx.route.irq);
fail_irqfd:
    event_notifier_cleanup(&vdev->intx.unmask);
fail:
    qemu_set_fd_handler(irq_fd, vfio_intx_interrupt, NULL, vdev);
    vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
#endif
}

static void vfio_intx_disable_kvm(VFIOPCIDevice *vdev)
{
#ifdef CONFIG_KVM
    if (!vdev->intx.kvm_accel) {
        return;
    }

    /*
     * Get to a known state, hardware masked, QEMU ready to accept new
     * interrupts, QEMU IRQ de-asserted.
     */
    vfio_mask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);

    /* Tell KVM to stop listening for an INTx irqfd */
    if (kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, &vdev->intx.interrupt,
                                              vdev->intx.route.irq)) {
        error_report("vfio: Error: Failed to disable INTx irqfd: %m");
    }

    /* We only need to close the eventfd for VFIO to cleanup the kernel side */
    event_notifier_cleanup(&vdev->intx.unmask);

    /* QEMU starts listening for interrupt events. */
    qemu_set_fd_handler(event_notifier_get_fd(&vdev->intx.interrupt),
                        vfio_intx_interrupt, NULL, vdev);

    vdev->intx.kvm_accel = false;

    /* If we've missed an event, let it re-fire through QEMU */
    vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);

    trace_vfio_intx_disable_kvm(vdev->vbasedev.name);
#endif
}

static void vfio_intx_update(VFIOPCIDevice *vdev, PCIINTxRoute *route)
{
    Error *err = NULL;

    trace_vfio_intx_update(vdev->vbasedev.name,
                           vdev->intx.route.irq, route->irq);

    vfio_intx_disable_kvm(vdev);

    vdev->intx.route = *route;

    if (route->mode != PCI_INTX_ENABLED) {
        return;
    }

    vfio_intx_enable_kvm(vdev, &err);
    if (err) {
        warn_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
    }

    /* Re-enable the interrupt in case we missed an EOI */
    vfio_intx_eoi(&vdev->vbasedev);
}

static void vfio_intx_routing_notifier(PCIDevice *pdev)
{
    VFIOPCIDevice *vdev = PCI_VFIO(pdev);
    PCIINTxRoute route;

    if (vdev->interrupt != VFIO_INT_INTx) {
        return;
    }

    route = pci_device_route_intx_to_irq(&vdev->pdev, vdev->intx.pin);

    if (pci_intx_route_changed(&vdev->intx.route, &route)) {
        vfio_intx_update(vdev, &route);
    }
}

static void vfio_irqchip_change(Notifier *notify, void *data)
{
    VFIOPCIDevice *vdev = container_of(notify, VFIOPCIDevice,
                                       irqchip_change_notifier);

    vfio_intx_update(vdev, &vdev->intx.route);
}

static int vfio_intx_enable(VFIOPCIDevice *vdev, Error **errp)
{
    uint8_t pin = vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1);
    Error *err = NULL;
    int32_t fd;
    int ret;

    if (!pin) {
        return 0;
    }

    vfio_disable_interrupts(vdev);

    vdev->intx.pin = pin - 1; /* Pin A (1) -> irq[0] */
    pci_config_set_interrupt_pin(vdev->pdev.config, pin);

#ifdef CONFIG_KVM
    /*
     * Only conditional to avoid generating error messages on platforms
     * where we won't actually use the result anyway.
     */
    if (kvm_irqfds_enabled() && kvm_resamplefds_enabled()) {
        vdev->intx.route = pci_device_route_intx_to_irq(&vdev->pdev,
                                                        vdev->intx.pin);
    }
#endif

    ret = event_notifier_init(&vdev->intx.interrupt, 0);
    if (ret) {
        error_setg_errno(errp, -ret, "event_notifier_init failed");
        return ret;
    }
    fd = event_notifier_get_fd(&vdev->intx.interrupt);
    qemu_set_fd_handler(fd, vfio_intx_interrupt, NULL, vdev);

    if (vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX, 0,
                               VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err)) {
        error_propagate(errp, err);
        qemu_set_fd_handler(fd, NULL, NULL, vdev);
        event_notifier_cleanup(&vdev->intx.interrupt);
        return -errno;
    }

    vfio_intx_enable_kvm(vdev, &err);
    if (err) {
        warn_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
    }

    vdev->interrupt = VFIO_INT_INTx;

    trace_vfio_intx_enable(vdev->vbasedev.name);
    return 0;
}

static void vfio_intx_disable(VFIOPCIDevice *vdev)
{
    int fd;

    timer_del(vdev->intx.mmap_timer);
    vfio_intx_disable_kvm(vdev);
    vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);
    vfio_mmap_set_enabled(vdev, true);

    fd = event_notifier_get_fd(&vdev->intx.interrupt);
    qemu_set_fd_handler(fd, NULL, NULL, vdev);
    event_notifier_cleanup(&vdev->intx.interrupt);

    vdev->interrupt = VFIO_INT_NONE;

    trace_vfio_intx_disable(vdev->vbasedev.name);
}

/*
 * MSI/X
 */
static void vfio_msi_interrupt(void *opaque)
{
    VFIOMSIVector *vector = opaque;
    VFIOPCIDevice *vdev = vector->vdev;
    MSIMessage (*get_msg)(PCIDevice *dev, unsigned vector);
    void (*notify)(PCIDevice *dev, unsigned vector);
    MSIMessage msg;
    int nr = vector - vdev->msi_vectors;

    if (!event_notifier_test_and_clear(&vector->interrupt)) {
        return;
    }

    if (vdev->interrupt == VFIO_INT_MSIX) {
        get_msg = msix_get_message;
        notify = msix_notify;

        /* A masked vector firing needs to use the PBA, enable it */
        if (msix_is_masked(&vdev->pdev, nr)) {
            set_bit(nr, vdev->msix->pending);
            memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, true);
            trace_vfio_msix_pba_enable(vdev->vbasedev.name);
        }
    } else if (vdev->interrupt == VFIO_INT_MSI) {
        get_msg = msi_get_message;
        notify = msi_notify;
    } else {
        abort();
    }

    msg = get_msg(&vdev->pdev, nr);
    trace_vfio_msi_interrupt(vdev->vbasedev.name, nr, msg.address, msg.data);
    notify(&vdev->pdev, nr);
}

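/*
 * Program every allocated vector in a single VFIO_DEVICE_SET_IRQS call:
 * each vector's eventfd (the KVM irqfd when routed through the irqchip,
 * otherwise the QEMU-handled eventfd, or -1 when unused) is packed into
 * the variable-length data array of the ioctl.
 */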
static int vfio_enable_vectors(VFIOPCIDevice *vdev, bool msix)
{
    struct vfio_irq_set *irq_set;
    int ret = 0, i, argsz;
    int32_t *fds;

    argsz = sizeof(*irq_set) + (vdev->nr_vectors * sizeof(*fds));

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = msix ? VFIO_PCI_MSIX_IRQ_INDEX : VFIO_PCI_MSI_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = vdev->nr_vectors;
    fds = (int32_t *)&irq_set->data;

    for (i = 0; i < vdev->nr_vectors; i++) {
        int fd = -1;

        /*
         * MSI vs MSI-X - The guest has direct access to MSI mask and pending
         * bits, therefore we always use the KVM signaling path when setup.
         * MSI-X mask and pending bits are emulated, so we want to use the
         * KVM signaling path only when configured and unmasked.
         */
        if (vdev->msi_vectors[i].use) {
            if (vdev->msi_vectors[i].virq < 0 ||
                (msix && msix_is_masked(&vdev->pdev, i))) {
                fd = event_notifier_get_fd(&vdev->msi_vectors[i].interrupt);
            } else {
                fd = event_notifier_get_fd(&vdev->msi_vectors[i].kvm_interrupt);
            }
        }

        fds[i] = fd;
    }

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);

    g_free(irq_set);

    return ret;
}

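/*
 * Allocate a KVM MSI route for this vector and attach its kvm_interrupt
 * eventfd as an irqfd, so the device can inject the interrupt directly
 * into the guest without a userspace exit.  On any failure we quietly
 * fall back to QEMU-based injection via vector->interrupt.
 */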
static void vfio_add_kvm_msi_virq(VFIOPCIDevice *vdev, VFIOMSIVector *vector,
                                  int vector_n, bool msix)
{
    int virq;

    if ((msix && vdev->no_kvm_msix) || (!msix && vdev->no_kvm_msi)) {
        return;
    }

    if (event_notifier_init(&vector->kvm_interrupt, 0)) {
        return;
    }

    virq = kvm_irqchip_add_msi_route(kvm_state, vector_n, &vdev->pdev);
    if (virq < 0) {
        event_notifier_cleanup(&vector->kvm_interrupt);
        return;
    }

    if (kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, &vector->kvm_interrupt,
                                           NULL, virq) < 0) {
        kvm_irqchip_release_virq(kvm_state, virq);
        event_notifier_cleanup(&vector->kvm_interrupt);
        return;
    }

    vector->virq = virq;
}

static void vfio_remove_kvm_msi_virq(VFIOMSIVector *vector)
{
    kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, &vector->kvm_interrupt,
                                          vector->virq);
    kvm_irqchip_release_virq(kvm_state, vector->virq);
    vector->virq = -1;
    event_notifier_cleanup(&vector->kvm_interrupt);
}

static void vfio_update_kvm_msi_virq(VFIOMSIVector *vector, MSIMessage msg,
                                     PCIDevice *pdev)
{
    kvm_irqchip_update_msi_route(kvm_state, vector->virq, msg, pdev);
    kvm_irqchip_commit_routes(kvm_state);
}

static int vfio_msix_vector_do_use(PCIDevice *pdev, unsigned int nr,
                                   MSIMessage *msg, IOHandler *handler)
{
    VFIOPCIDevice *vdev = PCI_VFIO(pdev);
    VFIOMSIVector *vector;
    int ret;

    trace_vfio_msix_vector_do_use(vdev->vbasedev.name, nr);

    vector = &vdev->msi_vectors[nr];

    if (!vector->use) {
        vector->vdev = vdev;
        vector->virq = -1;
        if (event_notifier_init(&vector->interrupt, 0)) {
            error_report("vfio: Error: event_notifier_init failed");
        }
        vector->use = true;
        msix_vector_use(pdev, nr);
    }

    qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                        handler, NULL, vector);

    /*
     * Attempt to enable route through KVM irqchip,
     * default to userspace handling if unavailable.
     */
    if (vector->virq >= 0) {
        if (!msg) {
            vfio_remove_kvm_msi_virq(vector);
        } else {
            vfio_update_kvm_msi_virq(vector, *msg, pdev);
        }
    } else {
        if (msg) {
            vfio_add_kvm_msi_virq(vdev, vector, nr, true);
        }
    }

    /*
     * We don't want to have the host allocate all possible MSI vectors
     * for a device if they're not in use, so we shutdown and incrementally
     * increase them as needed.
     */
    if (vdev->nr_vectors < nr + 1) {
        vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX);
        vdev->nr_vectors = nr + 1;
        ret = vfio_enable_vectors(vdev, true);
        if (ret) {
            error_report("vfio: failed to enable vectors, %d", ret);
        }
    } else {
        Error *err = NULL;
        int32_t fd;

        if (vector->virq >= 0) {
            fd = event_notifier_get_fd(&vector->kvm_interrupt);
        } else {
            fd = event_notifier_get_fd(&vector->interrupt);
        }

        if (vfio_set_irq_signaling(&vdev->vbasedev,
                                   VFIO_PCI_MSIX_IRQ_INDEX, nr,
                                   VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err)) {
            error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
        }
    }

    /* Disable PBA emulation when nothing more is pending. */
    clear_bit(nr, vdev->msix->pending);
    if (find_first_bit(vdev->msix->pending,
                       vdev->nr_vectors) == vdev->nr_vectors) {
        memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, false);
        trace_vfio_msix_pba_disable(vdev->vbasedev.name);
    }

    return 0;
}

static int vfio_msix_vector_use(PCIDevice *pdev,
                                unsigned int nr, MSIMessage msg)
{
    return vfio_msix_vector_do_use(pdev, nr, &msg, vfio_msi_interrupt);
}

static void vfio_msix_vector_release(PCIDevice *pdev, unsigned int nr)
{
    VFIOPCIDevice *vdev = PCI_VFIO(pdev);
    VFIOMSIVector *vector = &vdev->msi_vectors[nr];

    trace_vfio_msix_vector_release(vdev->vbasedev.name, nr);

    /*
     * There are still old guests that mask and unmask vectors on every
     * interrupt.  If we're using QEMU bypass with a KVM irqfd, leave all of
     * the KVM setup in place, simply switch VFIO to use the non-bypass
     * eventfd.  We'll then fire the interrupt through QEMU and the MSI-X
     * core will mask the interrupt and set pending bits, allowing it to
     * be re-asserted on unmask.  Nothing to do if already using QEMU mode.
     */
    if (vector->virq >= 0) {
        int32_t fd = event_notifier_get_fd(&vector->interrupt);
        Error *err = NULL;

        if (vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX, nr,
                                   VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err)) {
            error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
        }
    }
}

static void vfio_msix_enable(VFIOPCIDevice *vdev)
{
    vfio_disable_interrupts(vdev);

    vdev->msi_vectors = g_new0(VFIOMSIVector, vdev->msix->entries);

    vdev->interrupt = VFIO_INT_MSIX;

    /*
     * Some communication channels between VF & PF or PF & fw rely on the
     * physical state of the device and expect that enabling MSI-X from the
     * guest enables the same on the host.  When our guest is Linux, the
     * guest driver call to pci_enable_msix() sets the enabling bit in the
     * MSI-X capability, but leaves the vector table masked.  We therefore
     * can't rely on a vector_use callback (from request_irq() in the guest)
     * to switch the physical device into MSI-X mode because that may come a
     * long time after pci_enable_msix().  This code enables vector 0 with
     * triggering to userspace, then immediately releases the vector, leaving
     * the physical device with no vectors enabled, but MSI-X enabled, just
     * like the guest view.
     */
    vfio_msix_vector_do_use(&vdev->pdev, 0, NULL, NULL);
    vfio_msix_vector_release(&vdev->pdev, 0);

    if (msix_set_vector_notifiers(&vdev->pdev, vfio_msix_vector_use,
                                  vfio_msix_vector_release, NULL)) {
        error_report("vfio: msix_set_vector_notifiers failed");
    }

    trace_vfio_msix_enable(vdev->vbasedev.name);
}

static void vfio_msi_enable(VFIOPCIDevice *vdev)
{
    int ret, i;

    vfio_disable_interrupts(vdev);

    vdev->nr_vectors = msi_nr_vectors_allocated(&vdev->pdev);
retry:
    vdev->msi_vectors = g_new0(VFIOMSIVector, vdev->nr_vectors);

    for (i = 0; i < vdev->nr_vectors; i++) {
        VFIOMSIVector *vector = &vdev->msi_vectors[i];

        vector->vdev = vdev;
        vector->virq = -1;
        vector->use = true;

        if (event_notifier_init(&vector->interrupt, 0)) {
            error_report("vfio: Error: event_notifier_init failed");
        }

        qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                            vfio_msi_interrupt, NULL, vector);

        /*
         * Attempt to enable route through KVM irqchip,
         * default to userspace handling if unavailable.
         */
        vfio_add_kvm_msi_virq(vdev, vector, i, false);
    }

    /* Set interrupt type prior to possible interrupts */
    vdev->interrupt = VFIO_INT_MSI;

    ret = vfio_enable_vectors(vdev, false);
    if (ret) {
        if (ret < 0) {
            error_report("vfio: Error: Failed to setup MSI fds: %m");
        } else if (ret != vdev->nr_vectors) {
            error_report("vfio: Error: Failed to enable %d "
                         "MSI vectors, retry with %d", vdev->nr_vectors, ret);
        }

        for (i = 0; i < vdev->nr_vectors; i++) {
            VFIOMSIVector *vector = &vdev->msi_vectors[i];
            if (vector->virq >= 0) {
                vfio_remove_kvm_msi_virq(vector);
            }
            qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                                NULL, NULL, NULL);
            event_notifier_cleanup(&vector->interrupt);
        }

        g_free(vdev->msi_vectors);
        vdev->msi_vectors = NULL;

        if (ret > 0 && ret != vdev->nr_vectors) {
            vdev->nr_vectors = ret;
            goto retry;
        }
        vdev->nr_vectors = 0;

        /*
         * Failing to setup MSI doesn't really fall within any specification.
         * Let's try leaving interrupts disabled and hope the guest figures
         * out to fall back to INTx for this device.
         */
        error_report("vfio: Error: Failed to enable MSI");
        vdev->interrupt = VFIO_INT_NONE;

        return;
    }

    trace_vfio_msi_enable(vdev->vbasedev.name, vdev->nr_vectors);
}

static void vfio_msi_disable_common(VFIOPCIDevice *vdev)
{
    Error *err = NULL;
    int i;

    for (i = 0; i < vdev->nr_vectors; i++) {
        VFIOMSIVector *vector = &vdev->msi_vectors[i];
        if (vdev->msi_vectors[i].use) {
            if (vector->virq >= 0) {
                vfio_remove_kvm_msi_virq(vector);
            }
            qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                                NULL, NULL, NULL);
            event_notifier_cleanup(&vector->interrupt);
        }
    }

    g_free(vdev->msi_vectors);
    vdev->msi_vectors = NULL;
    vdev->nr_vectors = 0;
    vdev->interrupt = VFIO_INT_NONE;

    vfio_intx_enable(vdev, &err);
    if (err) {
        error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
    }
}

static void vfio_msix_disable(VFIOPCIDevice *vdev)
{
    int i;

    msix_unset_vector_notifiers(&vdev->pdev);

    /*
     * MSI-X will only release vectors if MSI-X is still enabled on the
     * device, check through the rest and release it ourselves if necessary.
     */
    for (i = 0; i < vdev->nr_vectors; i++) {
        if (vdev->msi_vectors[i].use) {
            vfio_msix_vector_release(&vdev->pdev, i);
            msix_vector_unuse(&vdev->pdev, i);
        }
    }

    if (vdev->nr_vectors) {
        vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX);
    }

    vfio_msi_disable_common(vdev);

    memset(vdev->msix->pending, 0,
           BITS_TO_LONGS(vdev->msix->entries) * sizeof(unsigned long));

    trace_vfio_msix_disable(vdev->vbasedev.name);
}

static void vfio_msi_disable(VFIOPCIDevice *vdev)
{
    vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSI_IRQ_INDEX);
    vfio_msi_disable_common(vdev);

    trace_vfio_msi_disable(vdev->vbasedev.name);
}

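/*
 * The guest has rewritten the MSI address/data registers while MSI is
 * enabled; refresh the KVM routing entry of every in-use vector so that
 * injected interrupts carry the new message.
 */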
static void vfio_update_msi(VFIOPCIDevice *vdev)
{
    int i;

    for (i = 0; i < vdev->nr_vectors; i++) {
        VFIOMSIVector *vector = &vdev->msi_vectors[i];
        MSIMessage msg;

        if (!vector->use || vector->virq < 0) {
            continue;
        }

        msg = msi_get_message(&vdev->pdev, i);
        vfio_update_kvm_msi_virq(vector, msg, &vdev->pdev);
    }
}

static void vfio_pci_load_rom(VFIOPCIDevice *vdev)
{
    struct vfio_region_info *reg_info;
    uint64_t size;
    off_t off = 0;
    ssize_t bytes;

    if (vfio_get_region_info(&vdev->vbasedev,
                             VFIO_PCI_ROM_REGION_INDEX, &reg_info)) {
        error_report("vfio: Error getting ROM info: %m");
        return;
    }

    trace_vfio_pci_load_rom(vdev->vbasedev.name, (unsigned long)reg_info->size,
                            (unsigned long)reg_info->offset,
                            (unsigned long)reg_info->flags);

    vdev->rom_size = size = reg_info->size;
    vdev->rom_offset = reg_info->offset;

    g_free(reg_info);

    if (!vdev->rom_size) {
        vdev->rom_read_failed = true;
        error_report("vfio-pci: Cannot read device rom at "
                     "%s", vdev->vbasedev.name);
        error_printf("Device option ROM contents are probably invalid "
                     "(check dmesg).\nSkip option ROM probe with rombar=0, "
                     "or load from file with romfile=\n");
        return;
    }

    vdev->rom = g_malloc(size);
    memset(vdev->rom, 0xff, size);

    while (size) {
        bytes = pread(vdev->vbasedev.fd, vdev->rom + off,
                      size, vdev->rom_offset + off);
        if (bytes == 0) {
            break;
        } else if (bytes > 0) {
            off += bytes;
            size -= bytes;
        } else {
            if (errno == EINTR || errno == EAGAIN) {
                continue;
            }
            error_report("vfio: Error reading device ROM: %m");
            break;
        }
    }

    /*
     * Test the ROM signature against our device, if the vendor is correct
     * but the device ID doesn't match, store the correct device ID and
     * recompute the checksum.  Intel IGD devices need this and are known
     * to have bogus checksums so we can't simply adjust the checksum.
     */
    if (pci_get_word(vdev->rom) == 0xaa55 &&
        pci_get_word(vdev->rom + 0x18) + 8 < vdev->rom_size &&
        !memcmp(vdev->rom + pci_get_word(vdev->rom + 0x18), "PCIR", 4)) {
        uint16_t vid, did;

        vid = pci_get_word(vdev->rom + pci_get_word(vdev->rom + 0x18) + 4);
        did = pci_get_word(vdev->rom + pci_get_word(vdev->rom + 0x18) + 6);

        if (vid == vdev->vendor_id && did != vdev->device_id) {
            int i;
            uint8_t csum, *data = vdev->rom;

            pci_set_word(vdev->rom + pci_get_word(vdev->rom + 0x18) + 6,
                         vdev->device_id);
            data[6] = 0;

            for (csum = 0, i = 0; i < vdev->rom_size; i++) {
                csum += data[i];
            }

            data[6] = -csum;
        }
    }
}

static uint64_t vfio_rom_read(void *opaque, hwaddr addr, unsigned size)
{
    VFIOPCIDevice *vdev = opaque;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } val;
    uint64_t data = 0;

    /* Load the ROM lazily when the guest tries to read it */
    if (unlikely(!vdev->rom && !vdev->rom_read_failed)) {
        vfio_pci_load_rom(vdev);
    }

    memcpy(&val, vdev->rom + addr,
           (addr < vdev->rom_size) ? MIN(size, vdev->rom_size - addr) : 0);

    switch (size) {
    case 1:
        data = val.byte;
        break;
    case 2:
        data = le16_to_cpu(val.word);
        break;
    case 4:
        data = le32_to_cpu(val.dword);
        break;
    default:
        hw_error("vfio: unsupported read size, %d bytes\n", size);
        break;
    }

    trace_vfio_rom_read(vdev->vbasedev.name, addr, size, data);

    return data;
}

static void vfio_rom_write(void *opaque, hwaddr addr,
                           uint64_t data, unsigned size)
{
}

static const MemoryRegionOps vfio_rom_ops = {
    .read = vfio_rom_read,
    .write = vfio_rom_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

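/*
 * Size the ROM BAR the same way the PCI spec sizes any BAR: save the
 * original value, write all ones, read back the size mask, then restore
 * the original value.  The emulated ROM BAR is registered with that size
 * and its contents are filled in lazily by vfio_rom_read().
 */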
static void vfio_pci_size_rom(VFIOPCIDevice *vdev)
{
    uint32_t orig, size = cpu_to_le32((uint32_t)PCI_ROM_ADDRESS_MASK);
    off_t offset = vdev->config_offset + PCI_ROM_ADDRESS;
    DeviceState *dev = DEVICE(vdev);
    char *name;
    int fd = vdev->vbasedev.fd;

    if (vdev->pdev.romfile || !vdev->pdev.rom_bar) {
        /* Since pci handles romfile, just print a message and return */
        if (vfio_blacklist_opt_rom(vdev) && vdev->pdev.romfile) {
            warn_report("Device at %s is known to cause system instability"
                        " issues during option rom execution",
                        vdev->vbasedev.name);
            error_printf("Proceeding anyway since user specified romfile\n");
        }
        return;
    }

    /*
     * Use the same size ROM BAR as the physical device.  The contents
     * will get filled in later when the guest tries to read it.
     */
    if (pread(fd, &orig, 4, offset) != 4 ||
        pwrite(fd, &size, 4, offset) != 4 ||
        pread(fd, &size, 4, offset) != 4 ||
        pwrite(fd, &orig, 4, offset) != 4) {
        error_report("%s(%s) failed: %m", __func__, vdev->vbasedev.name);
        return;
    }

    size = ~(le32_to_cpu(size) & PCI_ROM_ADDRESS_MASK) + 1;

    if (!size) {
        return;
    }

    if (vfio_blacklist_opt_rom(vdev)) {
        if (dev->opts && qemu_opt_get(dev->opts, "rombar")) {
            warn_report("Device at %s is known to cause system instability"
                        " issues during option rom execution",
                        vdev->vbasedev.name);
            error_printf("Proceeding anyway since user specified"
                         " non zero value for rombar\n");
        } else {
            warn_report("Rom loading for device at %s has been disabled"
                        " due to system instability issues",
                        vdev->vbasedev.name);
            error_printf("Specify rombar=1 or romfile to force\n");
            return;
        }
    }

    trace_vfio_pci_size_rom(vdev->vbasedev.name, size);

    name = g_strdup_printf("vfio[%s].rom", vdev->vbasedev.name);

    memory_region_init_io(&vdev->pdev.rom, OBJECT(vdev),
                          &vfio_rom_ops, vdev, name, size);
    g_free(name);

    pci_register_bar(&vdev->pdev, PCI_ROM_SLOT,
                     PCI_BASE_ADDRESS_SPACE_MEMORY, &vdev->pdev.rom);

    vdev->rom_read_failed = false;
}

void vfio_vga_write(void *opaque, hwaddr addr,
                    uint64_t data, unsigned size)
{
    VFIOVGARegion *region = opaque;
    VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]);
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    off_t offset = vga->fd_offset + region->offset + addr;

    switch (size) {
    case 1:
        buf.byte = data;
        break;
    case 2:
        buf.word = cpu_to_le16(data);
        break;
    case 4:
        buf.dword = cpu_to_le32(data);
        break;
    default:
        hw_error("vfio: unsupported write size, %d bytes", size);
        break;
    }

    if (pwrite(vga->fd, &buf, size, offset) != size) {
        error_report("%s(,0x%"HWADDR_PRIx", 0x%"PRIx64", %d) failed: %m",
                     __func__, region->offset + addr, data, size);
    }

    trace_vfio_vga_write(region->offset + addr, data, size);
}

uint64_t vfio_vga_read(void *opaque, hwaddr addr, unsigned size)
{
    VFIOVGARegion *region = opaque;
    VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]);
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    uint64_t data = 0;
    off_t offset = vga->fd_offset + region->offset + addr;

    if (pread(vga->fd, &buf, size, offset) != size) {
        error_report("%s(,0x%"HWADDR_PRIx", %d) failed: %m",
                     __func__, region->offset + addr, size);
        return (uint64_t)-1;
    }

    switch (size) {
    case 1:
        data = buf.byte;
        break;
    case 2:
        data = le16_to_cpu(buf.word);
        break;
    case 4:
        data = le32_to_cpu(buf.dword);
        break;
    default:
        hw_error("vfio: unsupported read size, %d bytes", size);
        break;
    }

    trace_vfio_vga_read(region->offset + addr, size, data);

    return data;
}

static const MemoryRegionOps vfio_vga_ops = {
    .read = vfio_vga_read,
    .write = vfio_vga_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

/*
 * Expand memory region of sub-page(size < PAGE_SIZE) MMIO BAR to page
 * size if the BAR is in an exclusive page in host so that we could map
 * this BAR to guest. But this sub-page BAR may not occupy an exclusive
 * page in guest. So we should set the priority of the expanded memory
 * region to zero in case of overlap with BARs which share the same page
 * with the sub-page BAR in guest. Besides, we should also recover the
 * size of this sub-page BAR when its base address is changed in guest
 * and not page aligned any more.
 */
static void vfio_sub_page_bar_update_mapping(PCIDevice *pdev, int bar)
{
    VFIOPCIDevice *vdev = PCI_VFIO(pdev);
    VFIORegion *region = &vdev->bars[bar].region;
    MemoryRegion *mmap_mr, *region_mr, *base_mr;
    PCIIORegion *r;
    pcibus_t bar_addr;
    uint64_t size = region->size;

    /* Make sure that the whole region is allowed to be mmapped */
    if (region->nr_mmaps != 1 || !region->mmaps[0].mmap ||
        region->mmaps[0].size != region->size) {
        return;
    }

    r = &pdev->io_regions[bar];
    bar_addr = r->addr;
    base_mr = vdev->bars[bar].mr;
    region_mr = region->mem;
    mmap_mr = &region->mmaps[0].mem;

    /* If BAR is mapped and page aligned, update to fill PAGE_SIZE */
    if (bar_addr != PCI_BAR_UNMAPPED &&
        !(bar_addr & ~qemu_real_host_page_mask)) {
        size = qemu_real_host_page_size;
    }

    memory_region_transaction_begin();

    if (vdev->bars[bar].size < size) {
        memory_region_set_size(base_mr, size);
    }
    memory_region_set_size(region_mr, size);
    memory_region_set_size(mmap_mr, size);
    if (size != vdev->bars[bar].size && memory_region_is_mapped(base_mr)) {
        memory_region_del_subregion(r->address_space, base_mr);
        memory_region_add_subregion_overlap(r->address_space,
                                            bar_addr, base_mr, 0);
    }

    memory_region_transaction_commit();
}

/*
 * PCI config space
 */
uint32_t vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len)
{
    VFIOPCIDevice *vdev = PCI_VFIO(pdev);
    uint32_t emu_bits = 0, emu_val = 0, phys_val = 0, val;

    memcpy(&emu_bits, vdev->emulated_config_bits + addr, len);
    emu_bits = le32_to_cpu(emu_bits);

    if (emu_bits) {
        emu_val = pci_default_read_config(pdev, addr, len);
    }

    if (~emu_bits & (0xffffffffU >> (32 - len * 8))) {
        ssize_t ret;

        ret = pread(vdev->vbasedev.fd, &phys_val, len,
                    vdev->config_offset + addr);
        if (ret != len) {
            error_report("%s(%s, 0x%x, 0x%x) failed: %m",
                         __func__, vdev->vbasedev.name, addr, len);
            return -errno;
        }
        phys_val = le32_to_cpu(phys_val);
    }

    val = (emu_val & emu_bits) | (phys_val & ~emu_bits);

    trace_vfio_pci_read_config(vdev->vbasedev.name, addr, len, val);

    return val;
}

void vfio_pci_write_config(PCIDevice *pdev,
                           uint32_t addr, uint32_t val, int len)
{
    VFIOPCIDevice *vdev = PCI_VFIO(pdev);
    uint32_t val_le = cpu_to_le32(val);

    trace_vfio_pci_write_config(vdev->vbasedev.name, addr, val, len);

    /* Write everything to VFIO, let it filter out what we can't write */
    if (pwrite(vdev->vbasedev.fd, &val_le, len, vdev->config_offset + addr)
        != len) {
        error_report("%s(%s, 0x%x, 0x%x, 0x%x) failed: %m",
                     __func__, vdev->vbasedev.name, addr, val, len);
    }

    /* MSI/MSI-X Enabling/Disabling */
    if (pdev->cap_present & QEMU_PCI_CAP_MSI &&
        ranges_overlap(addr, len, pdev->msi_cap, vdev->msi_cap_size)) {
        int is_enabled, was_enabled = msi_enabled(pdev);

        pci_default_write_config(pdev, addr, val, len);

        is_enabled = msi_enabled(pdev);

        if (!was_enabled) {
            if (is_enabled) {
                vfio_msi_enable(vdev);
            }
        } else {
            if (!is_enabled) {
                vfio_msi_disable(vdev);
            } else {
                vfio_update_msi(vdev);
            }
        }
    } else if (pdev->cap_present & QEMU_PCI_CAP_MSIX &&
        ranges_overlap(addr, len, pdev->msix_cap, MSIX_CAP_LENGTH)) {
        int is_enabled, was_enabled = msix_enabled(pdev);

        pci_default_write_config(pdev, addr, val, len);

        is_enabled = msix_enabled(pdev);

        if (!was_enabled && is_enabled) {
            vfio_msix_enable(vdev);
        } else if (was_enabled && !is_enabled) {
            vfio_msix_disable(vdev);
        }
    } else if (ranges_overlap(addr, len, PCI_BASE_ADDRESS_0, 24) ||
        range_covers_byte(addr, len, PCI_COMMAND)) {
        pcibus_t old_addr[PCI_NUM_REGIONS - 1];
        int bar;

        for (bar = 0; bar < PCI_ROM_SLOT; bar++) {
            old_addr[bar] = pdev->io_regions[bar].addr;
        }

        pci_default_write_config(pdev, addr, val, len);

        for (bar = 0; bar < PCI_ROM_SLOT; bar++) {
            if (old_addr[bar] != pdev->io_regions[bar].addr &&
                vdev->bars[bar].region.size > 0 &&
                vdev->bars[bar].region.size < qemu_real_host_page_size) {
                vfio_sub_page_bar_update_mapping(pdev, bar);
            }
        }
    } else {
        /* Write everything to QEMU to keep emulated bits correct */
        pci_default_write_config(pdev, addr, val, len);
    }
}

/*
 * Interrupt setup
 */
static void vfio_disable_interrupts(VFIOPCIDevice *vdev)
{
    /*
     * More complicated than it looks.  Disabling MSI/X transitions the
     * device to INTx mode (if supported).  Therefore we need to first
     * disable MSI/X and then cleanup by disabling INTx.
     */
    if (vdev->interrupt == VFIO_INT_MSIX) {
        vfio_msix_disable(vdev);
    } else if (vdev->interrupt == VFIO_INT_MSI) {
        vfio_msi_disable(vdev);
    }

    if (vdev->interrupt == VFIO_INT_INTx) {
        vfio_intx_disable(vdev);
    }
}

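/*
 * Read the MSI capability flags directly from the device so that the
 * emulated capability advertises the same vector count, 64-bit address
 * support and per-vector masking as the physical function.
 */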
static int vfio_msi_setup(VFIOPCIDevice *vdev, int pos, Error **errp)
{
    uint16_t ctrl;
    bool msi_64bit, msi_maskbit;
    int ret, entries;
    Error *err = NULL;

    if (pread(vdev->vbasedev.fd, &ctrl, sizeof(ctrl),
              vdev->config_offset + pos + PCI_CAP_FLAGS) != sizeof(ctrl)) {
        error_setg_errno(errp, errno, "failed reading MSI PCI_CAP_FLAGS");
        return -errno;
    }
    ctrl = le16_to_cpu(ctrl);

    msi_64bit = !!(ctrl & PCI_MSI_FLAGS_64BIT);
    msi_maskbit = !!(ctrl & PCI_MSI_FLAGS_MASKBIT);
    entries = 1 << ((ctrl & PCI_MSI_FLAGS_QMASK) >> 1);

    trace_vfio_msi_setup(vdev->vbasedev.name, pos);

    ret = msi_init(&vdev->pdev, pos, entries, msi_64bit, msi_maskbit, &err);
    if (ret < 0) {
        if (ret == -ENOTSUP) {
            return 0;
        }
        error_propagate_prepend(errp, err, "msi_init failed: ");
        return ret;
    }
    vdev->msi_cap_size = 0xa + (msi_maskbit ? 0xa : 0) + (msi_64bit ? 0x4 : 0);

    return 0;
}

static void vfio_pci_fixup_msix_region(VFIOPCIDevice *vdev)
{
    off_t start, end;
    VFIORegion *region = &vdev->bars[vdev->msix->table_bar].region;

    /*
     * If the host driver allows mapping of the MSI-X data, we are going to
     * map the entire BAR and emulate the MSI-X table on top of that.
     */
    if (vfio_has_region_cap(&vdev->vbasedev, region->nr,
                            VFIO_REGION_INFO_CAP_MSIX_MAPPABLE)) {
        return;
    }

    /*
     * We expect to find a single mmap covering the whole BAR, anything else
     * means it's either unsupported or already setup.
     */
    if (region->nr_mmaps != 1 || region->mmaps[0].offset ||
        region->size != region->mmaps[0].size) {
        return;
    }

    /* MSI-X table start and end aligned to host page size */
    start = vdev->msix->table_offset & qemu_real_host_page_mask;
    end = REAL_HOST_PAGE_ALIGN((uint64_t)vdev->msix->table_offset +
                               (vdev->msix->entries * PCI_MSIX_ENTRY_SIZE));

    /*
     * Does the MSI-X table cover the beginning of the BAR?  The whole BAR?
     * NB - Host page size is necessarily a power of two and so is the PCI
     * BAR (not counting EA yet), therefore if we have host page aligned
     * @start and @end, then any remainder of the BAR before or after those
     * must be at least host page sized and therefore mmap'able.
     */
    if (!start) {
        if (end >= region->size) {
            region->nr_mmaps = 0;
            g_free(region->mmaps);
            region->mmaps = NULL;
            trace_vfio_msix_fixup(vdev->vbasedev.name,
                                  vdev->msix->table_bar, 0, 0);
        } else {
            region->mmaps[0].offset = end;
            region->mmaps[0].size = region->size - end;
            trace_vfio_msix_fixup(vdev->vbasedev.name,
                              vdev->msix->table_bar, region->mmaps[0].offset,
                              region->mmaps[0].offset + region->mmaps[0].size);
        }

    /* Maybe it's aligned at the end of the BAR */
    } else if (end >= region->size) {
        region->mmaps[0].size = start;
        trace_vfio_msix_fixup(vdev->vbasedev.name,
                              vdev->msix->table_bar, region->mmaps[0].offset,
                              region->mmaps[0].offset + region->mmaps[0].size);

    /* Otherwise it must split the BAR */
    } else {
        region->nr_mmaps = 2;
        region->mmaps = g_renew(VFIOMmap, region->mmaps, 2);

        memcpy(&region->mmaps[1], &region->mmaps[0], sizeof(VFIOMmap));

        region->mmaps[0].size = start;
        trace_vfio_msix_fixup(vdev->vbasedev.name,
                              vdev->msix->table_bar, region->mmaps[0].offset,
                              region->mmaps[0].offset + region->mmaps[0].size);

        region->mmaps[1].offset = end;
        region->mmaps[1].size = region->size - end;
        trace_vfio_msix_fixup(vdev->vbasedev.name,
                              vdev->msix->table_bar, region->mmaps[1].offset,
                              region->mmaps[1].offset + region->mmaps[1].size);
    }
}

static void vfio_pci_relocate_msix(VFIOPCIDevice *vdev, Error **errp)
{
    int target_bar = -1;
    size_t msix_sz;

    if (!vdev->msix || vdev->msix_relo == OFF_AUTOPCIBAR_OFF) {
        return;
    }

    /* The actual minimum size of MSI-X structures */
    msix_sz = (vdev->msix->entries * PCI_MSIX_ENTRY_SIZE) +
              (QEMU_ALIGN_UP(vdev->msix->entries, 64) / 8);
    /* Round up to host pages, we don't want to share a page */
    msix_sz = REAL_HOST_PAGE_ALIGN(msix_sz);
    /* PCI BARs must be a power of 2 */
    msix_sz = pow2ceil(msix_sz);

    if (vdev->msix_relo == OFF_AUTOPCIBAR_AUTO) {
        /*
         * TODO: Lookup table for known devices.
         *
         * Logically we might use an algorithm here to select the BAR adding
         * the least additional MMIO space, but we cannot programmatically
         * predict the driver dependency on BAR ordering or sizing, therefore
         * 'auto' becomes a lookup for combinations reported to work.
         */
        if (target_bar < 0) {
            error_setg(errp, "No automatic MSI-X relocation available for "
                       "device %04x:%04x", vdev->vendor_id, vdev->device_id);
            return;
        }
    } else {
        target_bar = (int)(vdev->msix_relo - OFF_AUTOPCIBAR_BAR0);
    }

    /* I/O port BARs cannot host MSI-X structures */
    if (vdev->bars[target_bar].ioport) {
        error_setg(errp, "Invalid MSI-X relocation BAR %d, "
                   "I/O port BAR", target_bar);
        return;
    }

    /* Cannot use a BAR in the "shadow" of a 64-bit BAR */
    if (!vdev->bars[target_bar].size &&
        target_bar > 0 && vdev->bars[target_bar - 1].mem64) {
        error_setg(errp, "Invalid MSI-X relocation BAR %d, "
                   "consumed by 64-bit BAR %d", target_bar, target_bar - 1);
        return;
    }

    /* 2GB max size for 32-bit BARs, cannot double if already > 1G */
    if (vdev->bars[target_bar].size > 1 * GiB &&
        !vdev->bars[target_bar].mem64) {
        error_setg(errp, "Invalid MSI-X relocation BAR %d, "
                   "no space to extend 32-bit BAR", target_bar);
        return;
    }

    /*
     * If adding a new BAR, test if we can make it 64bit.  We make it
     * prefetchable since QEMU MSI-X emulation has no read side effects
     * and doing so makes mapping more flexible.
     */
    if (!vdev->bars[target_bar].size) {
        if (target_bar < (PCI_ROM_SLOT - 1) &&
            !vdev->bars[target_bar + 1].size) {
            vdev->bars[target_bar].mem64 = true;
            vdev->bars[target_bar].type = PCI_BASE_ADDRESS_MEM_TYPE_64;
        }
        vdev->bars[target_bar].type |= PCI_BASE_ADDRESS_MEM_PREFETCH;
        vdev->bars[target_bar].size = msix_sz;
        vdev->msix->table_offset = 0;
    } else {
        vdev->bars[target_bar].size = MAX(vdev->bars[target_bar].size * 2,
                                          msix_sz * 2);
        /*
         * Due to above size calc, MSI-X always starts halfway into the BAR,
         * which will always be a separate host page.
         */
        vdev->msix->table_offset = vdev->bars[target_bar].size / 2;
    }

    vdev->msix->table_bar = target_bar;
    vdev->msix->pba_bar = target_bar;
    /* Requires 8-byte alignment, but PCI_MSIX_ENTRY_SIZE guarantees that */
    vdev->msix->pba_offset = vdev->msix->table_offset +
                             (vdev->msix->entries * PCI_MSIX_ENTRY_SIZE);

    trace_vfio_msix_relo(vdev->vbasedev.name,
                         vdev->msix->table_bar, vdev->msix->table_offset);
}

/*
 * We don't have any control over how pci_add_capability() inserts
 * capabilities into the chain.  In order to setup MSI-X we need a
 * MemoryRegion for the BAR.  In order to setup the BAR and not
 * attempt to mmap the MSI-X table area, which VFIO won't allow, we
 * need to first look for where the MSI-X table lives.  So we
 * unfortunately split MSI-X setup across two functions.
 */
static void vfio_msix_early_setup(VFIOPCIDevice *vdev, Error **errp)
{
    uint8_t pos;
    uint16_t ctrl;
    uint32_t table, pba;
    int fd = vdev->vbasedev.fd;
    VFIOMSIXInfo *msix;

    pos = pci_find_capability(&vdev->pdev, PCI_CAP_ID_MSIX);
    if (!pos) {
        return;
    }

    if (pread(fd, &ctrl, sizeof(ctrl),
              vdev->config_offset + pos + PCI_MSIX_FLAGS) != sizeof(ctrl)) {
        error_setg_errno(errp, errno, "failed to read PCI MSIX FLAGS");
        return;
    }

    if (pread(fd, &table, sizeof(table),
              vdev->config_offset + pos + PCI_MSIX_TABLE) != sizeof(table)) {
        error_setg_errno(errp, errno, "failed to read PCI MSIX TABLE");
        return;
    }

    if (pread(fd, &pba, sizeof(pba),
              vdev->config_offset + pos + PCI_MSIX_PBA) != sizeof(pba)) {
        error_setg_errno(errp, errno, "failed to read PCI MSIX PBA");
        return;
    }

    ctrl = le16_to_cpu(ctrl);
    table = le32_to_cpu(table);
    pba = le32_to_cpu(pba);

    msix = g_malloc0(sizeof(*msix));
    msix->table_bar = table & PCI_MSIX_FLAGS_BIRMASK;
    msix->table_offset = table & ~PCI_MSIX_FLAGS_BIRMASK;
    msix->pba_bar = pba & PCI_MSIX_FLAGS_BIRMASK;
    msix->pba_offset = pba & ~PCI_MSIX_FLAGS_BIRMASK;
    msix->entries = (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;

    /*
     * Test the size of the pba_offset variable and catch if it extends outside
     * of the specified BAR. If it is the case, we need to apply a hardware
     * specific quirk if the device is known or we have a broken configuration.
     */
    if (msix->pba_offset >= vdev->bars[msix->pba_bar].region.size) {
        /*
         * Chelsio T5 Virtual Function devices are encoded as 0x58xx for T5
         * adapters. The T5 hardware returns an incorrect value of 0x8000 for
         * the VF PBA offset while the BAR itself is only 8k. The correct value
         * is 0x1000, so we hard code that here.
         */
        if (vdev->vendor_id == PCI_VENDOR_ID_CHELSIO &&
            (vdev->device_id & 0xff00) == 0x5800) {
            msix->pba_offset = 0x1000;
        } else if (vdev->msix_relo == OFF_AUTOPCIBAR_OFF) {
            error_setg(errp, "hardware reports invalid configuration, "
                       "MSIX PBA outside of specified BAR");
            g_free(msix);
            return;
        }
    }

    trace_vfio_msix_early_setup(vdev->vbasedev.name, pos, msix->table_bar,
                                msix->table_offset, msix->entries);
    vdev->msix = msix;

    vfio_pci_fixup_msix_region(vdev);

    vfio_pci_relocate_msix(vdev, errp);
}

static int vfio_msix_setup(VFIOPCIDevice *vdev, int pos, Error **errp)
{
    int ret;
    Error *err = NULL;

    vdev->msix->pending = g_malloc0(BITS_TO_LONGS(vdev->msix->entries) *
                                    sizeof(unsigned long));
    ret = msix_init(&vdev->pdev, vdev->msix->entries,
                    vdev->bars[vdev->msix->table_bar].mr,
                    vdev->msix->table_bar, vdev->msix->table_offset,
                    vdev->bars[vdev->msix->pba_bar].mr,
                    vdev->msix->pba_bar, vdev->msix->pba_offset, pos,
                    &err);
    if (ret < 0) {
        if (ret == -ENOTSUP) {
            warn_report_err(err);
            return 0;
        }

        error_propagate(errp, err);
        return ret;
    }

    /*
     * The PCI spec suggests that devices provide additional alignment for
     * MSI-X structures and avoid overlapping non-MSI-X related registers.
     * For an assigned device, this hopefully means that emulation of MSI-X
     * structures does not affect the performance of the device.  If devices
     * fail to provide that alignment, a significant performance penalty may
     * result, for instance Mellanox MT27500 VFs:
     * http://www.spinics.net/lists/kvm/msg125881.html
     *
     * The PBA is simply not that important for such a serious regression and
     * most drivers do not appear to look at it.  The solution for this is to
     * disable the PBA MemoryRegion unless it's being used.  We disable it
     * here and only enable it if a masked vector fires through QEMU.  As the
     * vector-use notifier is called, which occurs on unmask, we test whether
     * PBA emulation is needed and again disable if not.
     */
    memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, false);

    /*
     * The emulated machine may provide a paravirt interface for MSIX setup
     * so it is not strictly necessary to emulate MSIX here. This becomes
     * helpful when frequently accessed MMIO registers are located in
     * subpages adjacent to the MSIX table but the MSIX data containing page
     * cannot be mapped because of a host page size bigger than the MSIX table
     * alignment.
     */
    if (object_property_get_bool(OBJECT(qdev_get_machine()),
                                 "vfio-no-msix-emulation", NULL)) {
        memory_region_set_enabled(&vdev->pdev.msix_table_mmio, false);
    }

    return 0;
}

static void vfio_teardown_msi(VFIOPCIDevice *vdev)
{
    msi_uninit(&vdev->pdev);

    if (vdev->msix) {
        msix_uninit(&vdev->pdev,
                    vdev->bars[vdev->msix->table_bar].mr,
                    vdev->bars[vdev->msix->pba_bar].mr);
        g_free(vdev->msix->pending);
    }
}

/*
 * Resource setup
 */
static void vfio_mmap_set_enabled(VFIOPCIDevice *vdev, bool enabled)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        vfio_region_mmaps_set_enabled(&vdev->bars[i].region, enabled);
    }
}

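/*
 * Read the raw BAR register from the device so we know whether this BAR
 * is an I/O port or memory BAR, and whether it is 64-bit, before the
 * emulated BAR is registered with the same type flags.
 */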
3a286732 1591static void vfio_bar_prepare(VFIOPCIDevice *vdev, int nr)
65501a74
AW
1592{
1593 VFIOBAR *bar = &vdev->bars[nr];
1594
65501a74 1595 uint32_t pci_bar;
65501a74
AW
1596 int ret;
1597
1598 /* Skip both unimplemented BARs and the upper half of 64bit BARS. */
2d82f8a3 1599 if (!bar->region.size) {
65501a74
AW
1600 return;
1601 }
1602
65501a74 1603 /* Determine what type of BAR this is for registration */
5546a621 1604 ret = pread(vdev->vbasedev.fd, &pci_bar, sizeof(pci_bar),
65501a74
AW
1605 vdev->config_offset + PCI_BASE_ADDRESS_0 + (4 * nr));
1606 if (ret != sizeof(pci_bar)) {
312fd5f2 1607 error_report("vfio: Failed to read BAR %d (%m)", nr);
65501a74
AW
1608 return;
1609 }
1610
1611 pci_bar = le32_to_cpu(pci_bar);
39360f0b
AW
1612 bar->ioport = (pci_bar & PCI_BASE_ADDRESS_SPACE_IO);
1613 bar->mem64 = bar->ioport ? 0 : (pci_bar & PCI_BASE_ADDRESS_MEM_TYPE_64);
3a286732
AW
1614 bar->type = pci_bar & (bar->ioport ? ~PCI_BASE_ADDRESS_IO_MASK :
1615 ~PCI_BASE_ADDRESS_MEM_MASK);
1616 bar->size = bar->region.size;
1617}
1618
1619static void vfio_bars_prepare(VFIOPCIDevice *vdev)
1620{
1621 int i;
1622
1623 for (i = 0; i < PCI_ROM_SLOT; i++) {
1624 vfio_bar_prepare(vdev, i);
1625 }
1626}
1627
1628static void vfio_bar_register(VFIOPCIDevice *vdev, int nr)
1629{
1630 VFIOBAR *bar = &vdev->bars[nr];
1631 char *name;
65501a74 1632
3a286732
AW
1633 if (!bar->size) {
1634 return;
65501a74 1635 }
7076eabc 1636
3a286732
AW
1637 bar->mr = g_new0(MemoryRegion, 1);
1638 name = g_strdup_printf("%s base BAR %d", vdev->vbasedev.name, nr);
1639 memory_region_init_io(bar->mr, OBJECT(vdev), NULL, NULL, name, bar->size);
1640 g_free(name);
1641
1642 if (bar->region.size) {
1643 memory_region_add_subregion(bar->mr, 0, bar->region.mem);
1644
1645 if (vfio_region_mmap(&bar->region)) {
1646 error_report("Failed to mmap %s BAR %d. Performance may be slow",
1647 vdev->vbasedev.name, nr);
1648 }
1649 }
1650
1651 pci_register_bar(&vdev->pdev, nr, bar->type, bar->mr);
65501a74
AW
1652}
1653
3a286732 1654static void vfio_bars_register(VFIOPCIDevice *vdev)
65501a74
AW
1655{
1656 int i;
1657
1658 for (i = 0; i < PCI_ROM_SLOT; i++) {
3a286732 1659 vfio_bar_register(vdev, i);
65501a74
AW
1660 }
1661}
1662
2d82f8a3 1663static void vfio_bars_exit(VFIOPCIDevice *vdev)
65501a74
AW
1664{
1665 int i;
1666
1667 for (i = 0; i < PCI_ROM_SLOT; i++) {
3a286732
AW
1668 VFIOBAR *bar = &vdev->bars[i];
1669
2d82f8a3 1670 vfio_bar_quirk_exit(vdev, i);
3a286732
AW
1671 vfio_region_exit(&bar->region);
1672 if (bar->region.size) {
1673 memory_region_del_subregion(bar->mr, bar->region.mem);
1674 }
65501a74 1675 }
f15689c7 1676
2d82f8a3 1677 if (vdev->vga) {
f15689c7 1678 pci_unregister_vga(&vdev->pdev);
2d82f8a3 1679 vfio_vga_quirk_exit(vdev);
f15689c7 1680 }
65501a74
AW
1681}
1682
2d82f8a3 1683static void vfio_bars_finalize(VFIOPCIDevice *vdev)
ba5e6bfa
PB
1684{
1685 int i;
1686
1687 for (i = 0; i < PCI_ROM_SLOT; i++) {
3a286732
AW
1688 VFIOBAR *bar = &vdev->bars[i];
1689
2d82f8a3 1690 vfio_bar_quirk_finalize(vdev, i);
3a286732
AW
1691 vfio_region_finalize(&bar->region);
1692 if (bar->size) {
1693 object_unparent(OBJECT(bar->mr));
1694 g_free(bar->mr);
1695 }
ba5e6bfa
PB
1696 }
1697
2d82f8a3
AW
1698 if (vdev->vga) {
1699 vfio_vga_quirk_finalize(vdev);
1700 for (i = 0; i < ARRAY_SIZE(vdev->vga->region); i++) {
1701 object_unparent(OBJECT(&vdev->vga->region[i].mem));
1702 }
1703 g_free(vdev->vga);
ba5e6bfa
PB
1704 }
1705}
1706
65501a74
AW
1707/*
1708 * General setup
1709 */
1710static uint8_t vfio_std_cap_max_size(PCIDevice *pdev, uint8_t pos)
1711{
88caf177
CF
1712 uint8_t tmp;
1713 uint16_t next = PCI_CONFIG_SPACE_SIZE;
65501a74
AW
1714
1715 for (tmp = pdev->config[PCI_CAPABILITY_LIST]; tmp;
3fc1c182 1716 tmp = pdev->config[tmp + PCI_CAP_LIST_NEXT]) {
65501a74
AW
1717 if (tmp > pos && tmp < next) {
1718 next = tmp;
1719 }
1720 }
1721
1722 return next - pos;
1723}
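
/*
 * Worked example (hypothetical offsets): with standard capabilities cached
 * at 0x40, 0x50 and 0x60, vfio_std_cap_max_size(pdev, 0x50) finds 0x60 as
 * the nearest higher offset and returns 0x60 - 0x50 = 0x10.  For the last
 * capability in the chain the default of PCI_CONFIG_SPACE_SIZE (0x100)
 * bounds the result, so vfio_std_cap_max_size(pdev, 0x60) returns 0xa0.
 */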
1724
325ae8d5
CF
1725
1726static uint16_t vfio_ext_cap_max_size(const uint8_t *config, uint16_t pos)
1727{
1728 uint16_t tmp, next = PCIE_CONFIG_SPACE_SIZE;
1729
1730 for (tmp = PCI_CONFIG_SPACE_SIZE; tmp;
1731 tmp = PCI_EXT_CAP_NEXT(pci_get_long(config + tmp))) {
1732 if (tmp > pos && tmp < next) {
1733 next = tmp;
1734 }
1735 }
1736
1737 return next - pos;
1738}
1739
96adc5c7
AW
1740static void vfio_set_word_bits(uint8_t *buf, uint16_t val, uint16_t mask)
1741{
1742 pci_set_word(buf, (pci_get_word(buf) & ~mask) | val);
1743}
1744
9ee27d73 1745static void vfio_add_emulated_word(VFIOPCIDevice *vdev, int pos,
96adc5c7
AW
1746 uint16_t val, uint16_t mask)
1747{
1748 vfio_set_word_bits(vdev->pdev.config + pos, val, mask);
1749 vfio_set_word_bits(vdev->pdev.wmask + pos, ~mask, mask);
1750 vfio_set_word_bits(vdev->emulated_config_bits + pos, mask, mask);
1751}
1752
1753static void vfio_set_long_bits(uint8_t *buf, uint32_t val, uint32_t mask)
1754{
1755 pci_set_long(buf, (pci_get_long(buf) & ~mask) | val);
1756}
1757
9ee27d73 1758static void vfio_add_emulated_long(VFIOPCIDevice *vdev, int pos,
96adc5c7
AW
1759 uint32_t val, uint32_t mask)
1760{
1761 vfio_set_long_bits(vdev->pdev.config + pos, val, mask);
1762 vfio_set_long_bits(vdev->pdev.wmask + pos, ~mask, mask);
1763 vfio_set_long_bits(vdev->emulated_config_bits + pos, mask, mask);
1764}
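
/*
 * Usage sketch (the value is an example only): vfio_add_emulated_word(vdev,
 * PCI_VENDOR_ID, 0x8086, ~0) writes the emulated value into pdev.config,
 * clears the matching wmask bits so the guest cannot modify them, and sets
 * the bits in emulated_config_bits so config reads of those bits are
 * serviced from QEMU's copy rather than from the physical device.
 */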
1765
7ef165b9
EA
1766static int vfio_setup_pcie_cap(VFIOPCIDevice *vdev, int pos, uint8_t size,
1767 Error **errp)
96adc5c7
AW
1768{
1769 uint16_t flags;
1770 uint8_t type;
1771
1772 flags = pci_get_word(vdev->pdev.config + pos + PCI_CAP_FLAGS);
1773 type = (flags & PCI_EXP_FLAGS_TYPE) >> 4;
1774
1775 if (type != PCI_EXP_TYPE_ENDPOINT &&
1776 type != PCI_EXP_TYPE_LEG_END &&
1777 type != PCI_EXP_TYPE_RC_END) {
1778
7ef165b9
EA
1779 error_setg(errp, "assignment of PCIe type 0x%x "
1780 "devices is not currently supported", type);
96adc5c7
AW
1781 return -EINVAL;
1782 }
1783
fd56e061
DG
1784 if (!pci_bus_is_express(pci_get_bus(&vdev->pdev))) {
1785 PCIBus *bus = pci_get_bus(&vdev->pdev);
0282abf0
AW
1786 PCIDevice *bridge;
1787
96adc5c7 1788 /*
0282abf0
AW
1789 * Traditionally PCI device assignment exposes the PCIe capability
1790 * as-is on non-express buses. The reason being that some drivers
1791 * simply assume that it's there, for example tg3. However when
1792 * we're running on a native PCIe machine type, like Q35, we need
1794 * to hide the PCIe capability. The reason for this is twofold:
1794 * first Windows guests get a Code 10 error when the PCIe capability
1795 * is exposed in this configuration. Therefore express devices won't
1796 * work at all unless they're attached to express buses in the VM.
1797 * Second, a native PCIe machine introduces the possibility of fine
1798 * granularity IOMMUs supporting both translation and isolation.
1799 * Guest code to discover the IOMMU visibility of a device, such as
1800 * IOMMU grouping code on Linux, is very aware of device types and
1801 * valid transitions between bus types. An express device on a non-
1802 * express bus is not a valid combination on bare metal systems.
1803 *
1804 * Drivers that require a PCIe capability to make the device
1805 * functional are simply going to need to have their devices placed
1806 * on a PCIe bus in the VM.
96adc5c7 1807 */
0282abf0
AW
1808 while (!pci_bus_is_root(bus)) {
1809 bridge = pci_bridge_get_device(bus);
fd56e061 1810 bus = pci_get_bus(bridge);
0282abf0
AW
1811 }
1812
1813 if (pci_bus_is_express(bus)) {
1814 return 0;
1815 }
1816
fd56e061 1817 } else if (pci_bus_is_root(pci_get_bus(&vdev->pdev))) {
96adc5c7
AW
1818 /*
1819 * On a Root Complex bus Endpoints become Root Complex Integrated
1820 * Endpoints, which changes the type and clears the LNK & LNK2 fields.
1821 */
1822 if (type == PCI_EXP_TYPE_ENDPOINT) {
1823 vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
1824 PCI_EXP_TYPE_RC_END << 4,
1825 PCI_EXP_FLAGS_TYPE);
1826
1827 /* Link Capabilities, Status, and Control go away */
1828 if (size > PCI_EXP_LNKCTL) {
1829 vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP, 0, ~0);
1830 vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0);
1831 vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA, 0, ~0);
1832
1833#ifndef PCI_EXP_LNKCAP2
1834#define PCI_EXP_LNKCAP2 44
1835#endif
1836#ifndef PCI_EXP_LNKSTA2
1837#define PCI_EXP_LNKSTA2 50
1838#endif
1839 /* Link 2 Capabilities, Status, and Control go away */
1840 if (size > PCI_EXP_LNKCAP2) {
1841 vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP2, 0, ~0);
1842 vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL2, 0, ~0);
1843 vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA2, 0, ~0);
1844 }
1845 }
1846
1847 } else if (type == PCI_EXP_TYPE_LEG_END) {
1848 /*
1849 * Legacy endpoints don't belong on the root complex. Windows
1850 * seems to be happier with devices if we skip the capability.
1851 */
1852 return 0;
1853 }
1854
1855 } else {
1856 /*
1857 * Convert Root Complex Integrated Endpoints to regular endpoints.
1858 * These devices don't support LNK/LNK2 capabilities, so make them up.
1859 */
1860 if (type == PCI_EXP_TYPE_RC_END) {
1861 vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
1862 PCI_EXP_TYPE_ENDPOINT << 4,
1863 PCI_EXP_FLAGS_TYPE);
1864 vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP,
d96a0ac7
AW
1865 QEMU_PCI_EXP_LNKCAP_MLW(QEMU_PCI_EXP_LNK_X1) |
1866 QEMU_PCI_EXP_LNKCAP_MLS(QEMU_PCI_EXP_LNK_2_5GT), ~0);
96adc5c7
AW
1867 vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0);
1868 }
96adc5c7
AW
1869 }
1870
47985727
AW
1871 /*
1872 * Intel 82599 SR-IOV VFs report an invalid PCIe capability version 0
1873 * (Niantic errata #35) causing Windows to error with a Code 10 for the
1874 * device on Q35. Fixup any such devices to report version 1. If we
1875 * were to remove the capability entirely the guest would lose extended
1876 * config space.
1877 */
1878 if ((flags & PCI_EXP_FLAGS_VERS) == 0) {
1879 vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
1880 1, PCI_EXP_FLAGS_VERS);
1881 }
1882
9a7c2a59
MZ
1883 pos = pci_add_capability(&vdev->pdev, PCI_CAP_ID_EXP, pos, size,
1884 errp);
1885 if (pos < 0) {
1886 return pos;
96adc5c7
AW
1887 }
1888
9a7c2a59
MZ
1889 vdev->pdev.exp.exp_cap = pos;
1890
96adc5c7
AW
1891 return pos;
1892}
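
/*
 * For illustration (hypothetical device): assigning a physical Root Complex
 * Integrated Endpoint (type 0x9 in PCI_EXP_FLAGS_TYPE) to an express bus
 * below the root complex in the guest rewrites the type field to a regular
 * Endpoint (0x0) and synthesizes an x1, 2.5GT/s Link Capabilities value,
 * since integrated endpoints have no link registers of their own.
 */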
1893
9ee27d73 1894static void vfio_check_pcie_flr(VFIOPCIDevice *vdev, uint8_t pos)
befe5176
AW
1895{
1896 uint32_t cap = pci_get_long(vdev->pdev.config + pos + PCI_EXP_DEVCAP);
1897
1898 if (cap & PCI_EXP_DEVCAP_FLR) {
df92ee44 1899 trace_vfio_check_pcie_flr(vdev->vbasedev.name);
befe5176
AW
1900 vdev->has_flr = true;
1901 }
1902}
1903
9ee27d73 1904static void vfio_check_pm_reset(VFIOPCIDevice *vdev, uint8_t pos)
befe5176
AW
1905{
1906 uint16_t csr = pci_get_word(vdev->pdev.config + pos + PCI_PM_CTRL);
1907
1908 if (!(csr & PCI_PM_CTRL_NO_SOFT_RESET)) {
df92ee44 1909 trace_vfio_check_pm_reset(vdev->vbasedev.name);
befe5176
AW
1910 vdev->has_pm_reset = true;
1911 }
1912}
1913
9ee27d73 1914static void vfio_check_af_flr(VFIOPCIDevice *vdev, uint8_t pos)
befe5176
AW
1915{
1916 uint8_t cap = pci_get_byte(vdev->pdev.config + pos + PCI_AF_CAP);
1917
1918 if ((cap & PCI_AF_CAP_TP) && (cap & PCI_AF_CAP_FLR)) {
df92ee44 1919 trace_vfio_check_af_flr(vdev->vbasedev.name);
befe5176
AW
1920 vdev->has_flr = true;
1921 }
1922}
1923
7ef165b9 1924static int vfio_add_std_cap(VFIOPCIDevice *vdev, uint8_t pos, Error **errp)
65501a74
AW
1925{
1926 PCIDevice *pdev = &vdev->pdev;
1927 uint8_t cap_id, next, size;
1928 int ret;
1929
1930 cap_id = pdev->config[pos];
3fc1c182 1931 next = pdev->config[pos + PCI_CAP_LIST_NEXT];
65501a74
AW
1932
1933 /*
1934 * If it becomes important to configure capabilities to their actual
1935 * size, use this as the default when it's something we don't recognize.
1936 * Since QEMU doesn't actually handle many of the config accesses,
1937 * exact size doesn't seem worthwhile.
1938 */
1939 size = vfio_std_cap_max_size(pdev, pos);
1940
1941 /*
1942 * pci_add_capability always inserts the new capability at the head
1943 * of the chain. Therefore to end up with a chain that matches the
1944 * physical device, we insert from the end by making this recursive.
3fc1c182 1945 * This is also why we pre-calculate size above as cached config space
65501a74
AW
1946 * will be changed as we unwind the stack.
1947 */
1948 if (next) {
7ef165b9 1949 ret = vfio_add_std_cap(vdev, next, errp);
65501a74 1950 if (ret) {
5b31c822 1951 return ret;
65501a74
AW
1952 }
1953 } else {
96adc5c7
AW
1954 /* Begin the rebuild, use QEMU emulated list bits */
1955 pdev->config[PCI_CAPABILITY_LIST] = 0;
1956 vdev->emulated_config_bits[PCI_CAPABILITY_LIST] = 0xff;
1957 vdev->emulated_config_bits[PCI_STATUS] |= PCI_STATUS_CAP_LIST;
e3f79f3b
AW
1958
1959 ret = vfio_add_virt_caps(vdev, errp);
1960 if (ret) {
1961 return ret;
1962 }
65501a74
AW
1963 }
1964
e3f79f3b
AW
1965 /* Scale down size, esp in case virt caps were added above */
1966 size = MIN(size, vfio_std_cap_max_size(pdev, pos));
1967
96adc5c7 1968 /* Use emulated next pointer to allow dropping caps */
3fc1c182 1969 pci_set_byte(vdev->emulated_config_bits + pos + PCI_CAP_LIST_NEXT, 0xff);
96adc5c7 1970
65501a74
AW
1971 switch (cap_id) {
1972 case PCI_CAP_ID_MSI:
7ef165b9 1973 ret = vfio_msi_setup(vdev, pos, errp);
65501a74 1974 break;
96adc5c7 1975 case PCI_CAP_ID_EXP:
befe5176 1976 vfio_check_pcie_flr(vdev, pos);
7ef165b9 1977 ret = vfio_setup_pcie_cap(vdev, pos, size, errp);
96adc5c7 1978 break;
65501a74 1979 case PCI_CAP_ID_MSIX:
7ef165b9 1980 ret = vfio_msix_setup(vdev, pos, errp);
65501a74 1981 break;
ba661818 1982 case PCI_CAP_ID_PM:
befe5176 1983 vfio_check_pm_reset(vdev, pos);
ba661818 1984 vdev->pm_cap = pos;
27841278 1985 ret = pci_add_capability(pdev, cap_id, pos, size, errp);
befe5176
AW
1986 break;
1987 case PCI_CAP_ID_AF:
1988 vfio_check_af_flr(vdev, pos);
27841278 1989 ret = pci_add_capability(pdev, cap_id, pos, size, errp);
befe5176 1990 break;
65501a74 1991 default:
27841278 1992 ret = pci_add_capability(pdev, cap_id, pos, size, errp);
65501a74
AW
1993 break;
1994 }
5b31c822 1995
65501a74 1996 if (ret < 0) {
7ef165b9
EA
1997 error_prepend(errp,
1998 "failed to add PCI capability 0x%x[0x%x]@0x%x: ",
1999 cap_id, size, pos);
65501a74
AW
2000 return ret;
2001 }
2002
2003 return 0;
2004}
2005
7ef165b9 2006static void vfio_add_ext_cap(VFIOPCIDevice *vdev)
325ae8d5
CF
2007{
2008 PCIDevice *pdev = &vdev->pdev;
2009 uint32_t header;
2010 uint16_t cap_id, next, size;
2011 uint8_t cap_ver;
2012 uint8_t *config;
2013
e37dac06 2014 /* Only add extended caps if we have them and the guest can see them */
fd56e061 2015 if (!pci_is_express(pdev) || !pci_bus_is_express(pci_get_bus(pdev)) ||
e37dac06 2016 !pci_get_long(pdev->config + PCI_CONFIG_SPACE_SIZE)) {
7ef165b9 2017 return;
e37dac06
AW
2018 }
2019
325ae8d5
CF
2020 /*
2021 * pcie_add_capability always inserts the new capability at the tail
2022 * of the chain. Therefore to end up with a chain that matches the
2023 * physical device, we cache the config space to avoid overwriting
2024 * the original config space when we parse the extended capabilities.
2025 */
2026 config = g_memdup(pdev->config, vdev->config_size);
2027
e37dac06
AW
2028 /*
2029 * Extended capabilities are chained with each pointing to the next, so we
2030 * can drop anything other than the head of the chain simply by modifying
d0d1cd70
AW
2031 * the previous next pointer. Seed the head of the chain here such that
2032 * we can simply skip any capabilities we want to drop below, regardless
2033 * of their position in the chain. If this stub capability still exists
2034 * after we add the capabilities we want to expose, update the capability
2035 * ID to zero. Note that we cannot seed with the capability header being
2036 * zero as this conflicts with definition of an absent capability chain
2037 * and prevents capabilities beyond the head of the list from being added.
2038 * By replacing the dummy capability ID with zero after walking the device
2039 * chain, we also transparently mark extended capabilities as absent if
2040 * no capabilities were added. Note that the PCIe spec defines an absence
2041 * of extended capabilities to be determined by a value of zero for the
2042 * capability ID, version, AND next pointer. A non-zero next pointer
2043 * should be sufficient to indicate additional capabilities are present,
2044 * which will occur if we call pcie_add_capability() below. The entire
2045 * first dword is emulated to support this.
2046 *
2047 * NB. The kernel side does similar masking, so be prepared that our
2048 * view of the device may also contain a capability ID zero in the head
2049 * of the chain. Skip it for the same reason that we cannot seed the
2050 * chain with a zero capability.
e37dac06
AW
2051 */
2052 pci_set_long(pdev->config + PCI_CONFIG_SPACE_SIZE,
2053 PCI_EXT_CAP(0xFFFF, 0, 0));
2054 pci_set_long(pdev->wmask + PCI_CONFIG_SPACE_SIZE, 0);
2055 pci_set_long(vdev->emulated_config_bits + PCI_CONFIG_SPACE_SIZE, ~0);
2056
325ae8d5
CF
2057 for (next = PCI_CONFIG_SPACE_SIZE; next;
2058 next = PCI_EXT_CAP_NEXT(pci_get_long(config + next))) {
2059 header = pci_get_long(config + next);
2060 cap_id = PCI_EXT_CAP_ID(header);
2061 cap_ver = PCI_EXT_CAP_VER(header);
2062
2063 /*
2064 * If it becomes important to configure extended capabilities to their
2065 * actual size, use this as the default when it's something we don't
2066 * recognize. Since QEMU doesn't actually handle many of the config
2067 * accesses, exact size doesn't seem worthwhile.
2068 */
2069 size = vfio_ext_cap_max_size(config, next);
2070
325ae8d5
CF
2071 /* Use emulated next pointer to allow dropping extended caps */
2072 pci_long_test_and_set_mask(vdev->emulated_config_bits + next,
2073 PCI_EXT_CAP_NEXT_MASK);
e37dac06
AW
2074
2075 switch (cap_id) {
d0d1cd70 2076 case 0: /* kernel masked capability */
e37dac06 2077 case PCI_EXT_CAP_ID_SRIOV: /* Read-only VF BARs confuse OVMF */
383a7af7 2078 case PCI_EXT_CAP_ID_ARI: /* XXX Needs next function virtualization */
3412d8ec 2079 case PCI_EXT_CAP_ID_REBAR: /* Can't expose read-only */
e37dac06
AW
2080 trace_vfio_add_ext_cap_dropped(vdev->vbasedev.name, cap_id, next);
2081 break;
2082 default:
2083 pcie_add_capability(pdev, cap_id, cap_ver, next, size);
2084 }
2085
2086 }
2087
2088 /* Cleanup chain head ID if necessary */
2089 if (pci_get_word(pdev->config + PCI_CONFIG_SPACE_SIZE) == 0xFFFF) {
2090 pci_set_word(pdev->config + PCI_CONFIG_SPACE_SIZE, 0);
325ae8d5
CF
2091 }
2092
2093 g_free(config);
7ef165b9 2094 return;
325ae8d5
CF
2095}
2096
7ef165b9 2097static int vfio_add_capabilities(VFIOPCIDevice *vdev, Error **errp)
65501a74
AW
2098{
2099 PCIDevice *pdev = &vdev->pdev;
325ae8d5 2100 int ret;
65501a74
AW
2101
2102 if (!(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST) ||
2103 !pdev->config[PCI_CAPABILITY_LIST]) {
2104 return 0; /* Nothing to add */
2105 }
2106
7ef165b9 2107 ret = vfio_add_std_cap(vdev, pdev->config[PCI_CAPABILITY_LIST], errp);
325ae8d5
CF
2108 if (ret) {
2109 return ret;
2110 }
2111
7ef165b9
EA
2112 vfio_add_ext_cap(vdev);
2113 return 0;
65501a74
AW
2114}
2115
9ee27d73 2116static void vfio_pci_pre_reset(VFIOPCIDevice *vdev)
f16f39c3
AW
2117{
2118 PCIDevice *pdev = &vdev->pdev;
2119 uint16_t cmd;
2120
2121 vfio_disable_interrupts(vdev);
2122
2123 /* Make sure the device is in D0 */
2124 if (vdev->pm_cap) {
2125 uint16_t pmcsr;
2126 uint8_t state;
2127
2128 pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2);
2129 state = pmcsr & PCI_PM_CTRL_STATE_MASK;
2130 if (state) {
2131 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2132 vfio_pci_write_config(pdev, vdev->pm_cap + PCI_PM_CTRL, pmcsr, 2);
2133 /* vfio handles the necessary delay here */
2134 pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2);
2135 state = pmcsr & PCI_PM_CTRL_STATE_MASK;
2136 if (state) {
4e505ddd 2137 error_report("vfio: Unable to power on device, stuck in D%d",
f16f39c3
AW
2138 state);
2139 }
2140 }
2141 }
2142
2143 /*
2144 * Stop any ongoing DMA by disconnecting I/O, MMIO, and bus master.
2145 * Also put INTx Disable in known state.
2146 */
2147 cmd = vfio_pci_read_config(pdev, PCI_COMMAND, 2);
2148 cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
2149 PCI_COMMAND_INTX_DISABLE);
2150 vfio_pci_write_config(pdev, PCI_COMMAND, cmd, 2);
2151}
2152
9ee27d73 2153static void vfio_pci_post_reset(VFIOPCIDevice *vdev)
f16f39c3 2154{
7dfb3424 2155 Error *err = NULL;
a52a4c47 2156 int nr;
7dfb3424
EA
2157
2158 vfio_intx_enable(vdev, &err);
2159 if (err) {
c3b8e3e0 2160 error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
7dfb3424 2161 }
a52a4c47
IY
2162
2163 for (nr = 0; nr < PCI_NUM_REGIONS - 1; ++nr) {
2164 off_t addr = vdev->config_offset + PCI_BASE_ADDRESS_0 + (4 * nr);
2165 uint32_t val = 0;
2166 uint32_t len = sizeof(val);
2167
2168 if (pwrite(vdev->vbasedev.fd, &val, len, addr) != len) {
2169 error_report("%s(%s) reset bar %d failed: %m", __func__,
2170 vdev->vbasedev.name, nr);
2171 }
2172 }
469d02de
AW
2173
2174 vfio_quirk_reset(vdev);
f16f39c3
AW
2175}
2176
7df9381b 2177static bool vfio_pci_host_match(PCIHostDeviceAddress *addr, const char *name)
f16f39c3 2178{
7df9381b
AW
2179 char tmp[13];
2180
2181 sprintf(tmp, "%04x:%02x:%02x.%1x", addr->domain,
2182 addr->bus, addr->slot, addr->function);
2183
2184 return (strcmp(tmp, name) == 0);
f16f39c3
AW
2185}
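
/*
 * For illustration: an addr of { .domain = 0x0, .bus = 0x6, .slot = 0xd,
 * .function = 0x0 } is formatted as "0000:06:0d.0", the same form as the
 * vbasedev.name taken from the sysfs device directory.
 */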
2186
9ee27d73 2187static int vfio_pci_hot_reset(VFIOPCIDevice *vdev, bool single)
f16f39c3
AW
2188{
2189 VFIOGroup *group;
2190 struct vfio_pci_hot_reset_info *info;
2191 struct vfio_pci_dependent_device *devices;
2192 struct vfio_pci_hot_reset *reset;
2193 int32_t *fds;
2194 int ret, i, count;
2195 bool multi = false;
2196
df92ee44 2197 trace_vfio_pci_hot_reset(vdev->vbasedev.name, single ? "one" : "multi");
f16f39c3 2198
893bfc3c
C
2199 if (!single) {
2200 vfio_pci_pre_reset(vdev);
2201 }
b47d8efa 2202 vdev->vbasedev.needs_reset = false;
f16f39c3
AW
2203
2204 info = g_malloc0(sizeof(*info));
2205 info->argsz = sizeof(*info);
2206
5546a621 2207 ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info);
f16f39c3
AW
2208 if (ret && errno != ENOSPC) {
2209 ret = -errno;
2210 if (!vdev->has_pm_reset) {
7df9381b
AW
2211 error_report("vfio: Cannot reset device %s, "
2212 "no available reset mechanism.", vdev->vbasedev.name);
f16f39c3
AW
2213 }
2214 goto out_single;
2215 }
2216
2217 count = info->count;
2218 info = g_realloc(info, sizeof(*info) + (count * sizeof(*devices)));
2219 info->argsz = sizeof(*info) + (count * sizeof(*devices));
2220 devices = &info->devices[0];
2221
5546a621 2222 ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info);
f16f39c3
AW
2223 if (ret) {
2224 ret = -errno;
2225 error_report("vfio: hot reset info failed: %m");
2226 goto out_single;
2227 }
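
    /*
     * The two GET_PCI_HOT_RESET_INFO ioctls above follow the usual VFIO
     * argsz pattern: the first call passes only the fixed-size header, so
     * the kernel reports how many dependent devices exist via info->count,
     * and the second call, after enlarging the buffer to hold that many
     * vfio_pci_dependent_device entries, retrieves the full list.
     */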
2228
df92ee44 2229 trace_vfio_pci_hot_reset_has_dep_devices(vdev->vbasedev.name);
f16f39c3
AW
2230
2231 /* Verify that we have all the groups required */
2232 for (i = 0; i < info->count; i++) {
2233 PCIHostDeviceAddress host;
9ee27d73 2234 VFIOPCIDevice *tmp;
b47d8efa 2235 VFIODevice *vbasedev_iter;
f16f39c3
AW
2236
2237 host.domain = devices[i].segment;
2238 host.bus = devices[i].bus;
2239 host.slot = PCI_SLOT(devices[i].devfn);
2240 host.function = PCI_FUNC(devices[i].devfn);
2241
385f57cf 2242 trace_vfio_pci_hot_reset_dep_devices(host.domain,
f16f39c3
AW
2243 host.bus, host.slot, host.function, devices[i].group_id);
2244
7df9381b 2245 if (vfio_pci_host_match(&host, vdev->vbasedev.name)) {
f16f39c3
AW
2246 continue;
2247 }
2248
62356b72 2249 QLIST_FOREACH(group, &vfio_group_list, next) {
f16f39c3
AW
2250 if (group->groupid == devices[i].group_id) {
2251 break;
2252 }
2253 }
2254
2255 if (!group) {
2256 if (!vdev->has_pm_reset) {
df92ee44 2257 error_report("vfio: Cannot reset device %s, "
f16f39c3 2258 "depends on group %d which is not owned.",
df92ee44 2259 vdev->vbasedev.name, devices[i].group_id);
f16f39c3
AW
2260 }
2261 ret = -EPERM;
2262 goto out;
2263 }
2264
2265 /* Prep dependent devices for reset and clear our marker. */
b47d8efa 2266 QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
7da624e2
AW
2267 if (!vbasedev_iter->dev->realized ||
2268 vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) {
b47d8efa
EA
2269 continue;
2270 }
2271 tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev);
7df9381b 2272 if (vfio_pci_host_match(&host, tmp->vbasedev.name)) {
f16f39c3 2273 if (single) {
f16f39c3
AW
2274 ret = -EINVAL;
2275 goto out_single;
2276 }
2277 vfio_pci_pre_reset(tmp);
b47d8efa 2278 tmp->vbasedev.needs_reset = false;
f16f39c3
AW
2279 multi = true;
2280 break;
2281 }
2282 }
2283 }
2284
2285 if (!single && !multi) {
f16f39c3
AW
2286 ret = -EINVAL;
2287 goto out_single;
2288 }
2289
2290 /* Determine how many group fds need to be passed */
2291 count = 0;
62356b72 2292 QLIST_FOREACH(group, &vfio_group_list, next) {
f16f39c3
AW
2293 for (i = 0; i < info->count; i++) {
2294 if (group->groupid == devices[i].group_id) {
2295 count++;
2296 break;
2297 }
2298 }
2299 }
2300
2301 reset = g_malloc0(sizeof(*reset) + (count * sizeof(*fds)));
2302 reset->argsz = sizeof(*reset) + (count * sizeof(*fds));
2303 fds = &reset->group_fds[0];
2304
2305 /* Fill in group fds */
62356b72 2306 QLIST_FOREACH(group, &vfio_group_list, next) {
f16f39c3
AW
2307 for (i = 0; i < info->count; i++) {
2308 if (group->groupid == devices[i].group_id) {
2309 fds[reset->count++] = group->fd;
2310 break;
2311 }
2312 }
2313 }
2314
2315 /* Bus reset! */
5546a621 2316 ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_PCI_HOT_RESET, reset);
f16f39c3
AW
2317 g_free(reset);
2318
df92ee44 2319 trace_vfio_pci_hot_reset_result(vdev->vbasedev.name,
385f57cf 2320 ret ? "%m" : "Success");
f16f39c3
AW
2321
2322out:
2323 /* Re-enable INTx on affected devices */
2324 for (i = 0; i < info->count; i++) {
2325 PCIHostDeviceAddress host;
9ee27d73 2326 VFIOPCIDevice *tmp;
b47d8efa 2327 VFIODevice *vbasedev_iter;
f16f39c3
AW
2328
2329 host.domain = devices[i].segment;
2330 host.bus = devices[i].bus;
2331 host.slot = PCI_SLOT(devices[i].devfn);
2332 host.function = PCI_FUNC(devices[i].devfn);
2333
7df9381b 2334 if (vfio_pci_host_match(&host, vdev->vbasedev.name)) {
f16f39c3
AW
2335 continue;
2336 }
2337
62356b72 2338 QLIST_FOREACH(group, &vfio_group_list, next) {
f16f39c3
AW
2339 if (group->groupid == devices[i].group_id) {
2340 break;
2341 }
2342 }
2343
2344 if (!group) {
2345 break;
2346 }
2347
b47d8efa 2348 QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
7da624e2
AW
2349 if (!vbasedev_iter->dev->realized ||
2350 vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) {
b47d8efa
EA
2351 continue;
2352 }
2353 tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev);
7df9381b 2354 if (vfio_pci_host_match(&host, tmp->vbasedev.name)) {
f16f39c3
AW
2355 vfio_pci_post_reset(tmp);
2356 break;
2357 }
2358 }
2359 }
2360out_single:
893bfc3c
C
2361 if (!single) {
2362 vfio_pci_post_reset(vdev);
2363 }
f16f39c3
AW
2364 g_free(info);
2365
2366 return ret;
2367}
2368
2369/*
2370 * We want to differentiate hot reset of multiple in-use devices vs. hot reset
2371 * of a single in-use device. VFIO_DEVICE_RESET will already handle the case
2372 * of doing hot resets when there is only a single device per bus. The in-use
2373 * here refers to how many VFIODevices are affected. A hot reset that affects
2374 * multiple devices, but only a single in-use device, means that we can call
2375 * it from our bus ->reset() callback since the extent is effectively a single
2376 * device. This allows us to make use of it in the hotplug path. When there
2377 * are multiple in-use devices, we can only trigger the hot reset during a
2378 * system reset and thus from our reset handler. We separate _one vs _multi
2379 * here so that we don't overlap and do a double reset on the system reset
2380 * path where both our reset handler and ->reset() callback are used. Calling
2381 * _one() will only do a hot reset for the single in-use device case, calling
2382 * _multi() will do nothing if a _one() would have been sufficient.
2383 */
9ee27d73 2384static int vfio_pci_hot_reset_one(VFIOPCIDevice *vdev)
f16f39c3
AW
2385{
2386 return vfio_pci_hot_reset(vdev, true);
2387}
2388
b47d8efa 2389static int vfio_pci_hot_reset_multi(VFIODevice *vbasedev)
f16f39c3 2390{
b47d8efa 2391 VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
f16f39c3
AW
2392 return vfio_pci_hot_reset(vdev, false);
2393}
2394
b47d8efa
EA
2395static void vfio_pci_compute_needs_reset(VFIODevice *vbasedev)
2396{
2397 VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
2398 if (!vbasedev->reset_works || (!vdev->has_flr && vdev->has_pm_reset)) {
2399 vbasedev->needs_reset = true;
2400 }
2401}
2402
2403static VFIODeviceOps vfio_pci_ops = {
2404 .vfio_compute_needs_reset = vfio_pci_compute_needs_reset,
2405 .vfio_hot_reset_multi = vfio_pci_hot_reset_multi,
870cb6f1 2406 .vfio_eoi = vfio_intx_eoi,
b47d8efa
EA
2407};
2408
cde4279b 2409int vfio_populate_vga(VFIOPCIDevice *vdev, Error **errp)
e593c021
AW
2410{
2411 VFIODevice *vbasedev = &vdev->vbasedev;
2412 struct vfio_region_info *reg_info;
2413 int ret;
2414
4225f2b6
AW
2415 ret = vfio_get_region_info(vbasedev, VFIO_PCI_VGA_REGION_INDEX, &reg_info);
2416 if (ret) {
cde4279b
EA
2417 error_setg_errno(errp, -ret,
2418 "failed getting region info for VGA region index %d",
2419 VFIO_PCI_VGA_REGION_INDEX);
4225f2b6
AW
2420 return ret;
2421 }
e593c021 2422
4225f2b6
AW
2423 if (!(reg_info->flags & VFIO_REGION_INFO_FLAG_READ) ||
2424 !(reg_info->flags & VFIO_REGION_INFO_FLAG_WRITE) ||
2425 reg_info->size < 0xbffff + 1) {
cde4279b
EA
2426 error_setg(errp, "unexpected VGA info, flags 0x%lx, size 0x%lx",
2427 (unsigned long)reg_info->flags,
2428 (unsigned long)reg_info->size);
4225f2b6
AW
2429 g_free(reg_info);
2430 return -EINVAL;
2431 }
e593c021 2432
4225f2b6 2433 vdev->vga = g_new0(VFIOVGA, 1);
e593c021 2434
4225f2b6
AW
2435 vdev->vga->fd_offset = reg_info->offset;
2436 vdev->vga->fd = vdev->vbasedev.fd;
e593c021 2437
4225f2b6 2438 g_free(reg_info);
e593c021 2439
4225f2b6
AW
2440 vdev->vga->region[QEMU_PCI_VGA_MEM].offset = QEMU_PCI_VGA_MEM_BASE;
2441 vdev->vga->region[QEMU_PCI_VGA_MEM].nr = QEMU_PCI_VGA_MEM;
2442 QLIST_INIT(&vdev->vga->region[QEMU_PCI_VGA_MEM].quirks);
e593c021 2443
182bca45
AW
2444 memory_region_init_io(&vdev->vga->region[QEMU_PCI_VGA_MEM].mem,
2445 OBJECT(vdev), &vfio_vga_ops,
2446 &vdev->vga->region[QEMU_PCI_VGA_MEM],
2447 "vfio-vga-mmio@0xa0000",
2448 QEMU_PCI_VGA_MEM_SIZE);
2449
4225f2b6
AW
2450 vdev->vga->region[QEMU_PCI_VGA_IO_LO].offset = QEMU_PCI_VGA_IO_LO_BASE;
2451 vdev->vga->region[QEMU_PCI_VGA_IO_LO].nr = QEMU_PCI_VGA_IO_LO;
2452 QLIST_INIT(&vdev->vga->region[QEMU_PCI_VGA_IO_LO].quirks);
e593c021 2453
182bca45
AW
2454 memory_region_init_io(&vdev->vga->region[QEMU_PCI_VGA_IO_LO].mem,
2455 OBJECT(vdev), &vfio_vga_ops,
2456 &vdev->vga->region[QEMU_PCI_VGA_IO_LO],
2457 "vfio-vga-io@0x3b0",
2458 QEMU_PCI_VGA_IO_LO_SIZE);
2459
4225f2b6
AW
2460 vdev->vga->region[QEMU_PCI_VGA_IO_HI].offset = QEMU_PCI_VGA_IO_HI_BASE;
2461 vdev->vga->region[QEMU_PCI_VGA_IO_HI].nr = QEMU_PCI_VGA_IO_HI;
2462 QLIST_INIT(&vdev->vga->region[QEMU_PCI_VGA_IO_HI].quirks);
e593c021 2463
182bca45
AW
2464 memory_region_init_io(&vdev->vga->region[QEMU_PCI_VGA_IO_HI].mem,
2465 OBJECT(vdev), &vfio_vga_ops,
2466 &vdev->vga->region[QEMU_PCI_VGA_IO_HI],
2467 "vfio-vga-io@0x3c0",
2468 QEMU_PCI_VGA_IO_HI_SIZE);
2469
2470 pci_register_vga(&vdev->pdev, &vdev->vga->region[QEMU_PCI_VGA_MEM].mem,
2471 &vdev->vga->region[QEMU_PCI_VGA_IO_LO].mem,
2472 &vdev->vga->region[QEMU_PCI_VGA_IO_HI].mem);
2473
e593c021
AW
2474 return 0;
2475}
2476
e04cff9d 2477static void vfio_populate_device(VFIOPCIDevice *vdev, Error **errp)
65501a74 2478{
217e9fdc 2479 VFIODevice *vbasedev = &vdev->vbasedev;
46900226 2480 struct vfio_region_info *reg_info;
7b4b0e9e 2481 struct vfio_irq_info irq_info = { .argsz = sizeof(irq_info) };
d13dd2d7 2482 int i, ret = -1;
65501a74
AW
2483
2484 /* Sanity check device */
d13dd2d7 2485 if (!(vbasedev->flags & VFIO_DEVICE_FLAGS_PCI)) {
2312d907 2486 error_setg(errp, "this isn't a PCI device");
e04cff9d 2487 return;
65501a74
AW
2488 }
2489
d13dd2d7 2490 if (vbasedev->num_regions < VFIO_PCI_CONFIG_REGION_INDEX + 1) {
2312d907
EA
2491 error_setg(errp, "unexpected number of io regions %u",
2492 vbasedev->num_regions);
e04cff9d 2493 return;
65501a74
AW
2494 }
2495
d13dd2d7 2496 if (vbasedev->num_irqs < VFIO_PCI_MSIX_IRQ_INDEX + 1) {
2312d907 2497 error_setg(errp, "unexpected number of irqs %u", vbasedev->num_irqs);
e04cff9d 2498 return;
65501a74
AW
2499 }
2500
2501 for (i = VFIO_PCI_BAR0_REGION_INDEX; i < VFIO_PCI_ROM_REGION_INDEX; i++) {
db0da029
AW
2502 char *name = g_strdup_printf("%s BAR %d", vbasedev->name, i);
2503
2504 ret = vfio_region_setup(OBJECT(vdev), vbasedev,
2505 &vdev->bars[i].region, i, name);
2506 g_free(name);
2507
65501a74 2508 if (ret) {
2312d907 2509 error_setg_errno(errp, -ret, "failed to get region %d info", i);
e04cff9d 2510 return;
65501a74
AW
2511 }
2512
7076eabc 2513 QLIST_INIT(&vdev->bars[i].quirks);
46900226 2514 }
65501a74 2515
46900226
AW
2516 ret = vfio_get_region_info(vbasedev,
2517 VFIO_PCI_CONFIG_REGION_INDEX, &reg_info);
65501a74 2518 if (ret) {
2312d907 2519 error_setg_errno(errp, -ret, "failed to get config info");
e04cff9d 2520 return;
65501a74
AW
2521 }
2522
d13dd2d7 2523 trace_vfio_populate_device_config(vdev->vbasedev.name,
46900226
AW
2524 (unsigned long)reg_info->size,
2525 (unsigned long)reg_info->offset,
2526 (unsigned long)reg_info->flags);
65501a74 2527
46900226 2528 vdev->config_size = reg_info->size;
6a659bbf
AW
2529 if (vdev->config_size == PCI_CONFIG_SPACE_SIZE) {
2530 vdev->pdev.cap_present &= ~QEMU_PCI_CAP_EXPRESS;
2531 }
46900226
AW
2532 vdev->config_offset = reg_info->offset;
2533
2534 g_free(reg_info);
65501a74 2535
e593c021 2536 if (vdev->features & VFIO_FEATURE_ENABLE_VGA) {
2312d907 2537 ret = vfio_populate_vga(vdev, errp);
f15689c7 2538 if (ret) {
2312d907 2539 error_append_hint(errp, "device does not support "
cde4279b 2540 "requested feature x-vga\n");
e04cff9d 2541 return;
f15689c7 2542 }
f15689c7 2543 }
47cbe50c 2544
7b4b0e9e
VMP
2545 irq_info.index = VFIO_PCI_ERR_IRQ_INDEX;
2546
5546a621 2547 ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_IRQ_INFO, &irq_info);
7b4b0e9e
VMP
2548 if (ret) {
2549 /* This can fail for an old kernel or legacy PCI dev */
772f1b37 2550 trace_vfio_populate_device_get_irq_info_failure(strerror(errno));
7b4b0e9e
VMP
2551 } else if (irq_info.count == 1) {
2552 vdev->pci_aer = true;
2553 } else {
e1eb292a
MA
2554 warn_report(VFIO_MSG_PREFIX
2555 "Could not enable error recovery for the device",
2556 vbasedev->name);
7b4b0e9e 2557 }
d13dd2d7
EA
2558}
2559
9ee27d73 2560static void vfio_put_device(VFIOPCIDevice *vdev)
65501a74 2561{
462037c9 2562 g_free(vdev->vbasedev.name);
db0da029
AW
2563 g_free(vdev->msix);
2564
d13dd2d7 2565 vfio_put_base_device(&vdev->vbasedev);
65501a74
AW
2566}
2567
7b4b0e9e
VMP
2568static void vfio_err_notifier_handler(void *opaque)
2569{
9ee27d73 2570 VFIOPCIDevice *vdev = opaque;
7b4b0e9e
VMP
2571
2572 if (!event_notifier_test_and_clear(&vdev->err_notifier)) {
2573 return;
2574 }
2575
2576 /*
2577 * TBD. Retrieve the error details and decide what action
2578 * needs to be taken. One of the actions could be to pass
2579 * the error to the guest and have the guest driver recover
2580 * from the error. This requires that PCIe capabilities be
2581 * exposed to the guest. For now, we just terminate the
2582 * guest to contain the error.
2583 */
2584
7df9381b 2585 error_report("%s(%s) Unrecoverable error detected. Please collect any data possible and then kill the guest", __func__, vdev->vbasedev.name);
7b4b0e9e 2586
ba29776f 2587 vm_stop(RUN_STATE_INTERNAL_ERROR);
7b4b0e9e
VMP
2588}
2589
2590/*
2591 * Registers error notifier for devices supporting error recovery.
2592 * If we encounter a failure in this function, we report an error
2593 * and continue after disabling error recovery support for the
2594 * device.
2595 */
9ee27d73 2596static void vfio_register_err_notifier(VFIOPCIDevice *vdev)
7b4b0e9e 2597{
201a7331
EA
2598 Error *err = NULL;
2599 int32_t fd;
7b4b0e9e
VMP
2600
2601 if (!vdev->pci_aer) {
2602 return;
2603 }
2604
2605 if (event_notifier_init(&vdev->err_notifier, 0)) {
8fbf47c3 2606 error_report("vfio: Unable to init event notifier for error detection");
7b4b0e9e
VMP
2607 vdev->pci_aer = false;
2608 return;
2609 }
2610
201a7331
EA
2611 fd = event_notifier_get_fd(&vdev->err_notifier);
2612 qemu_set_fd_handler(fd, vfio_err_notifier_handler, NULL, vdev);
7b4b0e9e 2613
201a7331
EA
2614 if (vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_ERR_IRQ_INDEX, 0,
2615 VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err)) {
2616 error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
2617 qemu_set_fd_handler(fd, NULL, NULL, vdev);
7b4b0e9e
VMP
2618 event_notifier_cleanup(&vdev->err_notifier);
2619 vdev->pci_aer = false;
2620 }
7b4b0e9e
VMP
2621}
2622
9ee27d73 2623static void vfio_unregister_err_notifier(VFIOPCIDevice *vdev)
7b4b0e9e 2624{
201a7331 2625 Error *err = NULL;
7b4b0e9e
VMP
2626
2627 if (!vdev->pci_aer) {
2628 return;
2629 }
2630
201a7331
EA
2631 if (vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_ERR_IRQ_INDEX, 0,
2632 VFIO_IRQ_SET_ACTION_TRIGGER, -1, &err)) {
2633 error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
7b4b0e9e 2634 }
7b4b0e9e
VMP
2635 qemu_set_fd_handler(event_notifier_get_fd(&vdev->err_notifier),
2636 NULL, NULL, vdev);
2637 event_notifier_cleanup(&vdev->err_notifier);
2638}
2639
47cbe50c
AW
2640static void vfio_req_notifier_handler(void *opaque)
2641{
2642 VFIOPCIDevice *vdev = opaque;
35c7cb4c 2643 Error *err = NULL;
47cbe50c
AW
2644
2645 if (!event_notifier_test_and_clear(&vdev->req_notifier)) {
2646 return;
2647 }
2648
a2596aee 2649 qdev_unplug(DEVICE(vdev), &err);
35c7cb4c 2650 if (err) {
e1eb292a 2651 warn_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
35c7cb4c 2652 }
47cbe50c
AW
2653}
2654
2655static void vfio_register_req_notifier(VFIOPCIDevice *vdev)
2656{
2657 struct vfio_irq_info irq_info = { .argsz = sizeof(irq_info),
2658 .index = VFIO_PCI_REQ_IRQ_INDEX };
201a7331
EA
2659 Error *err = NULL;
2660 int32_t fd;
47cbe50c
AW
2661
2662 if (!(vdev->features & VFIO_FEATURE_ENABLE_REQ)) {
2663 return;
2664 }
2665
2666 if (ioctl(vdev->vbasedev.fd,
2667 VFIO_DEVICE_GET_IRQ_INFO, &irq_info) < 0 || irq_info.count < 1) {
2668 return;
2669 }
2670
2671 if (event_notifier_init(&vdev->req_notifier, 0)) {
2672 error_report("vfio: Unable to init event notifier for device request");
2673 return;
2674 }
2675
201a7331
EA
2676 fd = event_notifier_get_fd(&vdev->req_notifier);
2677 qemu_set_fd_handler(fd, vfio_req_notifier_handler, NULL, vdev);
47cbe50c 2678
201a7331
EA
2679 if (vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_REQ_IRQ_INDEX, 0,
2680 VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err)) {
2681 error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
2682 qemu_set_fd_handler(fd, NULL, NULL, vdev);
47cbe50c
AW
2683 event_notifier_cleanup(&vdev->req_notifier);
2684 } else {
2685 vdev->req_enabled = true;
2686 }
47cbe50c
AW
2687}
2688
2689static void vfio_unregister_req_notifier(VFIOPCIDevice *vdev)
2690{
201a7331 2691 Error *err = NULL;
47cbe50c
AW
2692
2693 if (!vdev->req_enabled) {
2694 return;
2695 }
2696
201a7331
EA
2697 if (vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_REQ_IRQ_INDEX, 0,
2698 VFIO_IRQ_SET_ACTION_TRIGGER, -1, &err)) {
2699 error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
47cbe50c 2700 }
47cbe50c
AW
2701 qemu_set_fd_handler(event_notifier_get_fd(&vdev->req_notifier),
2702 NULL, NULL, vdev);
2703 event_notifier_cleanup(&vdev->req_notifier);
2704
2705 vdev->req_enabled = false;
2706}
2707
1a22aca1 2708static void vfio_realize(PCIDevice *pdev, Error **errp)
65501a74 2709{
2683ccd5 2710 VFIOPCIDevice *vdev = PCI_VFIO(pdev);
b47d8efa 2711 VFIODevice *vbasedev_iter;
65501a74 2712 VFIOGroup *group;
238e9172 2713 char *tmp, *subsys, group_path[PATH_MAX], *group_name;
ec3bcf42 2714 Error *err = NULL;
65501a74
AW
2715 ssize_t len;
2716 struct stat st;
2717 int groupid;
581406e0 2718 int i, ret;
238e9172 2719 bool is_mdev;
65501a74 2720
7df9381b 2721 if (!vdev->vbasedev.sysfsdev) {
4a946268
EA
2722 if (!(~vdev->host.domain || ~vdev->host.bus ||
2723 ~vdev->host.slot || ~vdev->host.function)) {
2724 error_setg(errp, "No provided host device");
6e4e6f0d
DJS
2725 error_append_hint(errp, "Use -device vfio-pci,host=DDDD:BB:DD.F "
2726 "or -device vfio-pci,sysfsdev=PATH_TO_DEVICE\n");
4a946268
EA
2727 return;
2728 }
7df9381b
AW
2729 vdev->vbasedev.sysfsdev =
2730 g_strdup_printf("/sys/bus/pci/devices/%04x:%02x:%02x.%01x",
2731 vdev->host.domain, vdev->host.bus,
2732 vdev->host.slot, vdev->host.function);
2733 }
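
    /*
     * For illustration (hypothetical device): host=0000:06:0d.0 yields
     * sysfsdev=/sys/bus/pci/devices/0000:06:0d.0; the iommu_group symlink
     * read below then points at something like
     * ../../../kernel/iommu_groups/26, and its basename gives the group
     * number handed to vfio_get_group().
     */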
2734
2735 if (stat(vdev->vbasedev.sysfsdev, &st) < 0) {
1a22aca1 2736 error_setg_errno(errp, errno, "no such host device");
c3b8e3e0 2737 error_prepend(errp, VFIO_MSG_PREFIX, vdev->vbasedev.sysfsdev);
1a22aca1 2738 return;
65501a74
AW
2739 }
2740
f045a010
JF
2741 if (!pdev->failover_pair_id) {
2742 error_setg(&vdev->migration_blocker,
2743 "VFIO device doesn't support migration");
2744 ret = migrate_add_blocker(vdev->migration_blocker, &err);
ed92369a 2745 if (ret) {
f045a010
JF
2746 error_propagate(errp, err);
2747 error_free(vdev->migration_blocker);
1335d643 2748 vdev->migration_blocker = NULL;
f045a010
JF
2749 return;
2750 }
2751 }
2752
3e015d81 2753 vdev->vbasedev.name = g_path_get_basename(vdev->vbasedev.sysfsdev);
b47d8efa 2754 vdev->vbasedev.ops = &vfio_pci_ops;
462037c9 2755 vdev->vbasedev.type = VFIO_DEVICE_TYPE_PCI;
a2596aee 2756 vdev->vbasedev.dev = DEVICE(vdev);
462037c9 2757
7df9381b
AW
2758 tmp = g_strdup_printf("%s/iommu_group", vdev->vbasedev.sysfsdev);
2759 len = readlink(tmp, group_path, sizeof(group_path));
2760 g_free(tmp);
65501a74 2761
7df9381b 2762 if (len <= 0 || len >= sizeof(group_path)) {
1a22aca1
EA
2763 error_setg_errno(errp, len < 0 ? errno : ENAMETOOLONG,
2764 "no iommu_group found");
426ec904 2765 goto error;
65501a74
AW
2766 }
2767
7df9381b 2768 group_path[len] = 0;
65501a74 2769
7df9381b 2770 group_name = basename(group_path);
65501a74 2771 if (sscanf(group_name, "%d", &groupid) != 1) {
1a22aca1 2772 error_setg_errno(errp, errno, "failed to read %s", group_path);
426ec904 2773 goto error;
65501a74
AW
2774 }
2775
1a22aca1 2776 trace_vfio_realize(vdev->vbasedev.name, groupid);
65501a74 2777
1a22aca1 2778 group = vfio_get_group(groupid, pci_device_iommu_address_space(pdev), errp);
65501a74 2779 if (!group) {
426ec904 2780 goto error;
65501a74
AW
2781 }
2782
b47d8efa
EA
2783 QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
2784 if (strcmp(vbasedev_iter->name, vdev->vbasedev.name) == 0) {
1a22aca1 2785 error_setg(errp, "device is already attached");
65501a74 2786 vfio_put_group(group);
426ec904 2787 goto error;
65501a74
AW
2788 }
2789 }
2790
238e9172 2791 /*
aff92b82 2792 * Mediated devices *might* operate compatibly with discarding of RAM, but
238e9172
AW
2793 * we cannot know for certain; it depends on whether the mdev vendor driver
2794 * stays in sync with the active working set of the guest driver. Prevent
2795 * the x-balloon-allowed option unless this is minimally an mdev device.
2796 */
2797 tmp = g_strdup_printf("%s/subsystem", vdev->vbasedev.sysfsdev);
2798 subsys = realpath(tmp, NULL);
2799 g_free(tmp);
a1c0f886 2800 is_mdev = subsys && (strcmp(subsys, "/sys/bus/mdev") == 0);
238e9172
AW
2801 free(subsys);
2802
2803 trace_vfio_mdev(vdev->vbasedev.name, is_mdev);
2804
aff92b82 2805 if (vdev->vbasedev.ram_block_discard_allowed && !is_mdev) {
238e9172
AW
2806 error_setg(errp, "x-balloon-allowed only potentially compatible "
2807 "with mdev devices");
2808 vfio_put_group(group);
2809 goto error;
2810 }
2811
1a22aca1 2812 ret = vfio_get_device(group, vdev->vbasedev.name, &vdev->vbasedev, errp);
65501a74 2813 if (ret) {
65501a74 2814 vfio_put_group(group);
426ec904 2815 goto error;
65501a74
AW
2816 }
2817
e04cff9d
EA
2818 vfio_populate_device(vdev, &err);
2819 if (err) {
2820 error_propagate(errp, err);
2312d907 2821 goto error;
217e9fdc
PB
2822 }
2823
65501a74 2824 /* Get a copy of config space */
5546a621 2825 ret = pread(vdev->vbasedev.fd, vdev->pdev.config,
65501a74
AW
2826 MIN(pci_config_size(&vdev->pdev), vdev->config_size),
2827 vdev->config_offset);
2828 if (ret < (int)MIN(pci_config_size(&vdev->pdev), vdev->config_size)) {
2829 ret = ret < 0 ? -errno : -EFAULT;
1a22aca1 2830 error_setg_errno(errp, -ret, "failed to read device config space");
426ec904 2831 goto error;
65501a74
AW
2832 }
2833
4b5d5e87
AW
2834 /* vfio emulates a lot for us, but some bits need extra love */
2835 vdev->emulated_config_bits = g_malloc0(vdev->config_size);
2836
2837 /* QEMU can choose to expose the ROM or not */
2838 memset(vdev->emulated_config_bits + PCI_ROM_ADDRESS, 0xff, 4);
04f336b0
AW
2839 /* QEMU can also add or extend BARs */
2840 memset(vdev->emulated_config_bits + PCI_BASE_ADDRESS_0, 0xff, 6 * 4);
4b5d5e87 2841
89dcccc5
AW
2842 /*
2843 * The PCI spec reserves vendor ID 0xffff as an invalid value. The
2844 * device ID is managed by the vendor and need only be a 16-bit value.
2845 * Allow any 16-bit value for subsystem so they can be hidden or changed.
2846 */
2847 if (vdev->vendor_id != PCI_ANY_ID) {
2848 if (vdev->vendor_id >= 0xffff) {
1a22aca1 2849 error_setg(errp, "invalid PCI vendor ID provided");
426ec904 2850 goto error;
89dcccc5
AW
2851 }
2852 vfio_add_emulated_word(vdev, PCI_VENDOR_ID, vdev->vendor_id, ~0);
2853 trace_vfio_pci_emulated_vendor_id(vdev->vbasedev.name, vdev->vendor_id);
2854 } else {
2855 vdev->vendor_id = pci_get_word(pdev->config + PCI_VENDOR_ID);
2856 }
2857
2858 if (vdev->device_id != PCI_ANY_ID) {
2859 if (vdev->device_id > 0xffff) {
1a22aca1 2860 error_setg(errp, "invalid PCI device ID provided");
426ec904 2861 goto error;
89dcccc5
AW
2862 }
2863 vfio_add_emulated_word(vdev, PCI_DEVICE_ID, vdev->device_id, ~0);
2864 trace_vfio_pci_emulated_device_id(vdev->vbasedev.name, vdev->device_id);
2865 } else {
2866 vdev->device_id = pci_get_word(pdev->config + PCI_DEVICE_ID);
2867 }
2868
2869 if (vdev->sub_vendor_id != PCI_ANY_ID) {
2870 if (vdev->sub_vendor_id > 0xffff) {
1a22aca1 2871 error_setg(errp, "invalid PCI subsystem vendor ID provided");
426ec904 2872 goto error;
89dcccc5
AW
2873 }
2874 vfio_add_emulated_word(vdev, PCI_SUBSYSTEM_VENDOR_ID,
2875 vdev->sub_vendor_id, ~0);
2876 trace_vfio_pci_emulated_sub_vendor_id(vdev->vbasedev.name,
2877 vdev->sub_vendor_id);
2878 }
2879
2880 if (vdev->sub_device_id != PCI_ANY_ID) {
2881 if (vdev->sub_device_id > 0xffff) {
1a22aca1 2882 error_setg(errp, "invalid PCI subsystem device ID provided");
426ec904 2883 goto error;
89dcccc5
AW
2884 }
2885 vfio_add_emulated_word(vdev, PCI_SUBSYSTEM_ID, vdev->sub_device_id, ~0);
2886 trace_vfio_pci_emulated_sub_device_id(vdev->vbasedev.name,
2887 vdev->sub_device_id);
2888 }
ff635e37 2889
4b5d5e87
AW
2890 /* QEMU can change multi-function devices to single function, or reverse */
2891 vdev->emulated_config_bits[PCI_HEADER_TYPE] =
2892 PCI_HEADER_TYPE_MULTI_FUNCTION;
2893
187d6232
AW
2894 /* Restore or clear multifunction, this is always controlled by QEMU */
2895 if (vdev->pdev.cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
2896 vdev->pdev.config[PCI_HEADER_TYPE] |= PCI_HEADER_TYPE_MULTI_FUNCTION;
2897 } else {
2898 vdev->pdev.config[PCI_HEADER_TYPE] &= ~PCI_HEADER_TYPE_MULTI_FUNCTION;
2899 }
2900
65501a74
AW
2901 /*
2902 * Clear host resource mapping info. If we choose not to register a
2903 * BAR, such as might be the case with the option ROM, we can get
2904 * confusing, unwritable, residual addresses from the host here.
2905 */
2906 memset(&vdev->pdev.config[PCI_BASE_ADDRESS_0], 0, 24);
2907 memset(&vdev->pdev.config[PCI_ROM_ADDRESS], 0, 4);
2908
6f864e6e 2909 vfio_pci_size_rom(vdev);
65501a74 2910
89d5202e
AW
2911 vfio_bars_prepare(vdev);
2912
ec3bcf42
EA
2913 vfio_msix_early_setup(vdev, &err);
2914 if (err) {
2915 error_propagate(errp, err);
008d0e2d 2916 goto error;
65501a74
AW
2917 }
2918
3a286732 2919 vfio_bars_register(vdev);
65501a74 2920
1a22aca1 2921 ret = vfio_add_capabilities(vdev, errp);
65501a74
AW
2922 if (ret) {
2923 goto out_teardown;
2924 }
2925
182bca45
AW
2926 if (vdev->vga) {
2927 vfio_vga_quirk_setup(vdev);
2928 }
2929
581406e0
AW
2930 for (i = 0; i < PCI_ROM_SLOT; i++) {
2931 vfio_bar_quirk_setup(vdev, i);
2932 }
2933
6ced0bba
AW
2934 if (!vdev->igd_opregion &&
2935 vdev->features & VFIO_FEATURE_ENABLE_IGD_OPREGION) {
2936 struct vfio_region_info *opregion;
2937
2938 if (vdev->pdev.qdev.hotplugged) {
1a22aca1 2939 error_setg(errp,
426ec904
EA
2940 "cannot support IGD OpRegion feature on hotplugged "
2941 "device");
6ced0bba
AW
2942 goto out_teardown;
2943 }
2944
2945 ret = vfio_get_dev_region_info(&vdev->vbasedev,
2946 VFIO_REGION_TYPE_PCI_VENDOR_TYPE | PCI_VENDOR_ID_INTEL,
2947 VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION, &opregion);
2948 if (ret) {
1a22aca1 2949 error_setg_errno(errp, -ret,
426ec904 2950 "does not support requested IGD OpRegion feature");
6ced0bba
AW
2951 goto out_teardown;
2952 }
2953
1a22aca1 2954 ret = vfio_pci_igd_opregion_init(vdev, opregion, errp);
6ced0bba
AW
2955 g_free(opregion);
2956 if (ret) {
6ced0bba
AW
2957 goto out_teardown;
2958 }
2959 }
2960
4b5d5e87
AW
2961 /* QEMU emulates all of MSI & MSIX */
2962 if (pdev->cap_present & QEMU_PCI_CAP_MSIX) {
2963 memset(vdev->emulated_config_bits + pdev->msix_cap, 0xff,
2964 MSIX_CAP_LENGTH);
2965 }
2966
2967 if (pdev->cap_present & QEMU_PCI_CAP_MSI) {
2968 memset(vdev->emulated_config_bits + pdev->msi_cap, 0xff,
2969 vdev->msi_cap_size);
2970 }
2971
65501a74 2972 if (vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1)) {
bc72ad67 2973 vdev->intx.mmap_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
ea486926 2974 vfio_intx_mmap_enable, vdev);
ad54dbd8
DG
2975 pci_device_set_intx_routing_notifier(&vdev->pdev,
2976 vfio_intx_routing_notifier);
c5478fea
DG
2977 vdev->irqchip_change_notifier.notify = vfio_irqchip_change;
2978 kvm_irqchip_add_change_notifier(&vdev->irqchip_change_notifier);
1a22aca1 2979 ret = vfio_intx_enable(vdev, errp);
65501a74 2980 if (ret) {
c5478fea 2981 goto out_deregister;
65501a74
AW
2982 }
2983 }
2984
a9994687
GH
2985 if (vdev->display != ON_OFF_AUTO_OFF) {
2986 ret = vfio_display_probe(vdev, errp);
2987 if (ret) {
c5478fea 2988 goto out_deregister;
a9994687
GH
2989 }
2990 }
b290659f
GH
2991 if (vdev->enable_ramfb && vdev->dpy == NULL) {
2992 error_setg(errp, "ramfb=on requires display=on");
c5478fea 2993 goto out_deregister;
b290659f 2994 }
c62a0c7c
GH
2995 if (vdev->display_xres || vdev->display_yres) {
2996 if (vdev->dpy == NULL) {
2997 error_setg(errp, "xres and yres properties require display=on");
c5478fea 2998 goto out_deregister;
c62a0c7c
GH
2999 }
3000 if (vdev->dpy->edid_regs == NULL) {
3001 error_setg(errp, "xres and yres properties need edid support");
c5478fea 3002 goto out_deregister;
c62a0c7c
GH
3003 }
3004 }
a9994687 3005
ec132efa
AK
3006 if (vdev->vendor_id == PCI_VENDOR_ID_NVIDIA) {
3007 ret = vfio_pci_nvidia_v100_ram_init(vdev, errp);
3008 if (ret && ret != -ENODEV) {
3009 error_report("Failed to setup NVIDIA V100 GPU RAM");
3010 }
3011 }
3012
3013 if (vdev->vendor_id == PCI_VENDOR_ID_IBM) {
3014 ret = vfio_pci_nvlink2_init(vdev, errp);
3015 if (ret && ret != -ENODEV) {
3016 error_report("Failed to setup NVlink2 bridge");
3017 }
3018 }
3019
7b4b0e9e 3020 vfio_register_err_notifier(vdev);
47cbe50c 3021 vfio_register_req_notifier(vdev);
c9c50009 3022 vfio_setup_resetfn_quirk(vdev);
c29029dd 3023
1a22aca1 3024 return;
65501a74 3025
c5478fea 3026out_deregister:
65501a74 3027 pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
c5478fea
DG
3028 kvm_irqchip_remove_change_notifier(&vdev->irqchip_change_notifier);
3029out_teardown:
65501a74 3030 vfio_teardown_msi(vdev);
2d82f8a3 3031 vfio_bars_exit(vdev);
426ec904 3032error:
c3b8e3e0 3033 error_prepend(errp, VFIO_MSG_PREFIX, vdev->vbasedev.name);
f045a010
JF
3034 if (vdev->migration_blocker) {
3035 migrate_del_blocker(vdev->migration_blocker);
3036 error_free(vdev->migration_blocker);
1335d643 3037 vdev->migration_blocker = NULL;
f045a010 3038 }
77a10d04
PB
3039}
3040
3041static void vfio_instance_finalize(Object *obj)
3042{
2683ccd5 3043 VFIOPCIDevice *vdev = PCI_VFIO(obj);
77a10d04
PB
3044 VFIOGroup *group = vdev->vbasedev.group;
3045
a9994687 3046 vfio_display_finalize(vdev);
2d82f8a3 3047 vfio_bars_finalize(vdev);
4b5d5e87 3048 g_free(vdev->emulated_config_bits);
77a10d04 3049 g_free(vdev->rom);
f045a010
JF
3050 if (vdev->migration_blocker) {
3051 migrate_del_blocker(vdev->migration_blocker);
3052 error_free(vdev->migration_blocker);
3053 }
c4c45e94
AW
3054 /*
3055 * XXX Leaking igd_opregion is not an oversight; we can't remove the
3056 * fw_cfg entry, so leaking this allocation seems like the safest
3057 * option.
3058 *
3059 * g_free(vdev->igd_opregion);
3060 */
65501a74
AW
3061 vfio_put_device(vdev);
3062 vfio_put_group(group);
65501a74
AW
3063}
3064
3065static void vfio_exitfn(PCIDevice *pdev)
3066{
2683ccd5 3067 VFIOPCIDevice *vdev = PCI_VFIO(pdev);
65501a74 3068
47cbe50c 3069 vfio_unregister_req_notifier(vdev);
7b4b0e9e 3070 vfio_unregister_err_notifier(vdev);
65501a74 3071 pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
0446f812
PX
3072 if (vdev->irqchip_change_notifier.notify) {
3073 kvm_irqchip_remove_change_notifier(&vdev->irqchip_change_notifier);
3074 }
65501a74 3075 vfio_disable_interrupts(vdev);
ea486926 3076 if (vdev->intx.mmap_timer) {
bc72ad67 3077 timer_free(vdev->intx.mmap_timer);
ea486926 3078 }
65501a74 3079 vfio_teardown_msi(vdev);
2d82f8a3 3080 vfio_bars_exit(vdev);
65501a74
AW
3081}
3082
3083static void vfio_pci_reset(DeviceState *dev)
3084{
2683ccd5 3085 VFIOPCIDevice *vdev = PCI_VFIO(dev);
65501a74 3086
df92ee44 3087 trace_vfio_pci_reset(vdev->vbasedev.name);
5834a83f 3088
f16f39c3 3089 vfio_pci_pre_reset(vdev);
ba661818 3090
8983e3e3
TZ
3091 if (vdev->display != ON_OFF_AUTO_OFF) {
3092 vfio_display_reset(vdev);
3093 }
3094
5655f931
AW
3095 if (vdev->resetfn && !vdev->resetfn(vdev)) {
3096 goto post_reset;
3097 }
3098
b47d8efa
EA
3099 if (vdev->vbasedev.reset_works &&
3100 (vdev->has_flr || !vdev->has_pm_reset) &&
5546a621 3101 !ioctl(vdev->vbasedev.fd, VFIO_DEVICE_RESET)) {
df92ee44 3102 trace_vfio_pci_reset_flr(vdev->vbasedev.name);
f16f39c3 3103 goto post_reset;
ba661818
AW
3104 }
3105
f16f39c3
AW
3106 /* See if we can do our own bus reset */
3107 if (!vfio_pci_hot_reset_one(vdev)) {
3108 goto post_reset;
3109 }
5834a83f 3110
f16f39c3 3111 /* If nothing else works and the device supports PM reset, use it */
b47d8efa 3112 if (vdev->vbasedev.reset_works && vdev->has_pm_reset &&
5546a621 3113 !ioctl(vdev->vbasedev.fd, VFIO_DEVICE_RESET)) {
df92ee44 3114 trace_vfio_pci_reset_pm(vdev->vbasedev.name);
f16f39c3 3115 goto post_reset;
65501a74 3116 }
5834a83f 3117
f16f39c3
AW
3118post_reset:
3119 vfio_pci_post_reset(vdev);
65501a74
AW
3120}
3121
abc5b3bf
GA
3122static void vfio_instance_init(Object *obj)
3123{
3124 PCIDevice *pci_dev = PCI_DEVICE(obj);
2683ccd5 3125 VFIOPCIDevice *vdev = PCI_VFIO(obj);
abc5b3bf
GA
3126
3127 device_add_bootindex_property(obj, &vdev->bootindex,
3128 "bootindex", NULL,
40c2281c 3129 &pci_dev->qdev);
4a946268
EA
3130 vdev->host.domain = ~0U;
3131 vdev->host.bus = ~0U;
3132 vdev->host.slot = ~0U;
3133 vdev->host.function = ~0U;
dfbee78d
AW
3134
3135 vdev->nv_gpudirect_clique = 0xFF;
d61a363d
YB
3136
3137 /* QEMU_PCI_CAP_EXPRESS initialization does not depend on QEMU command
3138 * line, so there is no need to wait until realize like other devices */
3139 pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS;
abc5b3bf
GA
3140}
3141
65501a74 3142static Property vfio_pci_dev_properties[] = {
9ee27d73 3143 DEFINE_PROP_PCI_HOST_DEVADDR("host", VFIOPCIDevice, host),
7df9381b 3144 DEFINE_PROP_STRING("sysfsdev", VFIOPCIDevice, vbasedev.sysfsdev),
a9994687 3145 DEFINE_PROP_ON_OFF_AUTO("display", VFIOPCIDevice,
8151a9c5 3146 display, ON_OFF_AUTO_OFF),
c62a0c7c
GH
3147 DEFINE_PROP_UINT32("xres", VFIOPCIDevice, display_xres, 0),
3148 DEFINE_PROP_UINT32("yres", VFIOPCIDevice, display_yres, 0),
9ee27d73 3149 DEFINE_PROP_UINT32("x-intx-mmap-timeout-ms", VFIOPCIDevice,
ea486926 3150 intx.mmap_timeout, 1100),
9ee27d73 3151 DEFINE_PROP_BIT("x-vga", VFIOPCIDevice, features,
f15689c7 3152 VFIO_FEATURE_ENABLE_VGA_BIT, false),
47cbe50c
AW
3153 DEFINE_PROP_BIT("x-req", VFIOPCIDevice, features,
3154 VFIO_FEATURE_ENABLE_REQ_BIT, true),
6ced0bba
AW
3155 DEFINE_PROP_BIT("x-igd-opregion", VFIOPCIDevice, features,
3156 VFIO_FEATURE_ENABLE_IGD_OPREGION_BIT, false),
5e15d79b 3157 DEFINE_PROP_BOOL("x-no-mmap", VFIOPCIDevice, vbasedev.no_mmap, false),
238e9172 3158 DEFINE_PROP_BOOL("x-balloon-allowed", VFIOPCIDevice,
aff92b82 3159 vbasedev.ram_block_discard_allowed, false),
46746dba
AW
3160 DEFINE_PROP_BOOL("x-no-kvm-intx", VFIOPCIDevice, no_kvm_intx, false),
3161 DEFINE_PROP_BOOL("x-no-kvm-msi", VFIOPCIDevice, no_kvm_msi, false),
3162 DEFINE_PROP_BOOL("x-no-kvm-msix", VFIOPCIDevice, no_kvm_msix, false),
db32d0f4
AW
3163 DEFINE_PROP_BOOL("x-no-geforce-quirks", VFIOPCIDevice,
3164 no_geforce_quirks, false),
c958c51d
AW
3165 DEFINE_PROP_BOOL("x-no-kvm-ioeventfd", VFIOPCIDevice, no_kvm_ioeventfd,
3166 false),
2b1dbd0d
AW
3167 DEFINE_PROP_BOOL("x-no-vfio-ioeventfd", VFIOPCIDevice, no_vfio_ioeventfd,
3168 false),
89dcccc5
AW
3169 DEFINE_PROP_UINT32("x-pci-vendor-id", VFIOPCIDevice, vendor_id, PCI_ANY_ID),
3170 DEFINE_PROP_UINT32("x-pci-device-id", VFIOPCIDevice, device_id, PCI_ANY_ID),
3171 DEFINE_PROP_UINT32("x-pci-sub-vendor-id", VFIOPCIDevice,
3172 sub_vendor_id, PCI_ANY_ID),
3173 DEFINE_PROP_UINT32("x-pci-sub-device-id", VFIOPCIDevice,
3174 sub_device_id, PCI_ANY_ID),
c4c45e94 3175 DEFINE_PROP_UINT32("x-igd-gms", VFIOPCIDevice, igd_gms, 0),
dfbee78d
AW
3176 DEFINE_PROP_UNSIGNED_NODEFAULT("x-nv-gpudirect-clique", VFIOPCIDevice,
3177 nv_gpudirect_clique,
3178 qdev_prop_nv_gpudirect_clique, uint8_t),
89d5202e
AW
3179 DEFINE_PROP_OFF_AUTO_PCIBAR("x-msix-relocation", VFIOPCIDevice, msix_relo,
3180 OFF_AUTOPCIBAR_OFF),
65501a74
AW
3181 /*
3182 * TODO - support passed fds... is this necessary?
9ee27d73
EA
3183 * DEFINE_PROP_STRING("vfiofd", VFIOPCIDevice, vfiofd_name),
3184 * DEFINE_PROP_STRING("vfiogroupfd, VFIOPCIDevice, vfiogroupfd_name),
65501a74
AW
3185 */
3186 DEFINE_PROP_END_OF_LIST(),
3187};
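
/*
 * Minimal usage sketch (addresses and paths are examples only):
 *
 *   -device vfio-pci,host=0000:06:0d.0
 *   -device vfio-pci,sysfsdev=/sys/bus/pci/devices/0000:06:0d.0,x-no-mmap=on
 *
 * Either the host= shorthand or an explicit sysfsdev= path selects the
 * physical device; the remaining properties above tune optional behavior.
 */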
3188
65501a74
AW
3189static void vfio_pci_dev_class_init(ObjectClass *klass, void *data)
3190{
3191 DeviceClass *dc = DEVICE_CLASS(klass);
3192 PCIDeviceClass *pdc = PCI_DEVICE_CLASS(klass);
3193
3194 dc->reset = vfio_pci_reset;
4f67d30b 3195 device_class_set_props(dc, vfio_pci_dev_properties);
d9f0e638 3196 dc->desc = "VFIO-based PCI device assignment";
125ee0ed 3197 set_bit(DEVICE_CATEGORY_MISC, dc->categories);
1a22aca1 3198 pdc->realize = vfio_realize;
65501a74
AW
3199 pdc->exit = vfio_exitfn;
3200 pdc->config_read = vfio_pci_read_config;
3201 pdc->config_write = vfio_pci_write_config;
3202}
3203
3204static const TypeInfo vfio_pci_dev_info = {
2683ccd5 3205 .name = TYPE_VFIO_PCI,
65501a74 3206 .parent = TYPE_PCI_DEVICE,
9ee27d73 3207 .instance_size = sizeof(VFIOPCIDevice),
65501a74 3208 .class_init = vfio_pci_dev_class_init,
abc5b3bf 3209 .instance_init = vfio_instance_init,
77a10d04 3210 .instance_finalize = vfio_instance_finalize,
a5fa336f
EH
3211 .interfaces = (InterfaceInfo[]) {
3212 { INTERFACE_PCIE_DEVICE },
3213 { INTERFACE_CONVENTIONAL_PCI_DEVICE },
3214 { }
3215 },
65501a74
AW
3216};
3217
b290659f
GH
3218static Property vfio_pci_dev_nohotplug_properties[] = {
3219 DEFINE_PROP_BOOL("ramfb", VFIOPCIDevice, enable_ramfb, false),
3220 DEFINE_PROP_END_OF_LIST(),
3221};
3222
3223static void vfio_pci_nohotplug_dev_class_init(ObjectClass *klass, void *data)
3224{
3225 DeviceClass *dc = DEVICE_CLASS(klass);
3226
4f67d30b 3227 device_class_set_props(dc, vfio_pci_dev_nohotplug_properties);
b290659f
GH
3228 dc->hotpluggable = false;
3229}
3230
3231static const TypeInfo vfio_pci_nohotplug_dev_info = {
f75ca627 3232 .name = TYPE_VFIO_PCI_NOHOTPLUG,
0c0c8f8a 3233 .parent = TYPE_VFIO_PCI,
b290659f
GH
3234 .instance_size = sizeof(VFIOPCIDevice),
3235 .class_init = vfio_pci_nohotplug_dev_class_init,
3236};
3237
65501a74
AW
3238static void register_vfio_pci_dev_type(void)
3239{
3240 type_register_static(&vfio_pci_dev_info);
b290659f 3241 type_register_static(&vfio_pci_nohotplug_dev_info);
65501a74
AW
3242}
3243
3244type_init(register_vfio_pci_dev_type)