/*
 * vfio based device assignment support
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Alex Williamson <alex.williamson@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Based on qemu-kvm device-assignment:
 *  Adapted for KVM by Qumranet.
 *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
 *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
 *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
 *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
 *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
 */

#include "qemu/osdep.h"
#include <linux/vfio.h>
#include <sys/ioctl.h>

#include "hw/hw.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/pci/pci_bridge.h"
#include "hw/qdev-properties.h"
#include "hw/qdev-properties-system.h"
#include "migration/vmstate.h"
#include "qapi/qmp/qdict.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qemu/range.h"
#include "qemu/units.h"
#include "sysemu/kvm.h"
#include "sysemu/runstate.h"
#include "pci.h"
#include "trace.h"
#include "qapi/error.h"
#include "migration/blocker.h"
#include "migration/qemu-file.h"

#define TYPE_VFIO_PCI_NOHOTPLUG "vfio-pci-nohotplug"

static void vfio_disable_interrupts(VFIOPCIDevice *vdev);
static void vfio_mmap_set_enabled(VFIOPCIDevice *vdev, bool enabled);

/*
 * Disabling BAR mmapping can be slow, but toggling it around INTx can
 * also be a huge overhead. We try to get the best of both worlds by
 * waiting until an interrupt to disable mmaps (subsequent transitions
 * to the same state are effectively no overhead). If the interrupt has
 * been serviced and the time gap is long enough, we re-enable mmaps for
 * performance. This works well for things like graphics cards, which
 * may not use their interrupt at all and are penalized to an unusable
 * level by read/write BAR traps. Other devices, like NICs, have more
 * regular interrupts and see much better latency by staying in non-mmap
 * mode. We therefore set the default mmap_timeout such that a ping
 * is just enough to keep the mmap disabled. Users can experiment with
 * other options with the x-intx-mmap-timeout-ms parameter (a value of
 * zero disables the timer).
 */
static void vfio_intx_mmap_enable(void *opaque)
{
    VFIOPCIDevice *vdev = opaque;

    if (vdev->intx.pending) {
        timer_mod(vdev->intx.mmap_timer,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vdev->intx.mmap_timeout);
        return;
    }

    vfio_mmap_set_enabled(vdev, true);
}
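
/*
 * Illustrative usage (assumed command-line syntax, not taken from this
 * file): the timeout above is exposed as an experimental vfio-pci device
 * property, e.g.
 *
 *   -device vfio-pci,host=0000:01:00.0,x-intx-mmap-timeout-ms=0
 *
 * With a value of zero no re-enable timer is armed, so once an INTx fires
 * the BAR mmaps stay disabled for as long as INTx remains in use.
 */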

static void vfio_intx_interrupt(void *opaque)
{
    VFIOPCIDevice *vdev = opaque;

    if (!event_notifier_test_and_clear(&vdev->intx.interrupt)) {
        return;
    }

    trace_vfio_intx_interrupt(vdev->vbasedev.name, 'A' + vdev->intx.pin);

    vdev->intx.pending = true;
    pci_irq_assert(&vdev->pdev);
    vfio_mmap_set_enabled(vdev, false);
    if (vdev->intx.mmap_timeout) {
        timer_mod(vdev->intx.mmap_timer,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vdev->intx.mmap_timeout);
    }
}

static void vfio_intx_eoi(VFIODevice *vbasedev)
{
    VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);

    if (!vdev->intx.pending) {
        return;
    }

    trace_vfio_intx_eoi(vbasedev->name);

    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);
    vfio_unmask_single_irqindex(vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
}

static void vfio_intx_enable_kvm(VFIOPCIDevice *vdev, Error **errp)
{
#ifdef CONFIG_KVM
    int irq_fd = event_notifier_get_fd(&vdev->intx.interrupt);

    if (vdev->no_kvm_intx || !kvm_irqfds_enabled() ||
        vdev->intx.route.mode != PCI_INTX_ENABLED ||
        !kvm_resamplefds_enabled()) {
        return;
    }

    /* Get to a known interrupt state */
    qemu_set_fd_handler(irq_fd, NULL, NULL, vdev);
    vfio_mask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);

    /* Get an eventfd for resample/unmask */
    if (event_notifier_init(&vdev->intx.unmask, 0)) {
        error_setg(errp, "event_notifier_init failed eoi");
        goto fail;
    }

    if (kvm_irqchip_add_irqfd_notifier_gsi(kvm_state,
                                           &vdev->intx.interrupt,
                                           &vdev->intx.unmask,
                                           vdev->intx.route.irq)) {
        error_setg_errno(errp, errno, "failed to setup resample irqfd");
        goto fail_irqfd;
    }

    if (vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX, 0,
                               VFIO_IRQ_SET_ACTION_UNMASK,
                               event_notifier_get_fd(&vdev->intx.unmask),
                               errp)) {
        goto fail_vfio;
    }

    /* Let'em rip */
    vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);

    vdev->intx.kvm_accel = true;

    trace_vfio_intx_enable_kvm(vdev->vbasedev.name);

    return;

fail_vfio:
    kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, &vdev->intx.interrupt,
                                          vdev->intx.route.irq);
fail_irqfd:
    event_notifier_cleanup(&vdev->intx.unmask);
fail:
    qemu_set_fd_handler(irq_fd, vfio_intx_interrupt, NULL, vdev);
    vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
#endif
}
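
/*
 * For orientation, a sketch of the accelerated INTx path configured above
 * (based on the general VFIO/KVM irqfd-with-resample semantics, not spelled
 * out in this file):
 *
 *   device INTx asserted -> host vfio-pci signals the intx.interrupt eventfd
 *     -> the KVM irqfd injects the IRQ into the guest with no QEMU exit
 *   guest EOI/unmask -> KVM signals the intx.unmask resamplefd
 *     -> VFIO unmasks the device, re-arming it for the next interrupt
 *
 * The fail: path instead re-attaches intx.interrupt to QEMU's event loop,
 * falling back to the userspace handler vfio_intx_interrupt().
 */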

static void vfio_intx_disable_kvm(VFIOPCIDevice *vdev)
{
#ifdef CONFIG_KVM
    if (!vdev->intx.kvm_accel) {
        return;
    }

    /*
     * Get to a known state, hardware masked, QEMU ready to accept new
     * interrupts, QEMU IRQ de-asserted.
     */
    vfio_mask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);

    /* Tell KVM to stop listening for an INTx irqfd */
    if (kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, &vdev->intx.interrupt,
                                              vdev->intx.route.irq)) {
        error_report("vfio: Error: Failed to disable INTx irqfd: %m");
    }

    /* We only need to close the eventfd for VFIO to cleanup the kernel side */
    event_notifier_cleanup(&vdev->intx.unmask);

    /* QEMU starts listening for interrupt events. */
    qemu_set_fd_handler(event_notifier_get_fd(&vdev->intx.interrupt),
                        vfio_intx_interrupt, NULL, vdev);

    vdev->intx.kvm_accel = false;

    /* If we've missed an event, let it re-fire through QEMU */
    vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);

    trace_vfio_intx_disable_kvm(vdev->vbasedev.name);
#endif
}

static void vfio_intx_update(VFIOPCIDevice *vdev, PCIINTxRoute *route)
{
    Error *err = NULL;

    trace_vfio_intx_update(vdev->vbasedev.name,
                           vdev->intx.route.irq, route->irq);

    vfio_intx_disable_kvm(vdev);

    vdev->intx.route = *route;

    if (route->mode != PCI_INTX_ENABLED) {
        return;
    }

    vfio_intx_enable_kvm(vdev, &err);
    if (err) {
        warn_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
    }

    /* Re-enable the interrupt in case we missed an EOI */
    vfio_intx_eoi(&vdev->vbasedev);
}

static void vfio_intx_routing_notifier(PCIDevice *pdev)
{
    VFIOPCIDevice *vdev = VFIO_PCI(pdev);
    PCIINTxRoute route;

    if (vdev->interrupt != VFIO_INT_INTx) {
        return;
    }

    route = pci_device_route_intx_to_irq(&vdev->pdev, vdev->intx.pin);

    if (pci_intx_route_changed(&vdev->intx.route, &route)) {
        vfio_intx_update(vdev, &route);
    }
}

static void vfio_irqchip_change(Notifier *notify, void *data)
{
    VFIOPCIDevice *vdev = container_of(notify, VFIOPCIDevice,
                                       irqchip_change_notifier);

    vfio_intx_update(vdev, &vdev->intx.route);
}

static int vfio_intx_enable(VFIOPCIDevice *vdev, Error **errp)
{
    uint8_t pin = vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1);
    Error *err = NULL;
    int32_t fd;
    int ret;

    if (!pin) {
        return 0;
    }

    vfio_disable_interrupts(vdev);

    vdev->intx.pin = pin - 1; /* Pin A (1) -> irq[0] */
    pci_config_set_interrupt_pin(vdev->pdev.config, pin);

#ifdef CONFIG_KVM
    /*
     * Only conditional to avoid generating error messages on platforms
     * where we won't actually use the result anyway.
     */
    if (kvm_irqfds_enabled() && kvm_resamplefds_enabled()) {
        vdev->intx.route = pci_device_route_intx_to_irq(&vdev->pdev,
                                                        vdev->intx.pin);
    }
#endif

    ret = event_notifier_init(&vdev->intx.interrupt, 0);
    if (ret) {
        error_setg_errno(errp, -ret, "event_notifier_init failed");
        return ret;
    }
    fd = event_notifier_get_fd(&vdev->intx.interrupt);
    qemu_set_fd_handler(fd, vfio_intx_interrupt, NULL, vdev);

    if (vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX, 0,
                               VFIO_IRQ_SET_ACTION_TRIGGER, fd, errp)) {
        qemu_set_fd_handler(fd, NULL, NULL, vdev);
        event_notifier_cleanup(&vdev->intx.interrupt);
        return -errno;
    }

    vfio_intx_enable_kvm(vdev, &err);
    if (err) {
        warn_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
    }

    vdev->interrupt = VFIO_INT_INTx;

    trace_vfio_intx_enable(vdev->vbasedev.name);
    return 0;
}

static void vfio_intx_disable(VFIOPCIDevice *vdev)
{
    int fd;

    timer_del(vdev->intx.mmap_timer);
    vfio_intx_disable_kvm(vdev);
    vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);
    vfio_mmap_set_enabled(vdev, true);

    fd = event_notifier_get_fd(&vdev->intx.interrupt);
    qemu_set_fd_handler(fd, NULL, NULL, vdev);
    event_notifier_cleanup(&vdev->intx.interrupt);

    vdev->interrupt = VFIO_INT_NONE;

    trace_vfio_intx_disable(vdev->vbasedev.name);
}

/*
 * MSI/X
 */
static void vfio_msi_interrupt(void *opaque)
{
    VFIOMSIVector *vector = opaque;
    VFIOPCIDevice *vdev = vector->vdev;
    MSIMessage (*get_msg)(PCIDevice *dev, unsigned vector);
    void (*notify)(PCIDevice *dev, unsigned vector);
    MSIMessage msg;
    int nr = vector - vdev->msi_vectors;

    if (!event_notifier_test_and_clear(&vector->interrupt)) {
        return;
    }

    if (vdev->interrupt == VFIO_INT_MSIX) {
        get_msg = msix_get_message;
        notify = msix_notify;

        /* A masked vector firing needs to use the PBA, enable it */
        if (msix_is_masked(&vdev->pdev, nr)) {
            set_bit(nr, vdev->msix->pending);
            memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, true);
            trace_vfio_msix_pba_enable(vdev->vbasedev.name);
        }
    } else if (vdev->interrupt == VFIO_INT_MSI) {
        get_msg = msi_get_message;
        notify = msi_notify;
    } else {
        abort();
    }

    msg = get_msg(&vdev->pdev, nr);
    trace_vfio_msi_interrupt(vdev->vbasedev.name, nr, msg.address, msg.data);
    notify(&vdev->pdev, nr);
}

static int vfio_enable_vectors(VFIOPCIDevice *vdev, bool msix)
{
    struct vfio_irq_set *irq_set;
    int ret = 0, i, argsz;
    int32_t *fds;

    argsz = sizeof(*irq_set) + (vdev->nr_vectors * sizeof(*fds));

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = msix ? VFIO_PCI_MSIX_IRQ_INDEX : VFIO_PCI_MSI_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = vdev->nr_vectors;
    fds = (int32_t *)&irq_set->data;

    for (i = 0; i < vdev->nr_vectors; i++) {
        int fd = -1;

        /*
         * MSI vs MSI-X - The guest has direct access to MSI mask and pending
         * bits, therefore we always use the KVM signaling path when set up.
         * MSI-X mask and pending bits are emulated, so we want to use the
         * KVM signaling path only when configured and unmasked.
         */
        if (vdev->msi_vectors[i].use) {
            if (vdev->msi_vectors[i].virq < 0 ||
                (msix && msix_is_masked(&vdev->pdev, i))) {
                fd = event_notifier_get_fd(&vdev->msi_vectors[i].interrupt);
            } else {
                fd = event_notifier_get_fd(&vdev->msi_vectors[i].kvm_interrupt);
            }
        }

        fds[i] = fd;
    }

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);

    g_free(irq_set);

    return ret;
}
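
/*
 * For reference, the ioctl argument assembled above follows the kernel's
 * struct vfio_irq_set from <linux/vfio.h> (reproduced here as a comment):
 *
 *   struct vfio_irq_set {
 *       __u32 argsz;
 *       __u32 flags;   // a DATA_* type, e.g. EVENTFD, | an ACTION_*
 *       __u32 index;   // e.g. VFIO_PCI_MSI_IRQ_INDEX
 *       __u32 start;   // first vector to act on
 *       __u32 count;   // number of vectors
 *       __u8  data[];  // here: one int32_t eventfd per vector, -1 = unset
 *   };
 */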

static void vfio_add_kvm_msi_virq(VFIOPCIDevice *vdev, VFIOMSIVector *vector,
                                  int vector_n, bool msix)
{
    KVMRouteChange c;
    int virq;

    if ((msix && vdev->no_kvm_msix) || (!msix && vdev->no_kvm_msi)) {
        return;
    }

    if (event_notifier_init(&vector->kvm_interrupt, 0)) {
        return;
    }

    c = kvm_irqchip_begin_route_changes(kvm_state);
    virq = kvm_irqchip_add_msi_route(&c, vector_n, &vdev->pdev);
    if (virq < 0) {
        event_notifier_cleanup(&vector->kvm_interrupt);
        return;
    }
    kvm_irqchip_commit_route_changes(&c);

    if (kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, &vector->kvm_interrupt,
                                           NULL, virq) < 0) {
        kvm_irqchip_release_virq(kvm_state, virq);
        event_notifier_cleanup(&vector->kvm_interrupt);
        return;
    }

    vector->virq = virq;
}

static void vfio_remove_kvm_msi_virq(VFIOMSIVector *vector)
{
    kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, &vector->kvm_interrupt,
                                          vector->virq);
    kvm_irqchip_release_virq(kvm_state, vector->virq);
    vector->virq = -1;
    event_notifier_cleanup(&vector->kvm_interrupt);
}

static void vfio_update_kvm_msi_virq(VFIOMSIVector *vector, MSIMessage msg,
                                     PCIDevice *pdev)
{
    kvm_irqchip_update_msi_route(kvm_state, vector->virq, msg, pdev);
    kvm_irqchip_commit_routes(kvm_state);
}

static int vfio_msix_vector_do_use(PCIDevice *pdev, unsigned int nr,
                                   MSIMessage *msg, IOHandler *handler)
{
    VFIOPCIDevice *vdev = VFIO_PCI(pdev);
    VFIOMSIVector *vector;
    int ret;

    trace_vfio_msix_vector_do_use(vdev->vbasedev.name, nr);

    vector = &vdev->msi_vectors[nr];

    if (!vector->use) {
        vector->vdev = vdev;
        vector->virq = -1;
        if (event_notifier_init(&vector->interrupt, 0)) {
            error_report("vfio: Error: event_notifier_init failed");
        }
        vector->use = true;
        msix_vector_use(pdev, nr);
    }

    qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                        handler, NULL, vector);

    /*
     * Attempt to enable route through KVM irqchip,
     * default to userspace handling if unavailable.
     */
    if (vector->virq >= 0) {
        if (!msg) {
            vfio_remove_kvm_msi_virq(vector);
        } else {
            vfio_update_kvm_msi_virq(vector, *msg, pdev);
        }
    } else {
        if (msg) {
            vfio_add_kvm_msi_virq(vdev, vector, nr, true);
        }
    }

    /*
     * We don't want to have the host allocate all possible MSI vectors
     * for a device if they're not in use, so we shut down and incrementally
     * increase them as needed.
     */
    if (vdev->nr_vectors < nr + 1) {
        vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX);
        vdev->nr_vectors = nr + 1;
        ret = vfio_enable_vectors(vdev, true);
        if (ret) {
            error_report("vfio: failed to enable vectors, %d", ret);
        }
    } else {
        Error *err = NULL;
        int32_t fd;

        if (vector->virq >= 0) {
            fd = event_notifier_get_fd(&vector->kvm_interrupt);
        } else {
            fd = event_notifier_get_fd(&vector->interrupt);
        }

        if (vfio_set_irq_signaling(&vdev->vbasedev,
                                   VFIO_PCI_MSIX_IRQ_INDEX, nr,
                                   VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err)) {
            error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
        }
    }

    /* Disable PBA emulation when nothing more is pending. */
    clear_bit(nr, vdev->msix->pending);
    if (find_first_bit(vdev->msix->pending,
                       vdev->nr_vectors) == vdev->nr_vectors) {
        memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, false);
        trace_vfio_msix_pba_disable(vdev->vbasedev.name);
    }

    return 0;
}

static int vfio_msix_vector_use(PCIDevice *pdev,
                                unsigned int nr, MSIMessage msg)
{
    return vfio_msix_vector_do_use(pdev, nr, &msg, vfio_msi_interrupt);
}

static void vfio_msix_vector_release(PCIDevice *pdev, unsigned int nr)
{
    VFIOPCIDevice *vdev = VFIO_PCI(pdev);
    VFIOMSIVector *vector = &vdev->msi_vectors[nr];

    trace_vfio_msix_vector_release(vdev->vbasedev.name, nr);

    /*
     * There are still old guests that mask and unmask vectors on every
     * interrupt. If we're using QEMU bypass with a KVM irqfd, leave all of
     * the KVM setup in place, simply switch VFIO to use the non-bypass
     * eventfd. We'll then fire the interrupt through QEMU and the MSI-X
     * core will mask the interrupt and set pending bits, allowing it to
     * be re-asserted on unmask. Nothing to do if already using QEMU mode.
     */
    if (vector->virq >= 0) {
        int32_t fd = event_notifier_get_fd(&vector->interrupt);
        Error *err = NULL;

        if (vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX, nr,
                                   VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err)) {
            error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
        }
    }
}

static void vfio_msix_enable(VFIOPCIDevice *vdev)
{
    PCIDevice *pdev = &vdev->pdev;
    unsigned int nr, max_vec = 0;

    vfio_disable_interrupts(vdev);

    vdev->msi_vectors = g_new0(VFIOMSIVector, vdev->msix->entries);

    vdev->interrupt = VFIO_INT_MSIX;

    /*
     * Some communication channels between VF & PF or PF & fw rely on the
     * physical state of the device and expect that enabling MSI-X from the
     * guest enables the same on the host. When our guest is Linux, the
     * guest driver call to pci_enable_msix() sets the enabling bit in the
     * MSI-X capability, but leaves the vector table masked. We therefore
     * can't rely on a vector_use callback (from request_irq() in the guest)
     * to switch the physical device into MSI-X mode because that may come a
     * long time after pci_enable_msix(). This code enables vector 0 with
     * triggering to userspace, then immediately releases the vector, leaving
     * the physical device with no vectors enabled, but MSI-X enabled, just
     * like the guest view.
     * If there are already unmasked vectors (in migration resume phase and
     * some guest startups) which will be enabled soon, we can allocate all
     * of them here to avoid inefficiently disabling and enabling vectors
     * repeatedly later.
     */
    if (!pdev->msix_function_masked) {
        for (nr = 0; nr < msix_nr_vectors_allocated(pdev); nr++) {
            if (!msix_is_masked(pdev, nr)) {
                max_vec = nr;
            }
        }
    }
    vfio_msix_vector_do_use(pdev, max_vec, NULL, NULL);
    vfio_msix_vector_release(pdev, max_vec);

    if (msix_set_vector_notifiers(pdev, vfio_msix_vector_use,
                                  vfio_msix_vector_release, NULL)) {
        error_report("vfio: msix_set_vector_notifiers failed");
    }

    trace_vfio_msix_enable(vdev->vbasedev.name);
}

static void vfio_msi_enable(VFIOPCIDevice *vdev)
{
    int ret, i;

    vfio_disable_interrupts(vdev);

    vdev->nr_vectors = msi_nr_vectors_allocated(&vdev->pdev);
retry:
    vdev->msi_vectors = g_new0(VFIOMSIVector, vdev->nr_vectors);

    for (i = 0; i < vdev->nr_vectors; i++) {
        VFIOMSIVector *vector = &vdev->msi_vectors[i];

        vector->vdev = vdev;
        vector->virq = -1;
        vector->use = true;

        if (event_notifier_init(&vector->interrupt, 0)) {
            error_report("vfio: Error: event_notifier_init failed");
        }

        qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                            vfio_msi_interrupt, NULL, vector);

        /*
         * Attempt to enable route through KVM irqchip,
         * default to userspace handling if unavailable.
         */
        vfio_add_kvm_msi_virq(vdev, vector, i, false);
    }

    /* Set interrupt type prior to possible interrupts */
    vdev->interrupt = VFIO_INT_MSI;

    ret = vfio_enable_vectors(vdev, false);
    if (ret) {
        if (ret < 0) {
            error_report("vfio: Error: Failed to setup MSI fds: %m");
        } else if (ret != vdev->nr_vectors) {
            error_report("vfio: Error: Failed to enable %d "
                         "MSI vectors, retry with %d", vdev->nr_vectors, ret);
        }

        for (i = 0; i < vdev->nr_vectors; i++) {
            VFIOMSIVector *vector = &vdev->msi_vectors[i];
            if (vector->virq >= 0) {
                vfio_remove_kvm_msi_virq(vector);
            }
            qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                                NULL, NULL, NULL);
            event_notifier_cleanup(&vector->interrupt);
        }

        g_free(vdev->msi_vectors);
        vdev->msi_vectors = NULL;

        if (ret > 0 && ret != vdev->nr_vectors) {
            vdev->nr_vectors = ret;
            goto retry;
        }
        vdev->nr_vectors = 0;

        /*
         * Failing to setup MSI doesn't really fall within any specification.
         * Let's try leaving interrupts disabled and hope the guest figures
         * out to fall back to INTx for this device.
         */
        error_report("vfio: Error: Failed to enable MSI");
        vdev->interrupt = VFIO_INT_NONE;

        return;
    }

    trace_vfio_msi_enable(vdev->vbasedev.name, vdev->nr_vectors);
}
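
/*
 * Worked example of the retry path above (illustrative numbers): if the
 * guest programs 16 MSI vectors but VFIO_DEVICE_SET_IRQS reports that only
 * 8 can be backed (a positive return != nr_vectors), all 16 notifiers are
 * torn down, nr_vectors is reduced to 8, and the loop retries from the
 * "retry:" label with the smaller allocation.
 */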

static void vfio_msi_disable_common(VFIOPCIDevice *vdev)
{
    Error *err = NULL;
    int i;

    for (i = 0; i < vdev->nr_vectors; i++) {
        VFIOMSIVector *vector = &vdev->msi_vectors[i];
        if (vdev->msi_vectors[i].use) {
            if (vector->virq >= 0) {
                vfio_remove_kvm_msi_virq(vector);
            }
            qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                                NULL, NULL, NULL);
            event_notifier_cleanup(&vector->interrupt);
        }
    }

    g_free(vdev->msi_vectors);
    vdev->msi_vectors = NULL;
    vdev->nr_vectors = 0;
    vdev->interrupt = VFIO_INT_NONE;

    vfio_intx_enable(vdev, &err);
    if (err) {
        error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
    }
}

static void vfio_msix_disable(VFIOPCIDevice *vdev)
{
    int i;

    msix_unset_vector_notifiers(&vdev->pdev);

    /*
     * MSI-X will only release vectors if MSI-X is still enabled on the
     * device, check through the rest and release it ourselves if necessary.
     */
    for (i = 0; i < vdev->nr_vectors; i++) {
        if (vdev->msi_vectors[i].use) {
            vfio_msix_vector_release(&vdev->pdev, i);
            msix_vector_unuse(&vdev->pdev, i);
        }
    }

    if (vdev->nr_vectors) {
        vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX);
    }

    vfio_msi_disable_common(vdev);

    memset(vdev->msix->pending, 0,
           BITS_TO_LONGS(vdev->msix->entries) * sizeof(unsigned long));

    trace_vfio_msix_disable(vdev->vbasedev.name);
}

static void vfio_msi_disable(VFIOPCIDevice *vdev)
{
    vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSI_IRQ_INDEX);
    vfio_msi_disable_common(vdev);

    trace_vfio_msi_disable(vdev->vbasedev.name);
}

static void vfio_update_msi(VFIOPCIDevice *vdev)
{
    int i;

    for (i = 0; i < vdev->nr_vectors; i++) {
        VFIOMSIVector *vector = &vdev->msi_vectors[i];
        MSIMessage msg;

        if (!vector->use || vector->virq < 0) {
            continue;
        }

        msg = msi_get_message(&vdev->pdev, i);
        vfio_update_kvm_msi_virq(vector, msg, &vdev->pdev);
    }
}

static void vfio_pci_load_rom(VFIOPCIDevice *vdev)
{
    struct vfio_region_info *reg_info;
    uint64_t size;
    off_t off = 0;
    ssize_t bytes;

    if (vfio_get_region_info(&vdev->vbasedev,
                             VFIO_PCI_ROM_REGION_INDEX, &reg_info)) {
        error_report("vfio: Error getting ROM info: %m");
        return;
    }

    trace_vfio_pci_load_rom(vdev->vbasedev.name, (unsigned long)reg_info->size,
                            (unsigned long)reg_info->offset,
                            (unsigned long)reg_info->flags);

    vdev->rom_size = size = reg_info->size;
    vdev->rom_offset = reg_info->offset;

    g_free(reg_info);

    if (!vdev->rom_size) {
        vdev->rom_read_failed = true;
        error_report("vfio-pci: Cannot read device rom at "
                     "%s", vdev->vbasedev.name);
        error_printf("Device option ROM contents are probably invalid "
                     "(check dmesg).\nSkip option ROM probe with rombar=0, "
                     "or load from file with romfile=\n");
        return;
    }

    vdev->rom = g_malloc(size);
    memset(vdev->rom, 0xff, size);

    while (size) {
        bytes = pread(vdev->vbasedev.fd, vdev->rom + off,
                      size, vdev->rom_offset + off);
        if (bytes == 0) {
            break;
        } else if (bytes > 0) {
            off += bytes;
            size -= bytes;
        } else {
            if (errno == EINTR || errno == EAGAIN) {
                continue;
            }
            error_report("vfio: Error reading device ROM: %m");
            break;
        }
    }

    /*
     * Test the ROM signature against our device, if the vendor is correct
     * but the device ID doesn't match, store the correct device ID and
     * recompute the checksum. Intel IGD devices need this and are known
     * to have bogus checksums so we can't simply adjust the checksum.
     */
    if (pci_get_word(vdev->rom) == 0xaa55 &&
        pci_get_word(vdev->rom + 0x18) + 8 < vdev->rom_size &&
        !memcmp(vdev->rom + pci_get_word(vdev->rom + 0x18), "PCIR", 4)) {
        uint16_t vid, did;

        vid = pci_get_word(vdev->rom + pci_get_word(vdev->rom + 0x18) + 4);
        did = pci_get_word(vdev->rom + pci_get_word(vdev->rom + 0x18) + 6);

        if (vid == vdev->vendor_id && did != vdev->device_id) {
            int i;
            uint8_t csum, *data = vdev->rom;

            pci_set_word(vdev->rom + pci_get_word(vdev->rom + 0x18) + 6,
                         vdev->device_id);
            data[6] = 0;

            for (csum = 0, i = 0; i < vdev->rom_size; i++) {
                csum += data[i];
            }

            data[6] = -csum;
        }
    }
}
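
/*
 * Layout assumed by the signature test above (standard PCI option ROM
 * format): offset 0x0 holds the 0xaa55 signature, the word at 0x18 points
 * to the PCI data ("PCIR") structure, and the vendor and device IDs sit at
 * PCIR + 4 and PCIR + 6. Byte 6 of the ROM header (a reserved byte) is then
 * reused as the checksum adjustment byte so the whole image sums to zero
 * again after the device ID is rewritten.
 */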

static uint64_t vfio_rom_read(void *opaque, hwaddr addr, unsigned size)
{
    VFIOPCIDevice *vdev = opaque;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } val;
    uint64_t data = 0;

    /* Load the ROM lazily when the guest tries to read it */
    if (unlikely(!vdev->rom && !vdev->rom_read_failed)) {
        vfio_pci_load_rom(vdev);
    }

    memcpy(&val, vdev->rom + addr,
           (addr < vdev->rom_size) ? MIN(size, vdev->rom_size - addr) : 0);

    switch (size) {
    case 1:
        data = val.byte;
        break;
    case 2:
        data = le16_to_cpu(val.word);
        break;
    case 4:
        data = le32_to_cpu(val.dword);
        break;
    default:
        hw_error("vfio: unsupported read size, %d bytes\n", size);
        break;
    }

    trace_vfio_rom_read(vdev->vbasedev.name, addr, size, data);

    return data;
}

static void vfio_rom_write(void *opaque, hwaddr addr,
                           uint64_t data, unsigned size)
{
}

static const MemoryRegionOps vfio_rom_ops = {
    .read = vfio_rom_read,
    .write = vfio_rom_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static void vfio_pci_size_rom(VFIOPCIDevice *vdev)
{
    uint32_t orig, size = cpu_to_le32((uint32_t)PCI_ROM_ADDRESS_MASK);
    off_t offset = vdev->config_offset + PCI_ROM_ADDRESS;
    DeviceState *dev = DEVICE(vdev);
    char *name;
    int fd = vdev->vbasedev.fd;

    if (vdev->pdev.romfile || !vdev->pdev.rom_bar) {
        /* Since pci handles romfile, just print a message and return */
        if (vfio_opt_rom_in_denylist(vdev) && vdev->pdev.romfile) {
            warn_report("Device at %s is known to cause system instability"
                        " issues during option rom execution",
                        vdev->vbasedev.name);
            error_printf("Proceeding anyway since user specified romfile\n");
        }
        return;
    }

    /*
     * Use the same size ROM BAR as the physical device. The contents
     * will get filled in later when the guest tries to read it.
     */
    if (pread(fd, &orig, 4, offset) != 4 ||
        pwrite(fd, &size, 4, offset) != 4 ||
        pread(fd, &size, 4, offset) != 4 ||
        pwrite(fd, &orig, 4, offset) != 4) {
        error_report("%s(%s) failed: %m", __func__, vdev->vbasedev.name);
        return;
    }

    size = ~(le32_to_cpu(size) & PCI_ROM_ADDRESS_MASK) + 1;

    if (!size) {
        return;
    }

    if (vfio_opt_rom_in_denylist(vdev)) {
        if (dev->opts && qdict_haskey(dev->opts, "rombar")) {
            warn_report("Device at %s is known to cause system instability"
                        " issues during option rom execution",
                        vdev->vbasedev.name);
            error_printf("Proceeding anyway since user specified"
                         " non-zero value for rombar\n");
        } else {
            warn_report("Rom loading for device at %s has been disabled"
                        " due to system instability issues",
                        vdev->vbasedev.name);
            error_printf("Specify rombar=1 or romfile to force\n");
            return;
        }
    }

    trace_vfio_pci_size_rom(vdev->vbasedev.name, size);

    name = g_strdup_printf("vfio[%s].rom", vdev->vbasedev.name);

    memory_region_init_io(&vdev->pdev.rom, OBJECT(vdev),
                          &vfio_rom_ops, vdev, name, size);
    g_free(name);

    pci_register_bar(&vdev->pdev, PCI_ROM_SLOT,
                     PCI_BASE_ADDRESS_SPACE_MEMORY, &vdev->pdev.rom);

    vdev->rom_read_failed = false;
}

void vfio_vga_write(void *opaque, hwaddr addr,
                    uint64_t data, unsigned size)
{
    VFIOVGARegion *region = opaque;
    VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]);
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    off_t offset = vga->fd_offset + region->offset + addr;

    switch (size) {
    case 1:
        buf.byte = data;
        break;
    case 2:
        buf.word = cpu_to_le16(data);
        break;
    case 4:
        buf.dword = cpu_to_le32(data);
        break;
    default:
        hw_error("vfio: unsupported write size, %d bytes", size);
        break;
    }

    if (pwrite(vga->fd, &buf, size, offset) != size) {
        error_report("%s(,0x%"HWADDR_PRIx", 0x%"PRIx64", %d) failed: %m",
                     __func__, region->offset + addr, data, size);
    }

    trace_vfio_vga_write(region->offset + addr, data, size);
}

uint64_t vfio_vga_read(void *opaque, hwaddr addr, unsigned size)
{
    VFIOVGARegion *region = opaque;
    VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]);
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    uint64_t data = 0;
    off_t offset = vga->fd_offset + region->offset + addr;

    if (pread(vga->fd, &buf, size, offset) != size) {
        error_report("%s(,0x%"HWADDR_PRIx", %d) failed: %m",
                     __func__, region->offset + addr, size);
        return (uint64_t)-1;
    }

    switch (size) {
    case 1:
        data = buf.byte;
        break;
    case 2:
        data = le16_to_cpu(buf.word);
        break;
    case 4:
        data = le32_to_cpu(buf.dword);
        break;
    default:
        hw_error("vfio: unsupported read size, %d bytes", size);
        break;
    }

    trace_vfio_vga_read(region->offset + addr, size, data);

    return data;
}

static const MemoryRegionOps vfio_vga_ops = {
    .read = vfio_vga_read,
    .write = vfio_vga_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

/*
 * Expand the memory region of a sub-page (size < PAGE_SIZE) MMIO BAR to
 * page size if the BAR occupies an exclusive page on the host, so that we
 * can map the BAR into the guest. The sub-page BAR may not occupy an
 * exclusive page in the guest, however, so we set the priority of the
 * expanded memory region to zero in case it overlaps with BARs that share
 * the same page as the sub-page BAR in the guest. We also restore the
 * size of the sub-page BAR when its base address is changed in the guest
 * and is no longer page aligned.
 */
static void vfio_sub_page_bar_update_mapping(PCIDevice *pdev, int bar)
{
    VFIOPCIDevice *vdev = VFIO_PCI(pdev);
    VFIORegion *region = &vdev->bars[bar].region;
    MemoryRegion *mmap_mr, *region_mr, *base_mr;
    PCIIORegion *r;
    pcibus_t bar_addr;
    uint64_t size = region->size;

    /* Make sure that the whole region is allowed to be mmapped */
    if (region->nr_mmaps != 1 || !region->mmaps[0].mmap ||
        region->mmaps[0].size != region->size) {
        return;
    }

    r = &pdev->io_regions[bar];
    bar_addr = r->addr;
    base_mr = vdev->bars[bar].mr;
    region_mr = region->mem;
    mmap_mr = &region->mmaps[0].mem;

    /* If BAR is mapped and page aligned, update to fill PAGE_SIZE */
    if (bar_addr != PCI_BAR_UNMAPPED &&
        !(bar_addr & ~qemu_real_host_page_mask())) {
        size = qemu_real_host_page_size();
    }

    memory_region_transaction_begin();

    if (vdev->bars[bar].size < size) {
        memory_region_set_size(base_mr, size);
    }
    memory_region_set_size(region_mr, size);
    memory_region_set_size(mmap_mr, size);
    if (size != vdev->bars[bar].size && memory_region_is_mapped(base_mr)) {
        memory_region_del_subregion(r->address_space, base_mr);
        memory_region_add_subregion_overlap(r->address_space,
                                            bar_addr, base_mr, 0);
    }

    memory_region_transaction_commit();
}
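
/*
 * Concrete case (illustrative): a 256-byte BAR placed at a 4KiB-aligned
 * guest address on a 4KiB-page host is grown here to 4096 bytes so the
 * host mmap can be used; if the guest later moves it to an unaligned
 * address, size falls back to region->size (256) and accesses take the
 * trapped read/write path instead.
 */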

/*
 * PCI config space
 */
uint32_t vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len)
{
    VFIOPCIDevice *vdev = VFIO_PCI(pdev);
    uint32_t emu_bits = 0, emu_val = 0, phys_val = 0, val;

    memcpy(&emu_bits, vdev->emulated_config_bits + addr, len);
    emu_bits = le32_to_cpu(emu_bits);

    if (emu_bits) {
        emu_val = pci_default_read_config(pdev, addr, len);
    }

    if (~emu_bits & (0xffffffffU >> (32 - len * 8))) {
        ssize_t ret;

        ret = pread(vdev->vbasedev.fd, &phys_val, len,
                    vdev->config_offset + addr);
        if (ret != len) {
            error_report("%s(%s, 0x%x, 0x%x) failed: %m",
                         __func__, vdev->vbasedev.name, addr, len);
            return -errno;
        }
        phys_val = le32_to_cpu(phys_val);
    }

    val = (emu_val & emu_bits) | (phys_val & ~emu_bits);

    trace_vfio_pci_read_config(vdev->vbasedev.name, addr, len, val);

    return val;
}
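
/*
 * Worked example of the merge above (illustrative values): for a 2-byte
 * read with emu_bits == 0x00ff, the low byte comes from QEMU's emulated
 * config space (emu_val & emu_bits) and the high byte from the physical
 * device (phys_val & ~emu_bits). emu_bits == 0 skips the emulated read
 * entirely, and an all-ones mask skips the pread() from the vfio fd.
 */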

void vfio_pci_write_config(PCIDevice *pdev,
                           uint32_t addr, uint32_t val, int len)
{
    VFIOPCIDevice *vdev = VFIO_PCI(pdev);
    uint32_t val_le = cpu_to_le32(val);

    trace_vfio_pci_write_config(vdev->vbasedev.name, addr, val, len);

    /* Write everything to VFIO, let it filter out what we can't write */
    if (pwrite(vdev->vbasedev.fd, &val_le, len, vdev->config_offset + addr)
        != len) {
        error_report("%s(%s, 0x%x, 0x%x, 0x%x) failed: %m",
                     __func__, vdev->vbasedev.name, addr, val, len);
    }

    /* MSI/MSI-X Enabling/Disabling */
    if (pdev->cap_present & QEMU_PCI_CAP_MSI &&
        ranges_overlap(addr, len, pdev->msi_cap, vdev->msi_cap_size)) {
        int is_enabled, was_enabled = msi_enabled(pdev);

        pci_default_write_config(pdev, addr, val, len);

        is_enabled = msi_enabled(pdev);

        if (!was_enabled) {
            if (is_enabled) {
                vfio_msi_enable(vdev);
            }
        } else {
            if (!is_enabled) {
                vfio_msi_disable(vdev);
            } else {
                vfio_update_msi(vdev);
            }
        }
    } else if (pdev->cap_present & QEMU_PCI_CAP_MSIX &&
               ranges_overlap(addr, len, pdev->msix_cap, MSIX_CAP_LENGTH)) {
        int is_enabled, was_enabled = msix_enabled(pdev);

        pci_default_write_config(pdev, addr, val, len);

        is_enabled = msix_enabled(pdev);

        if (!was_enabled && is_enabled) {
            vfio_msix_enable(vdev);
        } else if (was_enabled && !is_enabled) {
            vfio_msix_disable(vdev);
        }
    } else if (ranges_overlap(addr, len, PCI_BASE_ADDRESS_0, 24) ||
               range_covers_byte(addr, len, PCI_COMMAND)) {
        pcibus_t old_addr[PCI_NUM_REGIONS - 1];
        int bar;

        for (bar = 0; bar < PCI_ROM_SLOT; bar++) {
            old_addr[bar] = pdev->io_regions[bar].addr;
        }

        pci_default_write_config(pdev, addr, val, len);

        for (bar = 0; bar < PCI_ROM_SLOT; bar++) {
            if (old_addr[bar] != pdev->io_regions[bar].addr &&
                vdev->bars[bar].region.size > 0 &&
                vdev->bars[bar].region.size < qemu_real_host_page_size()) {
                vfio_sub_page_bar_update_mapping(pdev, bar);
            }
        }
    } else {
        /* Write everything to QEMU to keep emulated bits correct */
        pci_default_write_config(pdev, addr, val, len);
    }
}
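
/*
 * Trace of a typical transition through the code above (illustrative): a
 * guest write that sets PCI_MSI_FLAGS_ENABLE in the MSI control word lands
 * in the first branch; was_enabled == 0 and is_enabled == 1 after
 * pci_default_write_config(), so vfio_msi_enable() switches the physical
 * device over to MSI signaling.
 */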

/*
 * Interrupt setup
 */
static void vfio_disable_interrupts(VFIOPCIDevice *vdev)
{
    /*
     * More complicated than it looks. Disabling MSI/X transitions the
     * device to INTx mode (if supported). Therefore we need to first
     * disable MSI/X and then cleanup by disabling INTx.
     */
    if (vdev->interrupt == VFIO_INT_MSIX) {
        vfio_msix_disable(vdev);
    } else if (vdev->interrupt == VFIO_INT_MSI) {
        vfio_msi_disable(vdev);
    }

    if (vdev->interrupt == VFIO_INT_INTx) {
        vfio_intx_disable(vdev);
    }
}

static int vfio_msi_setup(VFIOPCIDevice *vdev, int pos, Error **errp)
{
    uint16_t ctrl;
    bool msi_64bit, msi_maskbit;
    int ret, entries;
    Error *err = NULL;

    if (pread(vdev->vbasedev.fd, &ctrl, sizeof(ctrl),
              vdev->config_offset + pos + PCI_CAP_FLAGS) != sizeof(ctrl)) {
        error_setg_errno(errp, errno, "failed reading MSI PCI_CAP_FLAGS");
        return -errno;
    }
    ctrl = le16_to_cpu(ctrl);

    msi_64bit = !!(ctrl & PCI_MSI_FLAGS_64BIT);
    msi_maskbit = !!(ctrl & PCI_MSI_FLAGS_MASKBIT);
    entries = 1 << ((ctrl & PCI_MSI_FLAGS_QMASK) >> 1);

    trace_vfio_msi_setup(vdev->vbasedev.name, pos);

    ret = msi_init(&vdev->pdev, pos, entries, msi_64bit, msi_maskbit, &err);
    if (ret < 0) {
        if (ret == -ENOTSUP) {
            return 0;
        }
        error_propagate_prepend(errp, err, "msi_init failed: ");
        return ret;
    }
    vdev->msi_cap_size = 0xa + (msi_maskbit ? 0xa : 0) + (msi_64bit ? 0x4 : 0);

    return 0;
}

static void vfio_pci_fixup_msix_region(VFIOPCIDevice *vdev)
{
    off_t start, end;
    VFIORegion *region = &vdev->bars[vdev->msix->table_bar].region;

    /*
     * If the host driver allows mapping of the MSI-X data, we are going to
     * map the entire BAR and emulate the MSI-X table on top of that.
     */
    if (vfio_has_region_cap(&vdev->vbasedev, region->nr,
                            VFIO_REGION_INFO_CAP_MSIX_MAPPABLE)) {
        return;
    }

    /*
     * We expect to find a single mmap covering the whole BAR, anything else
     * means it's either unsupported or already setup.
     */
    if (region->nr_mmaps != 1 || region->mmaps[0].offset ||
        region->size != region->mmaps[0].size) {
        return;
    }

    /* MSI-X table start and end aligned to host page size */
    start = vdev->msix->table_offset & qemu_real_host_page_mask();
    end = REAL_HOST_PAGE_ALIGN((uint64_t)vdev->msix->table_offset +
                               (vdev->msix->entries * PCI_MSIX_ENTRY_SIZE));

    /*
     * Does the MSI-X table cover the beginning of the BAR? The whole BAR?
     * NB - Host page size is necessarily a power of two and so is the PCI
     * BAR (not counting EA yet), therefore if we have host page aligned
     * @start and @end, then any remainder of the BAR before or after those
     * must be at least host page sized and therefore mmap'able.
     */
    if (!start) {
        if (end >= region->size) {
            region->nr_mmaps = 0;
            g_free(region->mmaps);
            region->mmaps = NULL;
            trace_vfio_msix_fixup(vdev->vbasedev.name,
                                  vdev->msix->table_bar, 0, 0);
        } else {
            region->mmaps[0].offset = end;
            region->mmaps[0].size = region->size - end;
            trace_vfio_msix_fixup(vdev->vbasedev.name,
                                  vdev->msix->table_bar, region->mmaps[0].offset,
                                  region->mmaps[0].offset + region->mmaps[0].size);
        }

    /* Maybe it's aligned at the end of the BAR */
    } else if (end >= region->size) {
        region->mmaps[0].size = start;
        trace_vfio_msix_fixup(vdev->vbasedev.name,
                              vdev->msix->table_bar, region->mmaps[0].offset,
                              region->mmaps[0].offset + region->mmaps[0].size);

    /* Otherwise it must split the BAR */
    } else {
        region->nr_mmaps = 2;
        region->mmaps = g_renew(VFIOMmap, region->mmaps, 2);

        memcpy(&region->mmaps[1], &region->mmaps[0], sizeof(VFIOMmap));

        region->mmaps[0].size = start;
        trace_vfio_msix_fixup(vdev->vbasedev.name,
                              vdev->msix->table_bar, region->mmaps[0].offset,
                              region->mmaps[0].offset + region->mmaps[0].size);

        region->mmaps[1].offset = end;
        region->mmaps[1].size = region->size - end;
        trace_vfio_msix_fixup(vdev->vbasedev.name,
                              vdev->msix->table_bar, region->mmaps[1].offset,
                              region->mmaps[1].offset + region->mmaps[1].size);
    }
}

static void vfio_pci_relocate_msix(VFIOPCIDevice *vdev, Error **errp)
{
    int target_bar = -1;
    size_t msix_sz;

    if (!vdev->msix || vdev->msix_relo == OFF_AUTOPCIBAR_OFF) {
        return;
    }

    /* The actual minimum size of MSI-X structures */
    msix_sz = (vdev->msix->entries * PCI_MSIX_ENTRY_SIZE) +
              (QEMU_ALIGN_UP(vdev->msix->entries, 64) / 8);
    /* Round up to host pages, we don't want to share a page */
    msix_sz = REAL_HOST_PAGE_ALIGN(msix_sz);
    /* PCI BARs must be a power of 2 */
    msix_sz = pow2ceil(msix_sz);

    if (vdev->msix_relo == OFF_AUTOPCIBAR_AUTO) {
        /*
         * TODO: Lookup table for known devices.
         *
         * Logically we might use an algorithm here to select the BAR adding
         * the least additional MMIO space, but we cannot programmatically
         * predict the driver dependency on BAR ordering or sizing, therefore
         * 'auto' becomes a lookup for combinations reported to work.
         */
        if (target_bar < 0) {
            error_setg(errp, "No automatic MSI-X relocation available for "
                       "device %04x:%04x", vdev->vendor_id, vdev->device_id);
            return;
        }
    } else {
        target_bar = (int)(vdev->msix_relo - OFF_AUTOPCIBAR_BAR0);
    }

    /* I/O port BARs cannot host MSI-X structures */
    if (vdev->bars[target_bar].ioport) {
        error_setg(errp, "Invalid MSI-X relocation BAR %d, "
                   "I/O port BAR", target_bar);
        return;
    }

    /* Cannot use a BAR in the "shadow" of a 64-bit BAR */
    if (!vdev->bars[target_bar].size &&
         target_bar > 0 && vdev->bars[target_bar - 1].mem64) {
        error_setg(errp, "Invalid MSI-X relocation BAR %d, "
                   "consumed by 64-bit BAR %d", target_bar, target_bar - 1);
        return;
    }

    /* 2GB max size for 32-bit BARs, cannot double if already > 1G */
    if (vdev->bars[target_bar].size > 1 * GiB &&
        !vdev->bars[target_bar].mem64) {
        error_setg(errp, "Invalid MSI-X relocation BAR %d, "
                   "no space to extend 32-bit BAR", target_bar);
        return;
    }

    /*
     * If adding a new BAR, test if we can make it 64bit. We make it
     * prefetchable since QEMU MSI-X emulation has no read side effects
     * and doing so makes mapping more flexible.
     */
    if (!vdev->bars[target_bar].size) {
        if (target_bar < (PCI_ROM_SLOT - 1) &&
            !vdev->bars[target_bar + 1].size) {
            vdev->bars[target_bar].mem64 = true;
            vdev->bars[target_bar].type = PCI_BASE_ADDRESS_MEM_TYPE_64;
        }
        vdev->bars[target_bar].type |= PCI_BASE_ADDRESS_MEM_PREFETCH;
        vdev->bars[target_bar].size = msix_sz;
        vdev->msix->table_offset = 0;
    } else {
        vdev->bars[target_bar].size = MAX(vdev->bars[target_bar].size * 2,
                                          msix_sz * 2);
        /*
         * Due to above size calc, MSI-X always starts halfway into the BAR,
         * which will always be a separate host page.
         */
        vdev->msix->table_offset = vdev->bars[target_bar].size / 2;
    }

    vdev->msix->table_bar = target_bar;
    vdev->msix->pba_bar = target_bar;
    /* Requires 8-byte alignment, but PCI_MSIX_ENTRY_SIZE guarantees that */
    vdev->msix->pba_offset = vdev->msix->table_offset +
                             (vdev->msix->entries * PCI_MSIX_ENTRY_SIZE);

    trace_vfio_msix_relo(vdev->vbasedev.name,
                         vdev->msix->table_bar, vdev->msix->table_offset);
}
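
/*
 * Sizing example for the calculation above (illustrative): 16 MSI-X
 * entries need 16 * PCI_MSIX_ENTRY_SIZE (16) = 256 bytes of table plus
 * QEMU_ALIGN_UP(16, 64) / 8 = 8 bytes of PBA; REAL_HOST_PAGE_ALIGN brings
 * that to 4096 on a 4KiB-page host and pow2ceil() leaves it at 4096, so a
 * new BAR would be 4KiB while an existing BAR is doubled to at least 8KiB
 * with the structures starting halfway in.
 */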

/*
 * We don't have any control over how pci_add_capability() inserts
 * capabilities into the chain. In order to setup MSI-X we need a
 * MemoryRegion for the BAR. In order to setup the BAR and not
 * attempt to mmap the MSI-X table area, which VFIO won't allow, we
 * need to first look for where the MSI-X table lives. So we
 * unfortunately split MSI-X setup across two functions.
 */
static void vfio_msix_early_setup(VFIOPCIDevice *vdev, Error **errp)
{
    uint8_t pos;
    uint16_t ctrl;
    uint32_t table, pba;
    int fd = vdev->vbasedev.fd;
    VFIOMSIXInfo *msix;

    pos = pci_find_capability(&vdev->pdev, PCI_CAP_ID_MSIX);
    if (!pos) {
        return;
    }

    if (pread(fd, &ctrl, sizeof(ctrl),
              vdev->config_offset + pos + PCI_MSIX_FLAGS) != sizeof(ctrl)) {
        error_setg_errno(errp, errno, "failed to read PCI MSIX FLAGS");
        return;
    }

    if (pread(fd, &table, sizeof(table),
              vdev->config_offset + pos + PCI_MSIX_TABLE) != sizeof(table)) {
        error_setg_errno(errp, errno, "failed to read PCI MSIX TABLE");
        return;
    }

    if (pread(fd, &pba, sizeof(pba),
              vdev->config_offset + pos + PCI_MSIX_PBA) != sizeof(pba)) {
        error_setg_errno(errp, errno, "failed to read PCI MSIX PBA");
        return;
    }

    ctrl = le16_to_cpu(ctrl);
    table = le32_to_cpu(table);
    pba = le32_to_cpu(pba);

    msix = g_malloc0(sizeof(*msix));
    msix->table_bar = table & PCI_MSIX_FLAGS_BIRMASK;
    msix->table_offset = table & ~PCI_MSIX_FLAGS_BIRMASK;
    msix->pba_bar = pba & PCI_MSIX_FLAGS_BIRMASK;
    msix->pba_offset = pba & ~PCI_MSIX_FLAGS_BIRMASK;
    msix->entries = (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;

    /*
     * Test the size of the pba_offset variable and catch if it extends outside
     * of the specified BAR. If it is the case, we need to apply a hardware
     * specific quirk if the device is known or we have a broken configuration.
     */
    if (msix->pba_offset >= vdev->bars[msix->pba_bar].region.size) {
        /*
         * Chelsio T5 Virtual Function devices are encoded as 0x58xx for T5
         * adapters. The T5 hardware returns an incorrect value of 0x8000 for
         * the VF PBA offset while the BAR itself is only 8k. The correct value
         * is 0x1000, so we hard code that here.
         */
        if (vdev->vendor_id == PCI_VENDOR_ID_CHELSIO &&
            (vdev->device_id & 0xff00) == 0x5800) {
            msix->pba_offset = 0x1000;
        /*
         * BAIDU KUNLUN Virtual Function devices for KUNLUN AI processor
         * return an incorrect value of 0x460000 for the VF PBA offset while
         * the BAR itself is only 0x10000. The correct value is 0xb400.
         */
        } else if (vfio_pci_is(vdev, PCI_VENDOR_ID_BAIDU,
                               PCI_DEVICE_ID_KUNLUN_VF)) {
            msix->pba_offset = 0xb400;
        } else if (vdev->msix_relo == OFF_AUTOPCIBAR_OFF) {
            error_setg(errp, "hardware reports invalid configuration, "
                       "MSIX PBA outside of specified BAR");
            g_free(msix);
            return;
        }
    }

    trace_vfio_msix_early_setup(vdev->vbasedev.name, pos, msix->table_bar,
                                msix->table_offset, msix->entries);
    vdev->msix = msix;

    vfio_pci_fixup_msix_region(vdev);

    vfio_pci_relocate_msix(vdev, errp);
}

static int vfio_msix_setup(VFIOPCIDevice *vdev, int pos, Error **errp)
{
    int ret;
    Error *err = NULL;

    vdev->msix->pending = g_new0(unsigned long,
                                 BITS_TO_LONGS(vdev->msix->entries));
    ret = msix_init(&vdev->pdev, vdev->msix->entries,
                    vdev->bars[vdev->msix->table_bar].mr,
                    vdev->msix->table_bar, vdev->msix->table_offset,
                    vdev->bars[vdev->msix->pba_bar].mr,
                    vdev->msix->pba_bar, vdev->msix->pba_offset, pos,
                    &err);
    if (ret < 0) {
        if (ret == -ENOTSUP) {
            warn_report_err(err);
            return 0;
        }

        error_propagate(errp, err);
        return ret;
    }

    /*
     * The PCI spec suggests that devices provide additional alignment for
     * MSI-X structures and avoid overlapping non-MSI-X related registers.
     * For an assigned device, this hopefully means that emulation of MSI-X
     * structures does not affect the performance of the device. If devices
     * fail to provide that alignment, a significant performance penalty may
     * result, for instance Mellanox MT27500 VFs:
     * http://www.spinics.net/lists/kvm/msg125881.html
     *
     * The PBA is simply not that important for such a serious regression and
     * most drivers do not appear to look at it. The solution for this is to
     * disable the PBA MemoryRegion unless it's being used. We disable it
     * here and only enable it if a masked vector fires through QEMU. As the
     * vector-use notifier is called, which occurs on unmask, we test whether
     * PBA emulation is needed and again disable if not.
     */
    memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, false);

    /*
     * The emulated machine may provide a paravirt interface for MSIX setup
     * so it is not strictly necessary to emulate MSIX here. This becomes
     * helpful when frequently accessed MMIO registers are located in
     * subpages adjacent to the MSIX table but the MSIX data containing page
     * cannot be mapped because of a host page size bigger than the MSIX table
     * alignment.
     */
    if (object_property_get_bool(OBJECT(qdev_get_machine()),
                                 "vfio-no-msix-emulation", NULL)) {
        memory_region_set_enabled(&vdev->pdev.msix_table_mmio, false);
    }

    return 0;
}

static void vfio_teardown_msi(VFIOPCIDevice *vdev)
{
    msi_uninit(&vdev->pdev);

    if (vdev->msix) {
        msix_uninit(&vdev->pdev,
                    vdev->bars[vdev->msix->table_bar].mr,
                    vdev->bars[vdev->msix->pba_bar].mr);
        g_free(vdev->msix->pending);
    }
}
1598
1599/*
1600 * Resource setup
1601 */
9ee27d73 1602static void vfio_mmap_set_enabled(VFIOPCIDevice *vdev, bool enabled)
65501a74
AW
1603{
1604 int i;
1605
1606 for (i = 0; i < PCI_ROM_SLOT; i++) {
db0da029 1607 vfio_region_mmaps_set_enabled(&vdev->bars[i].region, enabled);
65501a74
AW
1608 }
1609}
1610
3a286732 1611static void vfio_bar_prepare(VFIOPCIDevice *vdev, int nr)
65501a74
AW
1612{
1613 VFIOBAR *bar = &vdev->bars[nr];
1614
65501a74 1615 uint32_t pci_bar;
65501a74
AW
1616 int ret;
1617
1618 /* Skip both unimplemented BARs and the upper half of 64bit BARS. */
2d82f8a3 1619 if (!bar->region.size) {
65501a74
AW
1620 return;
1621 }
1622
65501a74 1623 /* Determine what type of BAR this is for registration */
5546a621 1624 ret = pread(vdev->vbasedev.fd, &pci_bar, sizeof(pci_bar),
65501a74
AW
1625 vdev->config_offset + PCI_BASE_ADDRESS_0 + (4 * nr));
1626 if (ret != sizeof(pci_bar)) {
312fd5f2 1627 error_report("vfio: Failed to read BAR %d (%m)", nr);
65501a74
AW
1628 return;
1629 }
1630
1631 pci_bar = le32_to_cpu(pci_bar);
39360f0b
AW
1632 bar->ioport = (pci_bar & PCI_BASE_ADDRESS_SPACE_IO);
1633 bar->mem64 = bar->ioport ? 0 : (pci_bar & PCI_BASE_ADDRESS_MEM_TYPE_64);
3a286732
AW
1634 bar->type = pci_bar & (bar->ioport ? ~PCI_BASE_ADDRESS_IO_MASK :
1635 ~PCI_BASE_ADDRESS_MEM_MASK);
1636 bar->size = bar->region.size;
1637}
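/*
 * (Editor's note) A minimal sketch of the BAR register decode performed
 * above, for reference only -- guarded out so it is not compiled.  It
 * uses the same PCI_BASE_ADDRESS_* constants as the code above.
 */
#if 0
static bool example_bar_is_io(uint32_t pci_bar)
{
    return pci_bar & PCI_BASE_ADDRESS_SPACE_IO;            /* bit 0 */
}

static bool example_bar_is_mem64(uint32_t pci_bar)
{
    /* For memory BARs, type bits 2:1 == 2 select a 64-bit BAR */
    return !example_bar_is_io(pci_bar) &&
           (pci_bar & PCI_BASE_ADDRESS_MEM_TYPE_64);
}

static uint32_t example_bar_type_bits(uint32_t pci_bar)
{
    /* Low flag bits, preserved for pci_register_bar() */
    return pci_bar & (example_bar_is_io(pci_bar) ?
                      ~PCI_BASE_ADDRESS_IO_MASK :
                      ~PCI_BASE_ADDRESS_MEM_MASK);
}
#endif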
1638
1639static void vfio_bars_prepare(VFIOPCIDevice *vdev)
1640{
1641 int i;
1642
1643 for (i = 0; i < PCI_ROM_SLOT; i++) {
1644 vfio_bar_prepare(vdev, i);
1645 }
1646}
1647
1648static void vfio_bar_register(VFIOPCIDevice *vdev, int nr)
1649{
1650 VFIOBAR *bar = &vdev->bars[nr];
1651 char *name;
65501a74 1652
3a286732
AW
1653 if (!bar->size) {
1654 return;
65501a74 1655 }
7076eabc 1656
3a286732
AW
1657 bar->mr = g_new0(MemoryRegion, 1);
1658 name = g_strdup_printf("%s base BAR %d", vdev->vbasedev.name, nr);
1659 memory_region_init_io(bar->mr, OBJECT(vdev), NULL, NULL, name, bar->size);
1660 g_free(name);
1661
1662 if (bar->region.size) {
1663 memory_region_add_subregion(bar->mr, 0, bar->region.mem);
1664
1665 if (vfio_region_mmap(&bar->region)) {
1666 error_report("Failed to mmap %s BAR %d. Performance may be slow",
1667 vdev->vbasedev.name, nr);
1668 }
1669 }
1670
1671 pci_register_bar(&vdev->pdev, nr, bar->type, bar->mr);
65501a74
AW
1672}
1673
3a286732 1674static void vfio_bars_register(VFIOPCIDevice *vdev)
65501a74
AW
1675{
1676 int i;
1677
1678 for (i = 0; i < PCI_ROM_SLOT; i++) {
3a286732 1679 vfio_bar_register(vdev, i);
65501a74
AW
1680 }
1681}
1682
2d82f8a3 1683static void vfio_bars_exit(VFIOPCIDevice *vdev)
65501a74
AW
1684{
1685 int i;
1686
1687 for (i = 0; i < PCI_ROM_SLOT; i++) {
3a286732
AW
1688 VFIOBAR *bar = &vdev->bars[i];
1689
2d82f8a3 1690 vfio_bar_quirk_exit(vdev, i);
3a286732
AW
1691 vfio_region_exit(&bar->region);
1692 if (bar->region.size) {
1693 memory_region_del_subregion(bar->mr, bar->region.mem);
1694 }
65501a74 1695 }
f15689c7 1696
2d82f8a3 1697 if (vdev->vga) {
f15689c7 1698 pci_unregister_vga(&vdev->pdev);
2d82f8a3 1699 vfio_vga_quirk_exit(vdev);
f15689c7 1700 }
65501a74
AW
1701}
1702
2d82f8a3 1703static void vfio_bars_finalize(VFIOPCIDevice *vdev)
ba5e6bfa
PB
1704{
1705 int i;
1706
1707 for (i = 0; i < PCI_ROM_SLOT; i++) {
3a286732
AW
1708 VFIOBAR *bar = &vdev->bars[i];
1709
2d82f8a3 1710 vfio_bar_quirk_finalize(vdev, i);
3a286732
AW
1711 vfio_region_finalize(&bar->region);
1712 if (bar->size) {
1713 object_unparent(OBJECT(bar->mr));
1714 g_free(bar->mr);
1715 }
ba5e6bfa
PB
1716 }
1717
2d82f8a3
AW
1718 if (vdev->vga) {
1719 vfio_vga_quirk_finalize(vdev);
1720 for (i = 0; i < ARRAY_SIZE(vdev->vga->region); i++) {
1721 object_unparent(OBJECT(&vdev->vga->region[i].mem));
1722 }
1723 g_free(vdev->vga);
ba5e6bfa
PB
1724 }
1725}
1726
65501a74
AW
1727/*
1728 * General setup
1729 */
1730static uint8_t vfio_std_cap_max_size(PCIDevice *pdev, uint8_t pos)
1731{
88caf177
CF
1732 uint8_t tmp;
1733 uint16_t next = PCI_CONFIG_SPACE_SIZE;
65501a74
AW
1734
1735 for (tmp = pdev->config[PCI_CAPABILITY_LIST]; tmp;
3fc1c182 1736 tmp = pdev->config[tmp + PCI_CAP_LIST_NEXT]) {
65501a74
AW
1737 if (tmp > pos && tmp < next) {
1738 next = tmp;
1739 }
1740 }
1741
1742 return next - pos;
1743}
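/*
 * (Editor's note) Worked example: with standard capabilities at 0x40,
 * 0x60 and 0x50 (list order is arbitrary), the smallest offset greater
 * than pos = 0x50 is 0x60, so that capability is sized 0x10 bytes.  For
 * the highest-placed capability, next stays at PCI_CONFIG_SPACE_SIZE
 * (0x100), extending it to the end of standard config space.
 */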
1744
325ae8d5
CF
1745
1746static uint16_t vfio_ext_cap_max_size(const uint8_t *config, uint16_t pos)
1747{
1748 uint16_t tmp, next = PCIE_CONFIG_SPACE_SIZE;
1749
1750 for (tmp = PCI_CONFIG_SPACE_SIZE; tmp;
1751 tmp = PCI_EXT_CAP_NEXT(pci_get_long(config + tmp))) {
1752 if (tmp > pos && tmp < next) {
1753 next = tmp;
1754 }
1755 }
1756
1757 return next - pos;
1758}
1759
96adc5c7
AW
1760static void vfio_set_word_bits(uint8_t *buf, uint16_t val, uint16_t mask)
1761{
1762 pci_set_word(buf, (pci_get_word(buf) & ~mask) | val);
1763}
1764
9ee27d73 1765static void vfio_add_emulated_word(VFIOPCIDevice *vdev, int pos,
96adc5c7
AW
1766 uint16_t val, uint16_t mask)
1767{
1768 vfio_set_word_bits(vdev->pdev.config + pos, val, mask);
1769 vfio_set_word_bits(vdev->pdev.wmask + pos, ~mask, mask);
1770 vfio_set_word_bits(vdev->emulated_config_bits + pos, mask, mask);
1771}
1772
1773static void vfio_set_long_bits(uint8_t *buf, uint32_t val, uint32_t mask)
1774{
1775 pci_set_long(buf, (pci_get_long(buf) & ~mask) | val);
1776}
1777
9ee27d73 1778static void vfio_add_emulated_long(VFIOPCIDevice *vdev, int pos,
96adc5c7
AW
1779 uint32_t val, uint32_t mask)
1780{
1781 vfio_set_long_bits(vdev->pdev.config + pos, val, mask);
1782 vfio_set_long_bits(vdev->pdev.wmask + pos, ~mask, mask);
1783 vfio_set_long_bits(vdev->emulated_config_bits + pos, mask, mask);
1784}
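/*
 * (Editor's note) Net effect of the two helpers above for the bits
 * covered by 'mask': the guest reads back 'val' from QEMU's cached
 * config copy, those bits are read-only to the guest, and the matching
 * emulated_config_bits are set so accesses are served by the emulation
 * instead of the physical device.
 */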
1785
7ef165b9
EA
1786static int vfio_setup_pcie_cap(VFIOPCIDevice *vdev, int pos, uint8_t size,
1787 Error **errp)
96adc5c7
AW
1788{
1789 uint16_t flags;
1790 uint8_t type;
1791
1792 flags = pci_get_word(vdev->pdev.config + pos + PCI_CAP_FLAGS);
1793 type = (flags & PCI_EXP_FLAGS_TYPE) >> 4;
1794
1795 if (type != PCI_EXP_TYPE_ENDPOINT &&
1796 type != PCI_EXP_TYPE_LEG_END &&
1797 type != PCI_EXP_TYPE_RC_END) {
1798
7ef165b9
EA
1799 error_setg(errp, "assignment of PCIe type 0x%x "
1800 "devices is not currently supported", type);
96adc5c7
AW
1801 return -EINVAL;
1802 }
1803
fd56e061
DG
1804 if (!pci_bus_is_express(pci_get_bus(&vdev->pdev))) {
1805 PCIBus *bus = pci_get_bus(&vdev->pdev);
0282abf0
AW
1806 PCIDevice *bridge;
1807
96adc5c7 1808 /*
0282abf0
AW
1809 * Traditionally PCI device assignment exposes the PCIe capability
1810 * as-is on non-express buses. The reason is that some drivers
1811 * simply assume that it's there, for example tg3. However, when
1812 * we're running on a native PCIe machine type, like Q35, we need
1813 * to hide the PCIe capability. The reason for this is twofold:
1814 * first, Windows guests get a Code 10 error when the PCIe capability
1815 * is exposed in this configuration. Therefore express devices won't
1816 * work at all unless they're attached to express buses in the VM.
1817 * Second, a native PCIe machine introduces the possibility of fine
1818 * granularity IOMMUs supporting both translation and isolation.
1819 * Guest code to discover the IOMMU visibility of a device, such as
1820 * IOMMU grouping code on Linux, is very aware of device types and
1821 * valid transitions between bus types. An express device on a non-
1822 * express bus is not a valid combination on bare metal systems.
1823 *
1824 * Drivers that require a PCIe capability to make the device
1825 * functional are simply going to need to have their devices placed
1826 * on a PCIe bus in the VM.
96adc5c7 1827 */
0282abf0
AW
1828 while (!pci_bus_is_root(bus)) {
1829 bridge = pci_bridge_get_device(bus);
fd56e061 1830 bus = pci_get_bus(bridge);
0282abf0
AW
1831 }
1832
1833 if (pci_bus_is_express(bus)) {
1834 return 0;
1835 }
1836
fd56e061 1837 } else if (pci_bus_is_root(pci_get_bus(&vdev->pdev))) {
96adc5c7
AW
1838 /*
1839 * On a Root Complex bus Endpoints become Root Complex Integrated
1840 * Endpoints, which changes the type and clears the LNK & LNK2 fields.
1841 */
1842 if (type == PCI_EXP_TYPE_ENDPOINT) {
1843 vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
1844 PCI_EXP_TYPE_RC_END << 4,
1845 PCI_EXP_FLAGS_TYPE);
1846
1847 /* Link Capabilities, Status, and Control go away */
1848 if (size > PCI_EXP_LNKCTL) {
1849 vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP, 0, ~0);
1850 vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0);
1851 vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA, 0, ~0);
1852
1853#ifndef PCI_EXP_LNKCAP2
1854#define PCI_EXP_LNKCAP2 44
1855#endif
1856#ifndef PCI_EXP_LNKSTA2
1857#define PCI_EXP_LNKSTA2 50
1858#endif
1859 /* Link 2 Capabilities, Status, and Control go away */
1860 if (size > PCI_EXP_LNKCAP2) {
1861 vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP2, 0, ~0);
1862 vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL2, 0, ~0);
1863 vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA2, 0, ~0);
1864 }
1865 }
1866
1867 } else if (type == PCI_EXP_TYPE_LEG_END) {
1868 /*
1869 * Legacy endpoints don't belong on the root complex. Windows
1870 * seems to be happier with devices if we skip the capability.
1871 */
1872 return 0;
1873 }
1874
1875 } else {
1876 /*
1877 * Convert Root Complex Integrated Endpoints to regular endpoints.
1878 * These devices don't support LNK/LNK2 capabilities, so make them up.
1879 */
1880 if (type == PCI_EXP_TYPE_RC_END) {
1881 vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
1882 PCI_EXP_TYPE_ENDPOINT << 4,
1883 PCI_EXP_FLAGS_TYPE);
1884 vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP,
d96a0ac7
AW
1885 QEMU_PCI_EXP_LNKCAP_MLW(QEMU_PCI_EXP_LNK_X1) |
1886 QEMU_PCI_EXP_LNKCAP_MLS(QEMU_PCI_EXP_LNK_2_5GT), ~0);
96adc5c7
AW
1887 vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0);
1888 }
96adc5c7
AW
1889 }
1890
47985727
AW
1891 /*
1892 * Intel 82599 SR-IOV VFs report an invalid PCIe capability version 0
1893 * (Niantic errata #35) causing Windows to error with a Code 10 for the
1894 * device on Q35. Fix up any such devices to report version 1. If we
1895 * were to remove the capability entirely the guest would lose extended
1896 * config space.
1897 */
1898 if ((flags & PCI_EXP_FLAGS_VERS) == 0) {
1899 vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
1900 1, PCI_EXP_FLAGS_VERS);
1901 }
1902
9a7c2a59
MZ
1903 pos = pci_add_capability(&vdev->pdev, PCI_CAP_ID_EXP, pos, size,
1904 errp);
1905 if (pos < 0) {
1906 return pos;
96adc5c7
AW
1907 }
1908
9a7c2a59
MZ
1909 vdev->pdev.exp.exp_cap = pos;
1910
96adc5c7
AW
1911 return pos;
1912}
1913
9ee27d73 1914static void vfio_check_pcie_flr(VFIOPCIDevice *vdev, uint8_t pos)
befe5176
AW
1915{
1916 uint32_t cap = pci_get_long(vdev->pdev.config + pos + PCI_EXP_DEVCAP);
1917
1918 if (cap & PCI_EXP_DEVCAP_FLR) {
df92ee44 1919 trace_vfio_check_pcie_flr(vdev->vbasedev.name);
befe5176
AW
1920 vdev->has_flr = true;
1921 }
1922}
1923
9ee27d73 1924static void vfio_check_pm_reset(VFIOPCIDevice *vdev, uint8_t pos)
befe5176
AW
1925{
1926 uint16_t csr = pci_get_word(vdev->pdev.config + pos + PCI_PM_CTRL);
1927
1928 if (!(csr & PCI_PM_CTRL_NO_SOFT_RESET)) {
df92ee44 1929 trace_vfio_check_pm_reset(vdev->vbasedev.name);
befe5176
AW
1930 vdev->has_pm_reset = true;
1931 }
1932}
1933
9ee27d73 1934static void vfio_check_af_flr(VFIOPCIDevice *vdev, uint8_t pos)
befe5176
AW
1935{
1936 uint8_t cap = pci_get_byte(vdev->pdev.config + pos + PCI_AF_CAP);
1937
1938 if ((cap & PCI_AF_CAP_TP) && (cap & PCI_AF_CAP_FLR)) {
df92ee44 1939 trace_vfio_check_af_flr(vdev->vbasedev.name);
befe5176
AW
1940 vdev->has_flr = true;
1941 }
1942}
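/*
 * (Editor's note) The three probes above record which reset mechanisms
 * the device advertises: Function Level Reset via the PCIe device
 * capabilities, a D3hot->D0 transition that resets device state (PM
 * reset, i.e. No_Soft_Reset clear), and FLR via the Advanced Features
 * capability found on some conventional PCI functions.  These feed the
 * reset strategy in vfio_pci_reset() below.
 */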
1943
7ef165b9 1944static int vfio_add_std_cap(VFIOPCIDevice *vdev, uint8_t pos, Error **errp)
65501a74
AW
1945{
1946 PCIDevice *pdev = &vdev->pdev;
1947 uint8_t cap_id, next, size;
1948 int ret;
1949
1950 cap_id = pdev->config[pos];
3fc1c182 1951 next = pdev->config[pos + PCI_CAP_LIST_NEXT];
65501a74
AW
1952
1953 /*
1954 * If it becomes important to configure capabilities to their actual
1955 * size, use this as the default when it's something we don't recognize.
1956 * Since QEMU doesn't actually handle many of the config accesses,
1957 * getting the exact size right doesn't seem worthwhile.
1958 */
1959 size = vfio_std_cap_max_size(pdev, pos);
1960
1961 /*
1962 * pci_add_capability always inserts the new capability at the head
1963 * of the chain. Therefore to end up with a chain that matches the
1964 * physical device, we insert from the end by making this recursive.
3fc1c182 1965 * This is also why we pre-calculate size above, as the cached config space
65501a74
AW
1966 * will be changed as we unwind the stack.
1967 */
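/*
 * (Editor's note) Example: for a physical chain 0x40 -> 0x50 -> 0x60,
 * the recursion below reaches 0x60 first and adds it, then unwinds to
 * add 0x50 and finally 0x40, so head insertion reproduces the physical
 * ordering.
 */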
1968 if (next) {
7ef165b9 1969 ret = vfio_add_std_cap(vdev, next, errp);
65501a74 1970 if (ret) {
5b31c822 1971 return ret;
65501a74
AW
1972 }
1973 } else {
96adc5c7
AW
1974 /* Begin the rebuild, use QEMU emulated list bits */
1975 pdev->config[PCI_CAPABILITY_LIST] = 0;
1976 vdev->emulated_config_bits[PCI_CAPABILITY_LIST] = 0xff;
1977 vdev->emulated_config_bits[PCI_STATUS] |= PCI_STATUS_CAP_LIST;
e3f79f3b
AW
1978
1979 ret = vfio_add_virt_caps(vdev, errp);
1980 if (ret) {
1981 return ret;
1982 }
65501a74
AW
1983 }
1984
e3f79f3b
AW
1985 /* Scale down size, esp in case virt caps were added above */
1986 size = MIN(size, vfio_std_cap_max_size(pdev, pos));
1987
96adc5c7 1988 /* Use emulated next pointer to allow dropping caps */
3fc1c182 1989 pci_set_byte(vdev->emulated_config_bits + pos + PCI_CAP_LIST_NEXT, 0xff);
96adc5c7 1990
65501a74
AW
1991 switch (cap_id) {
1992 case PCI_CAP_ID_MSI:
7ef165b9 1993 ret = vfio_msi_setup(vdev, pos, errp);
65501a74 1994 break;
96adc5c7 1995 case PCI_CAP_ID_EXP:
befe5176 1996 vfio_check_pcie_flr(vdev, pos);
7ef165b9 1997 ret = vfio_setup_pcie_cap(vdev, pos, size, errp);
96adc5c7 1998 break;
65501a74 1999 case PCI_CAP_ID_MSIX:
7ef165b9 2000 ret = vfio_msix_setup(vdev, pos, errp);
65501a74 2001 break;
ba661818 2002 case PCI_CAP_ID_PM:
befe5176 2003 vfio_check_pm_reset(vdev, pos);
ba661818 2004 vdev->pm_cap = pos;
27841278 2005 ret = pci_add_capability(pdev, cap_id, pos, size, errp);
befe5176
AW
2006 break;
2007 case PCI_CAP_ID_AF:
2008 vfio_check_af_flr(vdev, pos);
27841278 2009 ret = pci_add_capability(pdev, cap_id, pos, size, errp);
befe5176 2010 break;
65501a74 2011 default:
27841278 2012 ret = pci_add_capability(pdev, cap_id, pos, size, errp);
65501a74
AW
2013 break;
2014 }
5b31c822 2015
65501a74 2016 if (ret < 0) {
7ef165b9
EA
2017 error_prepend(errp,
2018 "failed to add PCI capability 0x%x[0x%x]@0x%x: ",
2019 cap_id, size, pos);
65501a74
AW
2020 return ret;
2021 }
2022
2023 return 0;
2024}
2025
7ef165b9 2026static void vfio_add_ext_cap(VFIOPCIDevice *vdev)
325ae8d5
CF
2027{
2028 PCIDevice *pdev = &vdev->pdev;
2029 uint32_t header;
2030 uint16_t cap_id, next, size;
2031 uint8_t cap_ver;
2032 uint8_t *config;
2033
e37dac06 2034 /* Only add extended caps if we have them and the guest can see them */
fd56e061 2035 if (!pci_is_express(pdev) || !pci_bus_is_express(pci_get_bus(pdev)) ||
e37dac06 2036 !pci_get_long(pdev->config + PCI_CONFIG_SPACE_SIZE)) {
7ef165b9 2037 return;
e37dac06
AW
2038 }
2039
325ae8d5
CF
2040 /*
2041 * pcie_add_capability always inserts the new capability at the tail
2042 * of the chain. Therefore to end up with a chain that matches the
2043 * physical device, we cache the config space to avoid overwriting
2044 * the original config space when we parse the extended capabilities.
2045 */
2046 config = g_memdup(pdev->config, vdev->config_size);
2047
e37dac06
AW
2048 /*
2049 * Extended capabilities are chained with each pointing to the next, so we
2050 * can drop anything other than the head of the chain simply by modifying
d0d1cd70
AW
2051 * the previous next pointer. Seed the head of the chain here such that
2052 * we can simply skip any capabilities we want to drop below, regardless
2053 * of their position in the chain. If this stub capability still exists
2054 * after we add the capabilities we want to expose, update the capability
2055 * ID to zero. Note that we cannot seed with the capability header being
2056 * zero as this conflicts with definition of an absent capability chain
2057 * and prevents capabilities beyond the head of the list from being added.
2058 * By replacing the dummy capability ID with zero after walking the device
2059 * chain, we also transparently mark extended capabilities as absent if
2060 * no capabilities were added. Note that the PCIe spec defines an absence
2061 * of extended capabilities to be determined by a value of zero for the
2062 * capability ID, version, AND next pointer. A non-zero next pointer
2063 * should be sufficient to indicate additional capabilities are present,
2064 * which will occur if we call pcie_add_capability() below. The entire
2065 * first dword is emulated to support this.
2066 *
2067 * NB. The kernel side does similar masking, so be prepared that our
2068 * view of the device may also contain a capability ID zero in the head
2069 * of the chain. Skip it for the same reason that we cannot seed the
2070 * chain with a zero capability.
e37dac06
AW
2071 */
2072 pci_set_long(pdev->config + PCI_CONFIG_SPACE_SIZE,
2073 PCI_EXT_CAP(0xFFFF, 0, 0));
2074 pci_set_long(pdev->wmask + PCI_CONFIG_SPACE_SIZE, 0);
2075 pci_set_long(vdev->emulated_config_bits + PCI_CONFIG_SPACE_SIZE, ~0);
2076
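/*
 * (Editor's note) For reference, the PCIe extended capability header
 * dword packs: bits [15:0] capability ID, bits [19:16] capability
 * version, bits [31:20] offset of the next capability (0 terminates
 * the list) -- hence PCI_EXT_CAP(0xFFFF, 0, 0) above yields a stub
 * with an impossible ID and no successor.
 */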
325ae8d5
CF
2077 for (next = PCI_CONFIG_SPACE_SIZE; next;
2078 next = PCI_EXT_CAP_NEXT(pci_get_long(config + next))) {
2079 header = pci_get_long(config + next);
2080 cap_id = PCI_EXT_CAP_ID(header);
2081 cap_ver = PCI_EXT_CAP_VER(header);
2082
2083 /*
2084 * If it becomes important to configure extended capabilities to their
2085 * actual size, use this as the default when it's something we don't
2086 * recognize. Since QEMU doesn't actually handle many of the config
2087 * accesses, getting the exact size right doesn't seem worthwhile.
2088 */
2089 size = vfio_ext_cap_max_size(config, next);
2090
325ae8d5
CF
2091 /* Use emulated next pointer to allow dropping extended caps */
2092 pci_long_test_and_set_mask(vdev->emulated_config_bits + next,
2093 PCI_EXT_CAP_NEXT_MASK);
e37dac06
AW
2094
2095 switch (cap_id) {
d0d1cd70 2096 case 0: /* kernel masked capability */
e37dac06 2097 case PCI_EXT_CAP_ID_SRIOV: /* Read-only VF BARs confuse OVMF */
383a7af7 2098 case PCI_EXT_CAP_ID_ARI: /* XXX Needs next function virtualization */
3412d8ec 2099 case PCI_EXT_CAP_ID_REBAR: /* Can't expose read-only */
e37dac06
AW
2100 trace_vfio_add_ext_cap_dropped(vdev->vbasedev.name, cap_id, next);
2101 break;
2102 default:
2103 pcie_add_capability(pdev, cap_id, cap_ver, next, size);
2104 }
2105
2106 }
2107
2108 /* Cleanup chain head ID if necessary */
2109 if (pci_get_word(pdev->config + PCI_CONFIG_SPACE_SIZE) == 0xFFFF) {
2110 pci_set_word(pdev->config + PCI_CONFIG_SPACE_SIZE, 0);
325ae8d5
CF
2111 }
2112
2113 g_free(config);
7ef165b9 2114 return;
325ae8d5
CF
2115}
2116
7ef165b9 2117static int vfio_add_capabilities(VFIOPCIDevice *vdev, Error **errp)
65501a74
AW
2118{
2119 PCIDevice *pdev = &vdev->pdev;
325ae8d5 2120 int ret;
65501a74
AW
2121
2122 if (!(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST) ||
2123 !pdev->config[PCI_CAPABILITY_LIST]) {
2124 return 0; /* Nothing to add */
2125 }
2126
7ef165b9 2127 ret = vfio_add_std_cap(vdev, pdev->config[PCI_CAPABILITY_LIST], errp);
325ae8d5
CF
2128 if (ret) {
2129 return ret;
2130 }
2131
7ef165b9
EA
2132 vfio_add_ext_cap(vdev);
2133 return 0;
65501a74
AW
2134}
2135
9ee27d73 2136static void vfio_pci_pre_reset(VFIOPCIDevice *vdev)
f16f39c3
AW
2137{
2138 PCIDevice *pdev = &vdev->pdev;
2139 uint16_t cmd;
2140
2141 vfio_disable_interrupts(vdev);
2142
2143 /* Make sure the device is in D0 */
2144 if (vdev->pm_cap) {
2145 uint16_t pmcsr;
2146 uint8_t state;
2147
2148 pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2);
2149 state = pmcsr & PCI_PM_CTRL_STATE_MASK;
2150 if (state) {
2151 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2152 vfio_pci_write_config(pdev, vdev->pm_cap + PCI_PM_CTRL, pmcsr, 2);
2153 /* vfio handles the necessary delay here */
2154 pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2);
2155 state = pmcsr & PCI_PM_CTRL_STATE_MASK;
2156 if (state) {
4e505ddd 2157 error_report("vfio: Unable to power on device, stuck in D%d",
f16f39c3
AW
2158 state);
2159 }
2160 }
2161 }
2162
2163 /*
631ba5a1 2164 * Stop any ongoing DMA by disconnecting I/O, MMIO, and bus master.
f16f39c3
AW
2165 * Also put INTx Disable in known state.
2166 */
2167 cmd = vfio_pci_read_config(pdev, PCI_COMMAND, 2);
2168 cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
2169 PCI_COMMAND_INTX_DISABLE);
2170 vfio_pci_write_config(pdev, PCI_COMMAND, cmd, 2);
2171}
2172
9ee27d73 2173static void vfio_pci_post_reset(VFIOPCIDevice *vdev)
f16f39c3 2174{
7dfb3424 2175 Error *err = NULL;
a52a4c47 2176 int nr;
7dfb3424
EA
2177
2178 vfio_intx_enable(vdev, &err);
2179 if (err) {
c3b8e3e0 2180 error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
7dfb3424 2181 }
a52a4c47
IY
2182
2183 for (nr = 0; nr < PCI_NUM_REGIONS - 1; ++nr) {
2184 off_t addr = vdev->config_offset + PCI_BASE_ADDRESS_0 + (4 * nr);
2185 uint32_t val = 0;
2186 uint32_t len = sizeof(val);
2187
2188 if (pwrite(vdev->vbasedev.fd, &val, len, addr) != len) {
2189 error_report("%s(%s) reset bar %d failed: %m", __func__,
2190 vdev->vbasedev.name, nr);
2191 }
2192 }
469d02de
AW
2193
2194 vfio_quirk_reset(vdev);
f16f39c3
AW
2195}
2196
7df9381b 2197static bool vfio_pci_host_match(PCIHostDeviceAddress *addr, const char *name)
f16f39c3 2198{
7df9381b
AW
2199 char tmp[13];
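/* (Editor's note) "dddd:bb:ss.f" is 12 characters plus the NUL, hence 13 */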
2200
2201 sprintf(tmp, "%04x:%02x:%02x.%1x", addr->domain,
2202 addr->bus, addr->slot, addr->function);
2203
2204 return (strcmp(tmp, name) == 0);
f16f39c3
AW
2205}
2206
9ee27d73 2207static int vfio_pci_hot_reset(VFIOPCIDevice *vdev, bool single)
f16f39c3
AW
2208{
2209 VFIOGroup *group;
2210 struct vfio_pci_hot_reset_info *info;
2211 struct vfio_pci_dependent_device *devices;
2212 struct vfio_pci_hot_reset *reset;
2213 int32_t *fds;
2214 int ret, i, count;
2215 bool multi = false;
2216
df92ee44 2217 trace_vfio_pci_hot_reset(vdev->vbasedev.name, single ? "one" : "multi");
f16f39c3 2218
893bfc3c
C
2219 if (!single) {
2220 vfio_pci_pre_reset(vdev);
2221 }
b47d8efa 2222 vdev->vbasedev.needs_reset = false;
f16f39c3
AW
2223
2224 info = g_malloc0(sizeof(*info));
2225 info->argsz = sizeof(*info);
2226
5546a621 2227 ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info);
f16f39c3
AW
2228 if (ret && errno != ENOSPC) {
2229 ret = -errno;
2230 if (!vdev->has_pm_reset) {
7df9381b
AW
2231 error_report("vfio: Cannot reset device %s, "
2232 "no available reset mechanism.", vdev->vbasedev.name);
f16f39c3
AW
2233 }
2234 goto out_single;
2235 }
2236
2237 count = info->count;
2238 info = g_realloc(info, sizeof(*info) + (count * sizeof(*devices)));
2239 info->argsz = sizeof(*info) + (count * sizeof(*devices));
2240 devices = &info->devices[0];
2241
5546a621 2242 ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info);
f16f39c3
AW
2243 if (ret) {
2244 ret = -errno;
2245 error_report("vfio: hot reset info failed: %m");
2246 goto out_single;
2247 }
2248
df92ee44 2249 trace_vfio_pci_hot_reset_has_dep_devices(vdev->vbasedev.name);
f16f39c3
AW
2250
2251 /* Verify that we have all the groups required */
2252 for (i = 0; i < info->count; i++) {
2253 PCIHostDeviceAddress host;
9ee27d73 2254 VFIOPCIDevice *tmp;
b47d8efa 2255 VFIODevice *vbasedev_iter;
f16f39c3
AW
2256
2257 host.domain = devices[i].segment;
2258 host.bus = devices[i].bus;
2259 host.slot = PCI_SLOT(devices[i].devfn);
2260 host.function = PCI_FUNC(devices[i].devfn);
2261
385f57cf 2262 trace_vfio_pci_hot_reset_dep_devices(host.domain,
f16f39c3
AW
2263 host.bus, host.slot, host.function, devices[i].group_id);
2264
7df9381b 2265 if (vfio_pci_host_match(&host, vdev->vbasedev.name)) {
f16f39c3
AW
2266 continue;
2267 }
2268
62356b72 2269 QLIST_FOREACH(group, &vfio_group_list, next) {
f16f39c3
AW
2270 if (group->groupid == devices[i].group_id) {
2271 break;
2272 }
2273 }
2274
2275 if (!group) {
2276 if (!vdev->has_pm_reset) {
df92ee44 2277 error_report("vfio: Cannot reset device %s, "
f16f39c3 2278 "depends on group %d which is not owned.",
df92ee44 2279 vdev->vbasedev.name, devices[i].group_id);
f16f39c3
AW
2280 }
2281 ret = -EPERM;
2282 goto out;
2283 }
2284
2285 /* Prep dependent devices for reset and clear our marker. */
b47d8efa 2286 QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
7da624e2
AW
2287 if (!vbasedev_iter->dev->realized ||
2288 vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) {
b47d8efa
EA
2289 continue;
2290 }
2291 tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev);
7df9381b 2292 if (vfio_pci_host_match(&host, tmp->vbasedev.name)) {
f16f39c3 2293 if (single) {
f16f39c3
AW
2294 ret = -EINVAL;
2295 goto out_single;
2296 }
2297 vfio_pci_pre_reset(tmp);
b47d8efa 2298 tmp->vbasedev.needs_reset = false;
f16f39c3
AW
2299 multi = true;
2300 break;
2301 }
2302 }
2303 }
2304
2305 if (!single && !multi) {
f16f39c3
AW
2306 ret = -EINVAL;
2307 goto out_single;
2308 }
2309
2310 /* Determine how many group fds need to be passed */
2311 count = 0;
62356b72 2312 QLIST_FOREACH(group, &vfio_group_list, next) {
f16f39c3
AW
2313 for (i = 0; i < info->count; i++) {
2314 if (group->groupid == devices[i].group_id) {
2315 count++;
2316 break;
2317 }
2318 }
2319 }
2320
2321 reset = g_malloc0(sizeof(*reset) + (count * sizeof(*fds)));
2322 reset->argsz = sizeof(*reset) + (count * sizeof(*fds));
2323 fds = &reset->group_fds[0];
2324
2325 /* Fill in group fds */
62356b72 2326 QLIST_FOREACH(group, &vfio_group_list, next) {
f16f39c3
AW
2327 for (i = 0; i < info->count; i++) {
2328 if (group->groupid == devices[i].group_id) {
2329 fds[reset->count++] = group->fd;
2330 break;
2331 }
2332 }
2333 }
2334
2335 /* Bus reset! */
5546a621 2336 ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_PCI_HOT_RESET, reset);
f16f39c3
AW
2337 g_free(reset);
2338
df92ee44 2339 trace_vfio_pci_hot_reset_result(vdev->vbasedev.name,
385f57cf 2340 ret ? "%m" : "Success");
f16f39c3
AW
2341
2342out:
2343 /* Re-enable INTx on affected devices */
2344 for (i = 0; i < info->count; i++) {
2345 PCIHostDeviceAddress host;
9ee27d73 2346 VFIOPCIDevice *tmp;
b47d8efa 2347 VFIODevice *vbasedev_iter;
f16f39c3
AW
2348
2349 host.domain = devices[i].segment;
2350 host.bus = devices[i].bus;
2351 host.slot = PCI_SLOT(devices[i].devfn);
2352 host.function = PCI_FUNC(devices[i].devfn);
2353
7df9381b 2354 if (vfio_pci_host_match(&host, vdev->vbasedev.name)) {
f16f39c3
AW
2355 continue;
2356 }
2357
62356b72 2358 QLIST_FOREACH(group, &vfio_group_list, next) {
f16f39c3
AW
2359 if (group->groupid == devices[i].group_id) {
2360 break;
2361 }
2362 }
2363
2364 if (!group) {
2365 break;
2366 }
2367
b47d8efa 2368 QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
7da624e2
AW
2369 if (!vbasedev_iter->dev->realized ||
2370 vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) {
b47d8efa
EA
2371 continue;
2372 }
2373 tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev);
7df9381b 2374 if (vfio_pci_host_match(&host, tmp->vbasedev.name)) {
f16f39c3
AW
2375 vfio_pci_post_reset(tmp);
2376 break;
2377 }
2378 }
2379 }
2380out_single:
893bfc3c
C
2381 if (!single) {
2382 vfio_pci_post_reset(vdev);
2383 }
f16f39c3
AW
2384 g_free(info);
2385
2386 return ret;
2387}
2388
2389/*
631ba5a1 2390 * We want to differentiate hot reset of multiple in-use devices vs hot reset
f16f39c3
AW
2391 * of a single in-use device. VFIO_DEVICE_RESET will already handle the case
2392 * of doing hot resets when there is only a single device per bus. "In-use"
2393 * here refers to how many VFIODevices are affected. A hot reset that affects
2394 * multiple devices, but only a single in-use device, means that we can call
2395 * it from our bus ->reset() callback since the extent is effectively a single
2396 * device. This allows us to make use of it in the hotplug path. When there
2397 * are multiple in-use devices, we can only trigger the hot reset during a
2398 * system reset and thus from our reset handler. We separate _one vs _multi
2399 * here so that we don't overlap and do a double reset on the system reset
2400 * path where both our reset handler and ->reset() callback are used. Calling
2401 * _one() will only do a hot reset for the single in-use device case; calling
2402 * _multi() will do nothing if a _one() would have been sufficient.
2403 */
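/*
 * (Editor's note) Example: consider a dual-function device where a bus
 * reset necessarily affects both functions.  If only function 0 is
 * assigned and in use, the reset spans a single in-use VFIODevice, so
 * _one() may perform it from the bus ->reset() callback (e.g. during
 * hot unplug).  If both functions are assigned and in use, only the
 * system reset path via _multi() may perform it, once for both.
 */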
9ee27d73 2404static int vfio_pci_hot_reset_one(VFIOPCIDevice *vdev)
f16f39c3
AW
2405{
2406 return vfio_pci_hot_reset(vdev, true);
2407}
2408
b47d8efa 2409static int vfio_pci_hot_reset_multi(VFIODevice *vbasedev)
f16f39c3 2410{
b47d8efa 2411 VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
f16f39c3
AW
2412 return vfio_pci_hot_reset(vdev, false);
2413}
2414
b47d8efa
EA
2415static void vfio_pci_compute_needs_reset(VFIODevice *vbasedev)
2416{
2417 VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
2418 if (!vbasedev->reset_works || (!vdev->has_flr && vdev->has_pm_reset)) {
2419 vbasedev->needs_reset = true;
2420 }
2421}
2422
e93b733b
KW
2423static Object *vfio_pci_get_object(VFIODevice *vbasedev)
2424{
2425 VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
2426
2427 return OBJECT(vdev);
2428}
2429
c5e2fb3c
KW
2430static bool vfio_msix_present(void *opaque, int version_id)
2431{
2432 PCIDevice *pdev = opaque;
2433
2434 return msix_present(pdev);
2435}
2436
2437const VMStateDescription vmstate_vfio_pci_config = {
2438 .name = "VFIOPCIDevice",
2439 .version_id = 1,
2440 .minimum_version_id = 1,
2441 .fields = (VMStateField[]) {
2442 VMSTATE_PCI_DEVICE(pdev, VFIOPCIDevice),
2443 VMSTATE_MSIX_TEST(pdev, VFIOPCIDevice, vfio_msix_present),
2444 VMSTATE_END_OF_LIST()
2445 }
2446};
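/*
 * (Editor's note) The MSI-X field above is guarded by
 * vfio_msix_present(), so a config stream saved from a device without
 * MSI-X simply omits that state rather than failing to load.
 */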
2447
2448static void vfio_pci_save_config(VFIODevice *vbasedev, QEMUFile *f)
2449{
2450 VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
2451
2452 vmstate_save_state(f, &vmstate_vfio_pci_config, vdev, NULL);
2453}
2454
2455static int vfio_pci_load_config(VFIODevice *vbasedev, QEMUFile *f)
2456{
2457 VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
2458 PCIDevice *pdev = &vdev->pdev;
f36d4fb8
KJ
2459 pcibus_t old_addr[PCI_NUM_REGIONS - 1];
2460 int bar, ret;
2461
2462 for (bar = 0; bar < PCI_ROM_SLOT; bar++) {
2463 old_addr[bar] = pdev->io_regions[bar].addr;
2464 }
c5e2fb3c
KW
2465
2466 ret = vmstate_load_state(f, &vmstate_vfio_pci_config, vdev, 1);
2467 if (ret) {
2468 return ret;
2469 }
2470
2471 vfio_pci_write_config(pdev, PCI_COMMAND,
2472 pci_get_word(pdev->config + PCI_COMMAND), 2);
2473
f36d4fb8
KJ
2474 for (bar = 0; bar < PCI_ROM_SLOT; bar++) {
2475 /*
2476 * The address may not be changed in some scenarios
2477 * (e.g. the VF driver isn't loaded in VM).
2478 */
2479 if (old_addr[bar] != pdev->io_regions[bar].addr &&
2480 vdev->bars[bar].region.size > 0 &&
8e3b0cbb 2481 vdev->bars[bar].region.size < qemu_real_host_page_size()) {
f36d4fb8
KJ
2482 vfio_sub_page_bar_update_mapping(pdev, bar);
2483 }
2484 }
2485
c5e2fb3c
KW
2486 if (msi_enabled(pdev)) {
2487 vfio_msi_enable(vdev);
2488 } else if (msix_enabled(pdev)) {
2489 vfio_msix_enable(vdev);
2490 }
2491
2492 return ret;
2493}
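/*
 * (Editor's note) Ordering above: the config space is restored first;
 * the PCI_COMMAND write then re-applies BAR mappings and bus master
 * state through vfio_pci_write_config(); sub-page BARs whose addresses
 * changed are remapped; and finally MSI/MSI-X is re-enabled to match
 * the restored interrupt configuration.
 */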
2494
b47d8efa
EA
2495static VFIODeviceOps vfio_pci_ops = {
2496 .vfio_compute_needs_reset = vfio_pci_compute_needs_reset,
2497 .vfio_hot_reset_multi = vfio_pci_hot_reset_multi,
870cb6f1 2498 .vfio_eoi = vfio_intx_eoi,
e93b733b 2499 .vfio_get_object = vfio_pci_get_object,
c5e2fb3c
KW
2500 .vfio_save_config = vfio_pci_save_config,
2501 .vfio_load_config = vfio_pci_load_config,
b47d8efa
EA
2502};
2503
cde4279b 2504int vfio_populate_vga(VFIOPCIDevice *vdev, Error **errp)
e593c021
AW
2505{
2506 VFIODevice *vbasedev = &vdev->vbasedev;
2507 struct vfio_region_info *reg_info;
2508 int ret;
2509
4225f2b6
AW
2510 ret = vfio_get_region_info(vbasedev, VFIO_PCI_VGA_REGION_INDEX, &reg_info);
2511 if (ret) {
cde4279b
EA
2512 error_setg_errno(errp, -ret,
2513 "failed getting region info for VGA region index %d",
2514 VFIO_PCI_VGA_REGION_INDEX);
4225f2b6
AW
2515 return ret;
2516 }
e593c021 2517
4225f2b6
AW
2518 if (!(reg_info->flags & VFIO_REGION_INFO_FLAG_READ) ||
2519 !(reg_info->flags & VFIO_REGION_INFO_FLAG_WRITE) ||
2520 reg_info->size < 0xbffff + 1) {
cde4279b
EA
2521 error_setg(errp, "unexpected VGA info, flags 0x%lx, size 0x%lx",
2522 (unsigned long)reg_info->flags,
2523 (unsigned long)reg_info->size);
4225f2b6
AW
2524 g_free(reg_info);
2525 return -EINVAL;
2526 }
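/*
 * (Editor's note) The size check above requires the VGA region to cover
 * the legacy ranges up to and including 0xbffff; the MMIO window at
 * 0xa0000 and the 0x3b0/0x3c0 I/O port ranges are then carved out of it
 * at their architected offsets below.
 */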
e593c021 2527
4225f2b6 2528 vdev->vga = g_new0(VFIOVGA, 1);
e593c021 2529
4225f2b6
AW
2530 vdev->vga->fd_offset = reg_info->offset;
2531 vdev->vga->fd = vdev->vbasedev.fd;
e593c021 2532
4225f2b6 2533 g_free(reg_info);
e593c021 2534
4225f2b6
AW
2535 vdev->vga->region[QEMU_PCI_VGA_MEM].offset = QEMU_PCI_VGA_MEM_BASE;
2536 vdev->vga->region[QEMU_PCI_VGA_MEM].nr = QEMU_PCI_VGA_MEM;
2537 QLIST_INIT(&vdev->vga->region[QEMU_PCI_VGA_MEM].quirks);
e593c021 2538
182bca45
AW
2539 memory_region_init_io(&vdev->vga->region[QEMU_PCI_VGA_MEM].mem,
2540 OBJECT(vdev), &vfio_vga_ops,
2541 &vdev->vga->region[QEMU_PCI_VGA_MEM],
2542 "vfio-vga-mmio@0xa0000",
2543 QEMU_PCI_VGA_MEM_SIZE);
2544
4225f2b6
AW
2545 vdev->vga->region[QEMU_PCI_VGA_IO_LO].offset = QEMU_PCI_VGA_IO_LO_BASE;
2546 vdev->vga->region[QEMU_PCI_VGA_IO_LO].nr = QEMU_PCI_VGA_IO_LO;
2547 QLIST_INIT(&vdev->vga->region[QEMU_PCI_VGA_IO_LO].quirks);
e593c021 2548
182bca45
AW
2549 memory_region_init_io(&vdev->vga->region[QEMU_PCI_VGA_IO_LO].mem,
2550 OBJECT(vdev), &vfio_vga_ops,
2551 &vdev->vga->region[QEMU_PCI_VGA_IO_LO],
2552 "vfio-vga-io@0x3b0",
2553 QEMU_PCI_VGA_IO_LO_SIZE);
2554
4225f2b6
AW
2555 vdev->vga->region[QEMU_PCI_VGA_IO_HI].offset = QEMU_PCI_VGA_IO_HI_BASE;
2556 vdev->vga->region[QEMU_PCI_VGA_IO_HI].nr = QEMU_PCI_VGA_IO_HI;
2557 QLIST_INIT(&vdev->vga->region[QEMU_PCI_VGA_IO_HI].quirks);
e593c021 2558
182bca45
AW
2559 memory_region_init_io(&vdev->vga->region[QEMU_PCI_VGA_IO_HI].mem,
2560 OBJECT(vdev), &vfio_vga_ops,
2561 &vdev->vga->region[QEMU_PCI_VGA_IO_HI],
2562 "vfio-vga-io@0x3c0",
2563 QEMU_PCI_VGA_IO_HI_SIZE);
2564
2565 pci_register_vga(&vdev->pdev, &vdev->vga->region[QEMU_PCI_VGA_MEM].mem,
2566 &vdev->vga->region[QEMU_PCI_VGA_IO_LO].mem,
2567 &vdev->vga->region[QEMU_PCI_VGA_IO_HI].mem);
2568
e593c021
AW
2569 return 0;
2570}
2571
e04cff9d 2572static void vfio_populate_device(VFIOPCIDevice *vdev, Error **errp)
65501a74 2573{
217e9fdc 2574 VFIODevice *vbasedev = &vdev->vbasedev;
46900226 2575 struct vfio_region_info *reg_info;
7b4b0e9e 2576 struct vfio_irq_info irq_info = { .argsz = sizeof(irq_info) };
d13dd2d7 2577 int i, ret = -1;
65501a74
AW
2578
2579 /* Sanity check device */
d13dd2d7 2580 if (!(vbasedev->flags & VFIO_DEVICE_FLAGS_PCI)) {
2312d907 2581 error_setg(errp, "this isn't a PCI device");
e04cff9d 2582 return;
65501a74
AW
2583 }
2584
d13dd2d7 2585 if (vbasedev->num_regions < VFIO_PCI_CONFIG_REGION_INDEX + 1) {
2312d907
EA
2586 error_setg(errp, "unexpected number of io regions %u",
2587 vbasedev->num_regions);
e04cff9d 2588 return;
65501a74
AW
2589 }
2590
d13dd2d7 2591 if (vbasedev->num_irqs < VFIO_PCI_MSIX_IRQ_INDEX + 1) {
2312d907 2592 error_setg(errp, "unexpected number of irqs %u", vbasedev->num_irqs);
e04cff9d 2593 return;
65501a74
AW
2594 }
2595
2596 for (i = VFIO_PCI_BAR0_REGION_INDEX; i < VFIO_PCI_ROM_REGION_INDEX; i++) {
db0da029
AW
2597 char *name = g_strdup_printf("%s BAR %d", vbasedev->name, i);
2598
2599 ret = vfio_region_setup(OBJECT(vdev), vbasedev,
2600 &vdev->bars[i].region, i, name);
2601 g_free(name);
2602
65501a74 2603 if (ret) {
2312d907 2604 error_setg_errno(errp, -ret, "failed to get region %d info", i);
e04cff9d 2605 return;
65501a74
AW
2606 }
2607
7076eabc 2608 QLIST_INIT(&vdev->bars[i].quirks);
46900226 2609 }
65501a74 2610
46900226
AW
2611 ret = vfio_get_region_info(vbasedev,
2612 VFIO_PCI_CONFIG_REGION_INDEX, &reg_info);
65501a74 2613 if (ret) {
2312d907 2614 error_setg_errno(errp, -ret, "failed to get config info");
e04cff9d 2615 return;
65501a74
AW
2616 }
2617
d13dd2d7 2618 trace_vfio_populate_device_config(vdev->vbasedev.name,
46900226
AW
2619 (unsigned long)reg_info->size,
2620 (unsigned long)reg_info->offset,
2621 (unsigned long)reg_info->flags);
65501a74 2622
46900226 2623 vdev->config_size = reg_info->size;
6a659bbf
AW
2624 if (vdev->config_size == PCI_CONFIG_SPACE_SIZE) {
2625 vdev->pdev.cap_present &= ~QEMU_PCI_CAP_EXPRESS;
2626 }
46900226
AW
2627 vdev->config_offset = reg_info->offset;
2628
2629 g_free(reg_info);
65501a74 2630
e593c021 2631 if (vdev->features & VFIO_FEATURE_ENABLE_VGA) {
2312d907 2632 ret = vfio_populate_vga(vdev, errp);
f15689c7 2633 if (ret) {
2312d907 2634 error_append_hint(errp, "device does not support "
cde4279b 2635 "requested feature x-vga\n");
e04cff9d 2636 return;
f15689c7 2637 }
f15689c7 2638 }
47cbe50c 2639
7b4b0e9e
VMP
2640 irq_info.index = VFIO_PCI_ERR_IRQ_INDEX;
2641
5546a621 2642 ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_IRQ_INFO, &irq_info);
7b4b0e9e
VMP
2643 if (ret) {
2644 /* This can fail for an old kernel or legacy PCI dev */
772f1b37 2645 trace_vfio_populate_device_get_irq_info_failure(strerror(errno));
7b4b0e9e
VMP
2646 } else if (irq_info.count == 1) {
2647 vdev->pci_aer = true;
2648 } else {
e1eb292a
MA
2649 warn_report(VFIO_MSG_PREFIX
2650 "Could not enable error recovery for the device",
2651 vbasedev->name);
7b4b0e9e 2652 }
d13dd2d7
EA
2653}
2654
9ee27d73 2655static void vfio_put_device(VFIOPCIDevice *vdev)
65501a74 2656{
462037c9 2657 g_free(vdev->vbasedev.name);
db0da029
AW
2658 g_free(vdev->msix);
2659
d13dd2d7 2660 vfio_put_base_device(&vdev->vbasedev);
65501a74
AW
2661}
2662
7b4b0e9e
VMP
2663static void vfio_err_notifier_handler(void *opaque)
2664{
9ee27d73 2665 VFIOPCIDevice *vdev = opaque;
7b4b0e9e
VMP
2666
2667 if (!event_notifier_test_and_clear(&vdev->err_notifier)) {
2668 return;
2669 }
2670
2671 /*
2672 * TBD. Retrieve the error details and decide what action
2673 * needs to be taken. One of the actions could be to pass
2674 * the error to the guest and have the guest driver recover
2675 * from the error. This requires that PCIe capabilities be
2676 * exposed to the guest. For now, we just terminate the
2677 * guest to contain the error.
2678 */
2679
7df9381b 2680 error_report("%s(%s) Unrecoverable error detected. Please collect any data possible and then kill the guest", __func__, vdev->vbasedev.name);
7b4b0e9e 2681
ba29776f 2682 vm_stop(RUN_STATE_INTERNAL_ERROR);
7b4b0e9e
VMP
2683}
2684
2685/*
2686 * Registers error notifier for devices supporting error recovery.
2687 * If we encounter a failure in this function, we report an error
2688 * and continue after disabling error recovery support for the
2689 * device.
2690 */
9ee27d73 2691static void vfio_register_err_notifier(VFIOPCIDevice *vdev)
7b4b0e9e 2692{
201a7331
EA
2693 Error *err = NULL;
2694 int32_t fd;
7b4b0e9e
VMP
2695
2696 if (!vdev->pci_aer) {
2697 return;
2698 }
2699
2700 if (event_notifier_init(&vdev->err_notifier, 0)) {
8fbf47c3 2701 error_report("vfio: Unable to init event notifier for error detection");
7b4b0e9e
VMP
2702 vdev->pci_aer = false;
2703 return;
2704 }
2705
201a7331
EA
2706 fd = event_notifier_get_fd(&vdev->err_notifier);
2707 qemu_set_fd_handler(fd, vfio_err_notifier_handler, NULL, vdev);
7b4b0e9e 2708
201a7331
EA
2709 if (vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_ERR_IRQ_INDEX, 0,
2710 VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err)) {
2711 error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
2712 qemu_set_fd_handler(fd, NULL, NULL, vdev);
7b4b0e9e
VMP
2713 event_notifier_cleanup(&vdev->err_notifier);
2714 vdev->pci_aer = false;
2715 }
7b4b0e9e
VMP
2716}
2717
9ee27d73 2718static void vfio_unregister_err_notifier(VFIOPCIDevice *vdev)
7b4b0e9e 2719{
201a7331 2720 Error *err = NULL;
7b4b0e9e
VMP
2721
2722 if (!vdev->pci_aer) {
2723 return;
2724 }
2725
201a7331
EA
2726 if (vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_ERR_IRQ_INDEX, 0,
2727 VFIO_IRQ_SET_ACTION_TRIGGER, -1, &err)) {
2728 error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
7b4b0e9e 2729 }
7b4b0e9e
VMP
2730 qemu_set_fd_handler(event_notifier_get_fd(&vdev->err_notifier),
2731 NULL, NULL, vdev);
2732 event_notifier_cleanup(&vdev->err_notifier);
2733}
2734
47cbe50c
AW
2735static void vfio_req_notifier_handler(void *opaque)
2736{
2737 VFIOPCIDevice *vdev = opaque;
35c7cb4c 2738 Error *err = NULL;
47cbe50c
AW
2739
2740 if (!event_notifier_test_and_clear(&vdev->req_notifier)) {
2741 return;
2742 }
2743
a2596aee 2744 qdev_unplug(DEVICE(vdev), &err);
35c7cb4c 2745 if (err) {
e1eb292a 2746 warn_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
35c7cb4c 2747 }
47cbe50c
AW
2748}
2749
2750static void vfio_register_req_notifier(VFIOPCIDevice *vdev)
2751{
2752 struct vfio_irq_info irq_info = { .argsz = sizeof(irq_info),
2753 .index = VFIO_PCI_REQ_IRQ_INDEX };
201a7331
EA
2754 Error *err = NULL;
2755 int32_t fd;
47cbe50c
AW
2756
2757 if (!(vdev->features & VFIO_FEATURE_ENABLE_REQ)) {
2758 return;
2759 }
2760
2761 if (ioctl(vdev->vbasedev.fd,
2762 VFIO_DEVICE_GET_IRQ_INFO, &irq_info) < 0 || irq_info.count < 1) {
2763 return;
2764 }
2765
2766 if (event_notifier_init(&vdev->req_notifier, 0)) {
2767 error_report("vfio: Unable to init event notifier for device request");
2768 return;
2769 }
2770
201a7331
EA
2771 fd = event_notifier_get_fd(&vdev->req_notifier);
2772 qemu_set_fd_handler(fd, vfio_req_notifier_handler, NULL, vdev);
47cbe50c 2773
201a7331
EA
2774 if (vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_REQ_IRQ_INDEX, 0,
2775 VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err)) {
2776 error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
2777 qemu_set_fd_handler(fd, NULL, NULL, vdev);
47cbe50c
AW
2778 event_notifier_cleanup(&vdev->req_notifier);
2779 } else {
2780 vdev->req_enabled = true;
2781 }
47cbe50c
AW
2782}
2783
2784static void vfio_unregister_req_notifier(VFIOPCIDevice *vdev)
2785{
201a7331 2786 Error *err = NULL;
47cbe50c
AW
2787
2788 if (!vdev->req_enabled) {
2789 return;
2790 }
2791
201a7331
EA
2792 if (vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_REQ_IRQ_INDEX, 0,
2793 VFIO_IRQ_SET_ACTION_TRIGGER, -1, &err)) {
2794 error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
47cbe50c 2795 }
47cbe50c
AW
2796 qemu_set_fd_handler(event_notifier_get_fd(&vdev->req_notifier),
2797 NULL, NULL, vdev);
2798 event_notifier_cleanup(&vdev->req_notifier);
2799
2800 vdev->req_enabled = false;
2801}
2802
1a22aca1 2803static void vfio_realize(PCIDevice *pdev, Error **errp)
65501a74 2804{
01b46064 2805 VFIOPCIDevice *vdev = VFIO_PCI(pdev);
b47d8efa 2806 VFIODevice *vbasedev_iter;
65501a74 2807 VFIOGroup *group;
238e9172 2808 char *tmp, *subsys, group_path[PATH_MAX], *group_name;
ec3bcf42 2809 Error *err = NULL;
65501a74
AW
2810 ssize_t len;
2811 struct stat st;
2812 int groupid;
581406e0 2813 int i, ret;
238e9172 2814 bool is_mdev;
65501a74 2815
7df9381b 2816 if (!vdev->vbasedev.sysfsdev) {
4a946268
EA
2817 if (!(~vdev->host.domain || ~vdev->host.bus ||
2818 ~vdev->host.slot || ~vdev->host.function)) {
2819 error_setg(errp, "No provided host device");
6e4e6f0d
DJS
2820 error_append_hint(errp, "Use -device vfio-pci,host=DDDD:BB:DD.F "
2821 "or -device vfio-pci,sysfsdev=PATH_TO_DEVICE\n");
4a946268
EA
2822 return;
2823 }
7df9381b
AW
2824 vdev->vbasedev.sysfsdev =
2825 g_strdup_printf("/sys/bus/pci/devices/%04x:%02x:%02x.%01x",
2826 vdev->host.domain, vdev->host.bus,
2827 vdev->host.slot, vdev->host.function);
2828 }
2829
2830 if (stat(vdev->vbasedev.sysfsdev, &st) < 0) {
1a22aca1 2831 error_setg_errno(errp, errno, "no such host device");
c3b8e3e0 2832 error_prepend(errp, VFIO_MSG_PREFIX, vdev->vbasedev.sysfsdev);
1a22aca1 2833 return;
65501a74
AW
2834 }
2835
3e015d81 2836 vdev->vbasedev.name = g_path_get_basename(vdev->vbasedev.sysfsdev);
b47d8efa 2837 vdev->vbasedev.ops = &vfio_pci_ops;
462037c9 2838 vdev->vbasedev.type = VFIO_DEVICE_TYPE_PCI;
a2596aee 2839 vdev->vbasedev.dev = DEVICE(vdev);
462037c9 2840
7df9381b
AW
2841 tmp = g_strdup_printf("%s/iommu_group", vdev->vbasedev.sysfsdev);
2842 len = readlink(tmp, group_path, sizeof(group_path));
2843 g_free(tmp);
65501a74 2844
7df9381b 2845 if (len <= 0 || len >= sizeof(group_path)) {
1a22aca1
EA
2846 error_setg_errno(errp, len < 0 ? errno : ENAMETOOLONG,
2847 "no iommu_group found");
426ec904 2848 goto error;
65501a74
AW
2849 }
2850
7df9381b 2851 group_path[len] = 0;
65501a74 2852
7df9381b 2853 group_name = basename(group_path);
65501a74 2854 if (sscanf(group_name, "%d", &groupid) != 1) {
1a22aca1 2855 error_setg_errno(errp, errno, "failed to read %s", group_path);
426ec904 2856 goto error;
65501a74
AW
2857 }
2858
1a22aca1 2859 trace_vfio_realize(vdev->vbasedev.name, groupid);
65501a74 2860
1a22aca1 2861 group = vfio_get_group(groupid, pci_device_iommu_address_space(pdev), errp);
65501a74 2862 if (!group) {
426ec904 2863 goto error;
65501a74
AW
2864 }
2865
b47d8efa
EA
2866 QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
2867 if (strcmp(vbasedev_iter->name, vdev->vbasedev.name) == 0) {
1a22aca1 2868 error_setg(errp, "device is already attached");
65501a74 2869 vfio_put_group(group);
426ec904 2870 goto error;
65501a74
AW
2871 }
2872 }
2873
238e9172 2874 /*
aff92b82 2875 * Mediated devices *might* operate compatibly with discarding of RAM, but
238e9172
AW
2876 * we cannot know for certain; it depends on whether the mdev vendor driver
2877 * stays in sync with the active working set of the guest driver. Prevent
2878 * the x-balloon-allowed option unless this is minimally an mdev device.
2879 */
2880 tmp = g_strdup_printf("%s/subsystem", vdev->vbasedev.sysfsdev);
2881 subsys = realpath(tmp, NULL);
2882 g_free(tmp);
a1c0f886 2883 is_mdev = subsys && (strcmp(subsys, "/sys/bus/mdev") == 0);
238e9172
AW
2884 free(subsys);
2885
2886 trace_vfio_mdev(vdev->vbasedev.name, is_mdev);
2887
aff92b82 2888 if (vdev->vbasedev.ram_block_discard_allowed && !is_mdev) {
238e9172
AW
2889 error_setg(errp, "x-balloon-allowed only potentially compatible "
2890 "with mdev devices");
2891 vfio_put_group(group);
2892 goto error;
2893 }
2894
1a22aca1 2895 ret = vfio_get_device(group, vdev->vbasedev.name, &vdev->vbasedev, errp);
65501a74 2896 if (ret) {
65501a74 2897 vfio_put_group(group);
426ec904 2898 goto error;
65501a74
AW
2899 }
2900
e04cff9d
EA
2901 vfio_populate_device(vdev, &err);
2902 if (err) {
2903 error_propagate(errp, err);
2312d907 2904 goto error;
217e9fdc
PB
2905 }
2906
65501a74 2907 /* Get a copy of config space */
5546a621 2908 ret = pread(vdev->vbasedev.fd, vdev->pdev.config,
65501a74
AW
2909 MIN(pci_config_size(&vdev->pdev), vdev->config_size),
2910 vdev->config_offset);
2911 if (ret < (int)MIN(pci_config_size(&vdev->pdev), vdev->config_size)) {
2912 ret = ret < 0 ? -errno : -EFAULT;
1a22aca1 2913 error_setg_errno(errp, -ret, "failed to read device config space");
426ec904 2914 goto error;
65501a74
AW
2915 }
2916
4b5d5e87
AW
2917 /* vfio emulates a lot for us, but some bits need extra love */
2918 vdev->emulated_config_bits = g_malloc0(vdev->config_size);
2919
2920 /* QEMU can choose to expose the ROM or not */
2921 memset(vdev->emulated_config_bits + PCI_ROM_ADDRESS, 0xff, 4);
04f336b0
AW
2922 /* QEMU can also add or extend BARs */
2923 memset(vdev->emulated_config_bits + PCI_BASE_ADDRESS_0, 0xff, 6 * 4);
4b5d5e87 2924
89dcccc5
AW
2925 /*
2926 * The PCI spec reserves vendor ID 0xffff as an invalid value. The
2927 * device ID is managed by the vendor and need only be a 16-bit value.
2928 * Allow any 16-bit value for subsystem so they can be hidden or changed.
2929 */
2930 if (vdev->vendor_id != PCI_ANY_ID) {
2931 if (vdev->vendor_id >= 0xffff) {
1a22aca1 2932 error_setg(errp, "invalid PCI vendor ID provided");
426ec904 2933 goto error;
89dcccc5
AW
2934 }
2935 vfio_add_emulated_word(vdev, PCI_VENDOR_ID, vdev->vendor_id, ~0);
2936 trace_vfio_pci_emulated_vendor_id(vdev->vbasedev.name, vdev->vendor_id);
2937 } else {
2938 vdev->vendor_id = pci_get_word(pdev->config + PCI_VENDOR_ID);
2939 }
2940
2941 if (vdev->device_id != PCI_ANY_ID) {
2942 if (vdev->device_id > 0xffff) {
1a22aca1 2943 error_setg(errp, "invalid PCI device ID provided");
426ec904 2944 goto error;
89dcccc5
AW
2945 }
2946 vfio_add_emulated_word(vdev, PCI_DEVICE_ID, vdev->device_id, ~0);
2947 trace_vfio_pci_emulated_device_id(vdev->vbasedev.name, vdev->device_id);
2948 } else {
2949 vdev->device_id = pci_get_word(pdev->config + PCI_DEVICE_ID);
2950 }
2951
2952 if (vdev->sub_vendor_id != PCI_ANY_ID) {
2953 if (vdev->sub_vendor_id > 0xffff) {
1a22aca1 2954 error_setg(errp, "invalid PCI subsystem vendor ID provided");
426ec904 2955 goto error;
89dcccc5
AW
2956 }
2957 vfio_add_emulated_word(vdev, PCI_SUBSYSTEM_VENDOR_ID,
2958 vdev->sub_vendor_id, ~0);
2959 trace_vfio_pci_emulated_sub_vendor_id(vdev->vbasedev.name,
2960 vdev->sub_vendor_id);
2961 }
2962
2963 if (vdev->sub_device_id != PCI_ANY_ID) {
2964 if (vdev->sub_device_id > 0xffff) {
1a22aca1 2965 error_setg(errp, "invalid PCI subsystem device ID provided");
426ec904 2966 goto error;
89dcccc5
AW
2967 }
2968 vfio_add_emulated_word(vdev, PCI_SUBSYSTEM_ID, vdev->sub_device_id, ~0);
2969 trace_vfio_pci_emulated_sub_device_id(vdev->vbasedev.name,
2970 vdev->sub_device_id);
2971 }
ff635e37 2972
4b5d5e87
AW
2973 /* QEMU can change multi-function devices to single function, or reverse */
2974 vdev->emulated_config_bits[PCI_HEADER_TYPE] =
2975 PCI_HEADER_TYPE_MULTI_FUNCTION;
2976
187d6232
AW
2977 /* Restore or clear multifunction, this is always controlled by QEMU */
2978 if (vdev->pdev.cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
2979 vdev->pdev.config[PCI_HEADER_TYPE] |= PCI_HEADER_TYPE_MULTI_FUNCTION;
2980 } else {
2981 vdev->pdev.config[PCI_HEADER_TYPE] &= ~PCI_HEADER_TYPE_MULTI_FUNCTION;
2982 }
2983
65501a74
AW
2984 /*
2985 * Clear host resource mapping info. If we choose not to register a
2986 * BAR, such as might be the case with the option ROM, we can get
2987 * confusing, unwritable, residual addresses from the host here.
2988 */
2989 memset(&vdev->pdev.config[PCI_BASE_ADDRESS_0], 0, 24);
2990 memset(&vdev->pdev.config[PCI_ROM_ADDRESS], 0, 4);
2991
6f864e6e 2992 vfio_pci_size_rom(vdev);
65501a74 2993
89d5202e
AW
2994 vfio_bars_prepare(vdev);
2995
ec3bcf42
EA
2996 vfio_msix_early_setup(vdev, &err);
2997 if (err) {
2998 error_propagate(errp, err);
008d0e2d 2999 goto error;
65501a74
AW
3000 }
3001
3a286732 3002 vfio_bars_register(vdev);
65501a74 3003
1a22aca1 3004 ret = vfio_add_capabilities(vdev, errp);
65501a74
AW
3005 if (ret) {
3006 goto out_teardown;
3007 }
3008
182bca45
AW
3009 if (vdev->vga) {
3010 vfio_vga_quirk_setup(vdev);
3011 }
3012
581406e0
AW
3013 for (i = 0; i < PCI_ROM_SLOT; i++) {
3014 vfio_bar_quirk_setup(vdev, i);
3015 }
3016
6ced0bba
AW
3017 if (!vdev->igd_opregion &&
3018 vdev->features & VFIO_FEATURE_ENABLE_IGD_OPREGION) {
3019 struct vfio_region_info *opregion;
3020
3021 if (vdev->pdev.qdev.hotplugged) {
1a22aca1 3022 error_setg(errp,
426ec904
EA
3023 "cannot support IGD OpRegion feature on hotplugged "
3024 "device");
6ced0bba
AW
3025 goto out_teardown;
3026 }
3027
3028 ret = vfio_get_dev_region_info(&vdev->vbasedev,
3029 VFIO_REGION_TYPE_PCI_VENDOR_TYPE | PCI_VENDOR_ID_INTEL,
3030 VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION, &opregion);
3031 if (ret) {
1a22aca1 3032 error_setg_errno(errp, -ret,
426ec904 3033 "does not support requested IGD OpRegion feature");
6ced0bba
AW
3034 goto out_teardown;
3035 }
3036
1a22aca1 3037 ret = vfio_pci_igd_opregion_init(vdev, opregion, errp);
6ced0bba
AW
3038 g_free(opregion);
3039 if (ret) {
6ced0bba
AW
3040 goto out_teardown;
3041 }
3042 }
3043
4b5d5e87
AW
3044 /* QEMU emulates all of MSI & MSIX */
3045 if (pdev->cap_present & QEMU_PCI_CAP_MSIX) {
3046 memset(vdev->emulated_config_bits + pdev->msix_cap, 0xff,
3047 MSIX_CAP_LENGTH);
3048 }
3049
3050 if (pdev->cap_present & QEMU_PCI_CAP_MSI) {
3051 memset(vdev->emulated_config_bits + pdev->msi_cap, 0xff,
3052 vdev->msi_cap_size);
3053 }
3054
65501a74 3055 if (vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1)) {
bc72ad67 3056 vdev->intx.mmap_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
ea486926 3057 vfio_intx_mmap_enable, vdev);
ad54dbd8
DG
3058 pci_device_set_intx_routing_notifier(&vdev->pdev,
3059 vfio_intx_routing_notifier);
c5478fea
DG
3060 vdev->irqchip_change_notifier.notify = vfio_irqchip_change;
3061 kvm_irqchip_add_change_notifier(&vdev->irqchip_change_notifier);
1a22aca1 3062 ret = vfio_intx_enable(vdev, errp);
65501a74 3063 if (ret) {
c5478fea 3064 goto out_deregister;
65501a74
AW
3065 }
3066 }
3067
a9994687
GH
3068 if (vdev->display != ON_OFF_AUTO_OFF) {
3069 ret = vfio_display_probe(vdev, errp);
3070 if (ret) {
c5478fea 3071 goto out_deregister;
a9994687
GH
3072 }
3073 }
b290659f
GH
3074 if (vdev->enable_ramfb && vdev->dpy == NULL) {
3075 error_setg(errp, "ramfb=on requires display=on");
c5478fea 3076 goto out_deregister;
b290659f 3077 }
c62a0c7c
GH
3078 if (vdev->display_xres || vdev->display_yres) {
3079 if (vdev->dpy == NULL) {
3080 error_setg(errp, "xres and yres properties require display=on");
c5478fea 3081 goto out_deregister;
c62a0c7c
GH
3082 }
3083 if (vdev->dpy->edid_regs == NULL) {
3084 error_setg(errp, "xres and yres properties need edid support");
c5478fea 3085 goto out_deregister;
c62a0c7c
GH
3086 }
3087 }
a9994687 3088
936555bc 3089 if (vfio_pci_is(vdev, PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID)) {
ec132efa
AK
3090 ret = vfio_pci_nvidia_v100_ram_init(vdev, errp);
3091 if (ret && ret != -ENODEV) {
3092 error_report("Failed to setup NVIDIA V100 GPU RAM");
3093 }
3094 }
3095
936555bc 3096 if (vfio_pci_is(vdev, PCI_VENDOR_ID_IBM, PCI_ANY_ID)) {
ec132efa
AK
3097 ret = vfio_pci_nvlink2_init(vdev, errp);
3098 if (ret && ret != -ENODEV) {
3099 error_report("Failed to setup NVlink2 bridge");
3100 }
3101 }
3102
a2265105
KW
3103 if (!pdev->failover_pair_id) {
3104 ret = vfio_migration_probe(&vdev->vbasedev, errp);
3105 if (ret) {
3106 error_report("%s: Migration disabled", vdev->vbasedev.name);
3107 }
3108 }
3109
7b4b0e9e 3110 vfio_register_err_notifier(vdev);
47cbe50c 3111 vfio_register_req_notifier(vdev);
c9c50009 3112 vfio_setup_resetfn_quirk(vdev);
c29029dd 3113
1a22aca1 3114 return;
65501a74 3115
c5478fea 3116out_deregister:
65501a74 3117 pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
c5478fea
DG
3118 kvm_irqchip_remove_change_notifier(&vdev->irqchip_change_notifier);
3119out_teardown:
65501a74 3120 vfio_teardown_msi(vdev);
2d82f8a3 3121 vfio_bars_exit(vdev);
426ec904 3122error:
c3b8e3e0 3123 error_prepend(errp, VFIO_MSG_PREFIX, vdev->vbasedev.name);
77a10d04
PB
3124}
3125
3126static void vfio_instance_finalize(Object *obj)
3127{
01b46064 3128 VFIOPCIDevice *vdev = VFIO_PCI(obj);
77a10d04
PB
3129 VFIOGroup *group = vdev->vbasedev.group;
3130
a9994687 3131 vfio_display_finalize(vdev);
2d82f8a3 3132 vfio_bars_finalize(vdev);
4b5d5e87 3133 g_free(vdev->emulated_config_bits);
77a10d04 3134 g_free(vdev->rom);
c4c45e94
AW
3135 /*
3136 * XXX Leaking igd_opregion is not an oversight, we can't remove the
3137 * fw_cfg entry therefore leaking this allocation seems like the safest
3138 * option.
3139 *
3140 * g_free(vdev->igd_opregion);
3141 */
65501a74
AW
3142 vfio_put_device(vdev);
3143 vfio_put_group(group);
65501a74
AW
3144}
3145
3146static void vfio_exitfn(PCIDevice *pdev)
3147{
01b46064 3148 VFIOPCIDevice *vdev = VFIO_PCI(pdev);
65501a74 3149
47cbe50c 3150 vfio_unregister_req_notifier(vdev);
7b4b0e9e 3151 vfio_unregister_err_notifier(vdev);
65501a74 3152 pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
0446f812
PX
3153 if (vdev->irqchip_change_notifier.notify) {
3154 kvm_irqchip_remove_change_notifier(&vdev->irqchip_change_notifier);
3155 }
65501a74 3156 vfio_disable_interrupts(vdev);
ea486926 3157 if (vdev->intx.mmap_timer) {
bc72ad67 3158 timer_free(vdev->intx.mmap_timer);
ea486926 3159 }
65501a74 3160 vfio_teardown_msi(vdev);
2d82f8a3 3161 vfio_bars_exit(vdev);
a2265105 3162 vfio_migration_finalize(&vdev->vbasedev);
65501a74
AW
3163}
3164
3165static void vfio_pci_reset(DeviceState *dev)
3166{
01b46064 3167 VFIOPCIDevice *vdev = VFIO_PCI(dev);
65501a74 3168
df92ee44 3169 trace_vfio_pci_reset(vdev->vbasedev.name);
5834a83f 3170
f16f39c3 3171 vfio_pci_pre_reset(vdev);
ba661818 3172
8983e3e3
TZ
3173 if (vdev->display != ON_OFF_AUTO_OFF) {
3174 vfio_display_reset(vdev);
3175 }
3176
5655f931
AW
3177 if (vdev->resetfn && !vdev->resetfn(vdev)) {
3178 goto post_reset;
3179 }
3180
b47d8efa
EA
3181 if (vdev->vbasedev.reset_works &&
3182 (vdev->has_flr || !vdev->has_pm_reset) &&
5546a621 3183 !ioctl(vdev->vbasedev.fd, VFIO_DEVICE_RESET)) {
df92ee44 3184 trace_vfio_pci_reset_flr(vdev->vbasedev.name);
f16f39c3 3185 goto post_reset;
ba661818
AW
3186 }
3187
f16f39c3
AW
3188 /* See if we can do our own bus reset */
3189 if (!vfio_pci_hot_reset_one(vdev)) {
3190 goto post_reset;
3191 }
5834a83f 3192
f16f39c3 3193 /* If nothing else works and the device supports PM reset, use it */
b47d8efa 3194 if (vdev->vbasedev.reset_works && vdev->has_pm_reset &&
5546a621 3195 !ioctl(vdev->vbasedev.fd, VFIO_DEVICE_RESET)) {
df92ee44 3196 trace_vfio_pci_reset_pm(vdev->vbasedev.name);
f16f39c3 3197 goto post_reset;
65501a74 3198 }
5834a83f 3199
f16f39c3
AW
3200post_reset:
3201 vfio_pci_post_reset(vdev);
65501a74
AW
3202}
3203
abc5b3bf
GA
3204static void vfio_instance_init(Object *obj)
3205{
3206 PCIDevice *pci_dev = PCI_DEVICE(obj);
01b46064 3207 VFIOPCIDevice *vdev = VFIO_PCI(obj);
abc5b3bf
GA
3208
3209 device_add_bootindex_property(obj, &vdev->bootindex,
3210 "bootindex", NULL,
40c2281c 3211 &pci_dev->qdev);
4a946268
EA
3212 vdev->host.domain = ~0U;
3213 vdev->host.bus = ~0U;
3214 vdev->host.slot = ~0U;
3215 vdev->host.function = ~0U;
dfbee78d
AW
3216
3217 vdev->nv_gpudirect_clique = 0xFF;
d61a363d
YB
3218
3219 /* QEMU_PCI_CAP_EXPRESS initialization does not depend on QEMU command
3220 * line, so there is no need to defer it to realize like other devices */
3221 pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS;
abc5b3bf
GA
3222}

static Property vfio_pci_dev_properties[] = {
    DEFINE_PROP_PCI_HOST_DEVADDR("host", VFIOPCIDevice, host),
    DEFINE_PROP_STRING("sysfsdev", VFIOPCIDevice, vbasedev.sysfsdev),
    DEFINE_PROP_ON_OFF_AUTO("x-pre-copy-dirty-page-tracking", VFIOPCIDevice,
                            vbasedev.pre_copy_dirty_page_tracking,
                            ON_OFF_AUTO_ON),
    DEFINE_PROP_ON_OFF_AUTO("display", VFIOPCIDevice,
                            display, ON_OFF_AUTO_OFF),
    DEFINE_PROP_UINT32("xres", VFIOPCIDevice, display_xres, 0),
    DEFINE_PROP_UINT32("yres", VFIOPCIDevice, display_yres, 0),
    DEFINE_PROP_UINT32("x-intx-mmap-timeout-ms", VFIOPCIDevice,
                       intx.mmap_timeout, 1100),
    DEFINE_PROP_BIT("x-vga", VFIOPCIDevice, features,
                    VFIO_FEATURE_ENABLE_VGA_BIT, false),
    DEFINE_PROP_BIT("x-req", VFIOPCIDevice, features,
                    VFIO_FEATURE_ENABLE_REQ_BIT, true),
    DEFINE_PROP_BIT("x-igd-opregion", VFIOPCIDevice, features,
                    VFIO_FEATURE_ENABLE_IGD_OPREGION_BIT, false),
    DEFINE_PROP_BOOL("x-enable-migration", VFIOPCIDevice,
                     vbasedev.enable_migration, false),
    DEFINE_PROP_BOOL("x-no-mmap", VFIOPCIDevice, vbasedev.no_mmap, false),
    DEFINE_PROP_BOOL("x-balloon-allowed", VFIOPCIDevice,
                     vbasedev.ram_block_discard_allowed, false),
    DEFINE_PROP_BOOL("x-no-kvm-intx", VFIOPCIDevice, no_kvm_intx, false),
    DEFINE_PROP_BOOL("x-no-kvm-msi", VFIOPCIDevice, no_kvm_msi, false),
    DEFINE_PROP_BOOL("x-no-kvm-msix", VFIOPCIDevice, no_kvm_msix, false),
    DEFINE_PROP_BOOL("x-no-geforce-quirks", VFIOPCIDevice,
                     no_geforce_quirks, false),
    DEFINE_PROP_BOOL("x-no-kvm-ioeventfd", VFIOPCIDevice, no_kvm_ioeventfd,
                     false),
    DEFINE_PROP_BOOL("x-no-vfio-ioeventfd", VFIOPCIDevice, no_vfio_ioeventfd,
                     false),
    DEFINE_PROP_UINT32("x-pci-vendor-id", VFIOPCIDevice, vendor_id, PCI_ANY_ID),
    DEFINE_PROP_UINT32("x-pci-device-id", VFIOPCIDevice, device_id, PCI_ANY_ID),
    DEFINE_PROP_UINT32("x-pci-sub-vendor-id", VFIOPCIDevice,
                       sub_vendor_id, PCI_ANY_ID),
    DEFINE_PROP_UINT32("x-pci-sub-device-id", VFIOPCIDevice,
                       sub_device_id, PCI_ANY_ID),
    DEFINE_PROP_UINT32("x-igd-gms", VFIOPCIDevice, igd_gms, 0),
    DEFINE_PROP_UNSIGNED_NODEFAULT("x-nv-gpudirect-clique", VFIOPCIDevice,
                                   nv_gpudirect_clique,
                                   qdev_prop_nv_gpudirect_clique, uint8_t),
    DEFINE_PROP_OFF_AUTO_PCIBAR("x-msix-relocation", VFIOPCIDevice, msix_relo,
                                OFF_AUTOPCIBAR_OFF),
    /*
     * TODO - support passed fds... is this necessary?
     * DEFINE_PROP_STRING("vfiofd", VFIOPCIDevice, vfiofd_name),
     * DEFINE_PROP_STRING("vfiogroupfd", VFIOPCIDevice, vfiogroupfd_name),
     */
    DEFINE_PROP_END_OF_LIST(),
};
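
/*
 * Illustrative usage (added commentary, not part of this file): with the
 * host device bound to the vfio-pci host driver, assignment is typically
 * requested via the "host" or "sysfsdev" property defined above, e.g.
 *
 *   -device vfio-pci,host=0000:01:00.0
 * or
 *   -device vfio-pci,sysfsdev=/sys/bus/pci/devices/0000:01:00.0
 *
 * where 0000:01:00.0 is a hypothetical address. The x-prefixed properties
 * are experimental/debug knobs and carry no stability guarantee.
 */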

static void vfio_pci_dev_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *pdc = PCI_DEVICE_CLASS(klass);

    dc->reset = vfio_pci_reset;
    device_class_set_props(dc, vfio_pci_dev_properties);
    dc->desc = "VFIO-based PCI device assignment";
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    pdc->realize = vfio_realize;
    pdc->exit = vfio_exitfn;
    pdc->config_read = vfio_pci_read_config;
    pdc->config_write = vfio_pci_write_config;
}

static const TypeInfo vfio_pci_dev_info = {
    .name = TYPE_VFIO_PCI,
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VFIOPCIDevice),
    .class_init = vfio_pci_dev_class_init,
    .instance_init = vfio_instance_init,
    .instance_finalize = vfio_instance_finalize,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_PCIE_DEVICE },
        { INTERFACE_CONVENTIONAL_PCI_DEVICE },
        { }
    },
};
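
/*
 * Note (added commentary): exposing both INTERFACE_PCIE_DEVICE and
 * INTERFACE_CONVENTIONAL_PCI_DEVICE lets the same type plug into either a
 * PCI Express or a conventional PCI bus; which personality applies is
 * determined by the bus the device is realized on.
 */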

static Property vfio_pci_dev_nohotplug_properties[] = {
    DEFINE_PROP_BOOL("ramfb", VFIOPCIDevice, enable_ramfb, false),
    DEFINE_PROP_END_OF_LIST(),
};

static void vfio_pci_nohotplug_dev_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    device_class_set_props(dc, vfio_pci_dev_nohotplug_properties);
    dc->hotpluggable = false;
}

static const TypeInfo vfio_pci_nohotplug_dev_info = {
    .name = TYPE_VFIO_PCI_NOHOTPLUG,
    .parent = TYPE_VFIO_PCI,
    .instance_size = sizeof(VFIOPCIDevice),
    .class_init = vfio_pci_nohotplug_dev_class_init,
};
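
/*
 * Note (added commentary, inferred from the code above): the nohotplug
 * subtype exists so the "ramfb" boot-display property is only offered on a
 * device that can never be hot(un)plugged, while the base vfio-pci type
 * remains hotpluggable.
 */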

static void register_vfio_pci_dev_type(void)
{
    type_register_static(&vfio_pci_dev_info);
    type_register_static(&vfio_pci_nohotplug_dev_info);
}

type_init(register_vfio_pci_dev_type)