/*
 * vfio based device assignment support
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Alex Williamson <alex.williamson@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Based on qemu-kvm device-assignment:
 *  Adapted for KVM by Qumranet.
 *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
 *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
 *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
 *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
 *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
 */

#include "qemu/osdep.h"
#include <linux/vfio.h>
#include <sys/ioctl.h>

#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/pci/pci_bridge.h"
#include "qemu/error-report.h"
#include "qemu/range.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "pci.h"
#include "trace.h"

#define MSIX_CAP_LENGTH 12

static void vfio_disable_interrupts(VFIOPCIDevice *vdev);
static void vfio_mmap_set_enabled(VFIOPCIDevice *vdev, bool enabled);

/*
 * Disabling BAR mmapping can be slow, but toggling it around INTx can
 * also be a huge overhead.  We try to get the best of both worlds by
 * waiting until an interrupt to disable mmaps (subsequent transitions
 * to the same state are effectively no overhead).  If the interrupt has
 * been serviced and the time gap is long enough, we re-enable mmaps for
 * performance.  This works well for things like graphics cards, which
 * may not use their interrupt at all and are penalized to an unusable
 * level by read/write BAR traps.  Other devices, like NICs, have more
 * regular interrupts and see much better latency by staying in non-mmap
 * mode.  We therefore set the default mmap_timeout such that a ping
 * is just enough to keep the mmap disabled.  Users can experiment with
 * other options with the x-intx-mmap-timeout-ms parameter (a value of
 * zero disables the timer).
 */
static void vfio_intx_mmap_enable(void *opaque)
{
    VFIOPCIDevice *vdev = opaque;

    if (vdev->intx.pending) {
        timer_mod(vdev->intx.mmap_timer,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vdev->intx.mmap_timeout);
        return;
    }

    vfio_mmap_set_enabled(vdev, true);
}
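
/*
 * Illustrative usage sketch, not part of the original QEMU source: the
 * experimental knob described above can be set on the command line; the
 * host address below is hypothetical.
 *
 *   qemu-system-x86_64 ... \
 *       -device vfio-pci,host=0000:01:00.0,x-intx-mmap-timeout-ms=0
 *
 * With a timeout of zero the timer never fires, so once an INTx interrupt
 * has disabled BAR mmaps they stay disabled until INTx is torn down.
 */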

static void vfio_intx_interrupt(void *opaque)
{
    VFIOPCIDevice *vdev = opaque;

    if (!event_notifier_test_and_clear(&vdev->intx.interrupt)) {
        return;
    }

    trace_vfio_intx_interrupt(vdev->vbasedev.name, 'A' + vdev->intx.pin);

    vdev->intx.pending = true;
    pci_irq_assert(&vdev->pdev);
    vfio_mmap_set_enabled(vdev, false);
    if (vdev->intx.mmap_timeout) {
        timer_mod(vdev->intx.mmap_timer,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vdev->intx.mmap_timeout);
    }
}

static void vfio_intx_eoi(VFIODevice *vbasedev)
{
    VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);

    if (!vdev->intx.pending) {
        return;
    }

    trace_vfio_intx_eoi(vbasedev->name);

    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);
    vfio_unmask_single_irqindex(vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
}

static void vfio_intx_enable_kvm(VFIOPCIDevice *vdev)
{
#ifdef CONFIG_KVM
    struct kvm_irqfd irqfd = {
        .fd = event_notifier_get_fd(&vdev->intx.interrupt),
        .gsi = vdev->intx.route.irq,
        .flags = KVM_IRQFD_FLAG_RESAMPLE,
    };
    struct vfio_irq_set *irq_set;
    int ret, argsz;
    int32_t *pfd;

    if (vdev->no_kvm_intx || !kvm_irqfds_enabled() ||
        vdev->intx.route.mode != PCI_INTX_ENABLED ||
        !kvm_resamplefds_enabled()) {
        return;
    }

    /* Get to a known interrupt state */
    qemu_set_fd_handler(irqfd.fd, NULL, NULL, vdev);
    vfio_mask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);

    /* Get an eventfd for resample/unmask */
    if (event_notifier_init(&vdev->intx.unmask, 0)) {
        error_report("vfio: Error: event_notifier_init failed eoi");
        goto fail;
    }

    /* KVM triggers it, VFIO listens for it */
    irqfd.resamplefd = event_notifier_get_fd(&vdev->intx.unmask);

    if (kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd)) {
        error_report("vfio: Error: Failed to setup resample irqfd: %m");
        goto fail_irqfd;
    }

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_UNMASK;
    irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;

    *pfd = irqfd.resamplefd;

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
    g_free(irq_set);
    if (ret) {
        error_report("vfio: Error: Failed to setup INTx unmask fd: %m");
        goto fail_vfio;
    }

    /* Let'em rip */
    vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);

    vdev->intx.kvm_accel = true;

    trace_vfio_intx_enable_kvm(vdev->vbasedev.name);

    return;

fail_vfio:
    irqfd.flags = KVM_IRQFD_FLAG_DEASSIGN;
    kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd);
fail_irqfd:
    event_notifier_cleanup(&vdev->intx.unmask);
fail:
    qemu_set_fd_handler(irqfd.fd, vfio_intx_interrupt, NULL, vdev);
    vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
#endif
}
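
/*
 * Illustrative sketch, not part of the original QEMU source: the
 * VFIO_DEVICE_SET_IRQS pattern above (allocate a variable-length
 * vfio_irq_set, append one eventfd, ioctl, free) recurs throughout this
 * file.  A hypothetical helper covering the single-eventfd case might look
 * like this; the name and signature are invented for illustration.
 */
static int __attribute__((unused))
vfio_set_irq_eventfd_sketch(int device_fd, uint32_t index, uint32_t subindex,
                            uint32_t action, int32_t eventfd)
{
    struct vfio_irq_set *irq_set;
    int argsz = sizeof(*irq_set) + sizeof(eventfd);
    int ret;

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | action; /* e.g. TRIGGER */
    irq_set->index = index;        /* e.g. VFIO_PCI_INTX_IRQ_INDEX */
    irq_set->start = subindex;
    irq_set->count = 1;
    memcpy(&irq_set->data, &eventfd, sizeof(eventfd));

    ret = ioctl(device_fd, VFIO_DEVICE_SET_IRQS, irq_set) ? -errno : 0;
    g_free(irq_set);
    return ret;
}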

static void vfio_intx_disable_kvm(VFIOPCIDevice *vdev)
{
#ifdef CONFIG_KVM
    struct kvm_irqfd irqfd = {
        .fd = event_notifier_get_fd(&vdev->intx.interrupt),
        .gsi = vdev->intx.route.irq,
        .flags = KVM_IRQFD_FLAG_DEASSIGN,
    };

    if (!vdev->intx.kvm_accel) {
        return;
    }

    /*
     * Get to a known state, hardware masked, QEMU ready to accept new
     * interrupts, QEMU IRQ de-asserted.
     */
    vfio_mask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);

    /* Tell KVM to stop listening for an INTx irqfd */
    if (kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd)) {
        error_report("vfio: Error: Failed to disable INTx irqfd: %m");
    }

    /* We only need to close the eventfd for VFIO to cleanup the kernel side */
    event_notifier_cleanup(&vdev->intx.unmask);

    /* QEMU starts listening for interrupt events. */
    qemu_set_fd_handler(irqfd.fd, vfio_intx_interrupt, NULL, vdev);

    vdev->intx.kvm_accel = false;

    /* If we've missed an event, let it re-fire through QEMU */
    vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);

    trace_vfio_intx_disable_kvm(vdev->vbasedev.name);
#endif
}

static void vfio_intx_update(PCIDevice *pdev)
{
    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
    PCIINTxRoute route;

    if (vdev->interrupt != VFIO_INT_INTx) {
        return;
    }

    route = pci_device_route_intx_to_irq(&vdev->pdev, vdev->intx.pin);

    if (!pci_intx_route_changed(&vdev->intx.route, &route)) {
        return; /* Nothing changed */
    }

    trace_vfio_intx_update(vdev->vbasedev.name,
                           vdev->intx.route.irq, route.irq);

    vfio_intx_disable_kvm(vdev);

    vdev->intx.route = route;

    if (route.mode != PCI_INTX_ENABLED) {
        return;
    }

    vfio_intx_enable_kvm(vdev);

    /* Re-enable the interrupt in case we missed an EOI */
    vfio_intx_eoi(&vdev->vbasedev);
}

static int vfio_intx_enable(VFIOPCIDevice *vdev)
{
    uint8_t pin = vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1);
    int ret, argsz;
    struct vfio_irq_set *irq_set;
    int32_t *pfd;

    if (!pin) {
        return 0;
    }

    vfio_disable_interrupts(vdev);

    vdev->intx.pin = pin - 1; /* Pin A (1) -> irq[0] */
    pci_config_set_interrupt_pin(vdev->pdev.config, pin);

#ifdef CONFIG_KVM
    /*
     * Only conditional to avoid generating error messages on platforms
     * where we won't actually use the result anyway.
     */
    if (kvm_irqfds_enabled() && kvm_resamplefds_enabled()) {
        vdev->intx.route = pci_device_route_intx_to_irq(&vdev->pdev,
                                                        vdev->intx.pin);
    }
#endif

    ret = event_notifier_init(&vdev->intx.interrupt, 0);
    if (ret) {
        error_report("vfio: Error: event_notifier_init failed");
        return ret;
    }

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;

    *pfd = event_notifier_get_fd(&vdev->intx.interrupt);
    qemu_set_fd_handler(*pfd, vfio_intx_interrupt, NULL, vdev);

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
    g_free(irq_set);
    if (ret) {
        error_report("vfio: Error: Failed to setup INTx fd: %m");
        /* Don't dereference pfd here; it points into the freed irq_set. */
        qemu_set_fd_handler(event_notifier_get_fd(&vdev->intx.interrupt),
                            NULL, NULL, vdev);
        event_notifier_cleanup(&vdev->intx.interrupt);
        return -errno;
    }

    vfio_intx_enable_kvm(vdev);

    vdev->interrupt = VFIO_INT_INTx;

    trace_vfio_intx_enable(vdev->vbasedev.name);

    return 0;
}

static void vfio_intx_disable(VFIOPCIDevice *vdev)
{
    int fd;

    timer_del(vdev->intx.mmap_timer);
    vfio_intx_disable_kvm(vdev);
    vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);
    vfio_mmap_set_enabled(vdev, true);

    fd = event_notifier_get_fd(&vdev->intx.interrupt);
    qemu_set_fd_handler(fd, NULL, NULL, vdev);
    event_notifier_cleanup(&vdev->intx.interrupt);

    vdev->interrupt = VFIO_INT_NONE;

    trace_vfio_intx_disable(vdev->vbasedev.name);
}

/*
 * MSI/X
 */
static void vfio_msi_interrupt(void *opaque)
{
    VFIOMSIVector *vector = opaque;
    VFIOPCIDevice *vdev = vector->vdev;
    MSIMessage (*get_msg)(PCIDevice *dev, unsigned vector);
    void (*notify)(PCIDevice *dev, unsigned vector);
    MSIMessage msg;
    int nr = vector - vdev->msi_vectors;

    if (!event_notifier_test_and_clear(&vector->interrupt)) {
        return;
    }

    if (vdev->interrupt == VFIO_INT_MSIX) {
        get_msg = msix_get_message;
        notify = msix_notify;

        /* A masked vector firing needs to use the PBA, enable it */
        if (msix_is_masked(&vdev->pdev, nr)) {
            set_bit(nr, vdev->msix->pending);
            memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, true);
            trace_vfio_msix_pba_enable(vdev->vbasedev.name);
        }
    } else if (vdev->interrupt == VFIO_INT_MSI) {
        get_msg = msi_get_message;
        notify = msi_notify;
    } else {
        abort();
    }

    msg = get_msg(&vdev->pdev, nr);
    trace_vfio_msi_interrupt(vdev->vbasedev.name, nr, msg.address, msg.data);
    notify(&vdev->pdev, nr);
}

static int vfio_enable_vectors(VFIOPCIDevice *vdev, bool msix)
{
    struct vfio_irq_set *irq_set;
    int ret = 0, i, argsz;
    int32_t *fds;

    argsz = sizeof(*irq_set) + (vdev->nr_vectors * sizeof(*fds));

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = msix ? VFIO_PCI_MSIX_IRQ_INDEX : VFIO_PCI_MSI_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = vdev->nr_vectors;
    fds = (int32_t *)&irq_set->data;

    for (i = 0; i < vdev->nr_vectors; i++) {
        int fd = -1;

        /*
         * MSI vs MSI-X - The guest has direct access to MSI mask and pending
         * bits, so for MSI we always use the KVM signaling path at setup
         * time.  MSI-X mask and pending bits are emulated, so we want to use
         * the KVM signaling path only when configured and unmasked.
         */
        if (vdev->msi_vectors[i].use) {
            if (vdev->msi_vectors[i].virq < 0 ||
                (msix && msix_is_masked(&vdev->pdev, i))) {
                fd = event_notifier_get_fd(&vdev->msi_vectors[i].interrupt);
            } else {
                fd = event_notifier_get_fd(&vdev->msi_vectors[i].kvm_interrupt);
            }
        }

        fds[i] = fd;
    }

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);

    g_free(irq_set);

    return ret;
}
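
/*
 * Worked example, not part of the original QEMU source: suppose three
 * MSI-X vectors, where vector 0 has a KVM route and is unmasked, vector 1
 * has a KVM route but is currently masked, and vector 2 never got a KVM
 * route.  The loop above then builds:
 *
 *   fds[0] = kvm_interrupt eventfd  (configured, unmasked -> KVM bypass)
 *   fds[1] = interrupt eventfd      (masked -> trap to QEMU, PBA emulation)
 *   fds[2] = interrupt eventfd      (no virq -> userspace delivery)
 */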

static void vfio_add_kvm_msi_virq(VFIOPCIDevice *vdev, VFIOMSIVector *vector,
                                  MSIMessage *msg, bool msix)
{
    int virq;

    if ((msix && vdev->no_kvm_msix) || (!msix && vdev->no_kvm_msi) || !msg) {
        return;
    }

    if (event_notifier_init(&vector->kvm_interrupt, 0)) {
        return;
    }

    virq = kvm_irqchip_add_msi_route(kvm_state, *msg, &vdev->pdev);
    if (virq < 0) {
        event_notifier_cleanup(&vector->kvm_interrupt);
        return;
    }

    if (kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, &vector->kvm_interrupt,
                                           NULL, virq) < 0) {
        kvm_irqchip_release_virq(kvm_state, virq);
        event_notifier_cleanup(&vector->kvm_interrupt);
        return;
    }

    vector->virq = virq;
}

static void vfio_remove_kvm_msi_virq(VFIOMSIVector *vector)
{
    kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, &vector->kvm_interrupt,
                                          vector->virq);
    kvm_irqchip_release_virq(kvm_state, vector->virq);
    vector->virq = -1;
    event_notifier_cleanup(&vector->kvm_interrupt);
}

static void vfio_update_kvm_msi_virq(VFIOMSIVector *vector, MSIMessage msg,
                                     PCIDevice *pdev)
{
    kvm_irqchip_update_msi_route(kvm_state, vector->virq, msg, pdev);
}
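
/*
 * Editor's summary, not part of the original QEMU source: the two delivery
 * paths the helpers above plumb together, end to end.
 *
 *   KVM bypass:  device -> kvm_interrupt eventfd -> KVM irqfd -> guest MSI
 *   userspace:   device -> interrupt eventfd -> vfio_msi_interrupt() ->
 *                msi_notify()/msix_notify() -> guest MSI
 *
 * vfio_update_kvm_msi_virq() only rewrites the routing entry when the guest
 * changes the message address/data; it does not touch the eventfd plumbing.
 */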

static int vfio_msix_vector_do_use(PCIDevice *pdev, unsigned int nr,
                                   MSIMessage *msg, IOHandler *handler)
{
    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
    VFIOMSIVector *vector;
    int ret;

    trace_vfio_msix_vector_do_use(vdev->vbasedev.name, nr);

    vector = &vdev->msi_vectors[nr];

    if (!vector->use) {
        vector->vdev = vdev;
        vector->virq = -1;
        if (event_notifier_init(&vector->interrupt, 0)) {
            error_report("vfio: Error: event_notifier_init failed");
        }
        vector->use = true;
        msix_vector_use(pdev, nr);
    }

    qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                        handler, NULL, vector);

    /*
     * Attempt to enable route through KVM irqchip,
     * default to userspace handling if unavailable.
     */
    if (vector->virq >= 0) {
        if (!msg) {
            vfio_remove_kvm_msi_virq(vector);
        } else {
            vfio_update_kvm_msi_virq(vector, *msg, pdev);
        }
    } else {
        vfio_add_kvm_msi_virq(vdev, vector, msg, true);
    }

    /*
     * We don't want the host to allocate all possible MSI vectors for a
     * device if they're not in use, so we shut down and incrementally
     * increase them as needed.
     */
    if (vdev->nr_vectors < nr + 1) {
        vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX);
        vdev->nr_vectors = nr + 1;
        ret = vfio_enable_vectors(vdev, true);
        if (ret) {
            error_report("vfio: failed to enable vectors, %d", ret);
        }
    } else {
        int argsz;
        struct vfio_irq_set *irq_set;
        int32_t *pfd;

        argsz = sizeof(*irq_set) + sizeof(*pfd);

        irq_set = g_malloc0(argsz);
        irq_set->argsz = argsz;
        irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
                         VFIO_IRQ_SET_ACTION_TRIGGER;
        irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
        irq_set->start = nr;
        irq_set->count = 1;
        pfd = (int32_t *)&irq_set->data;

        if (vector->virq >= 0) {
            *pfd = event_notifier_get_fd(&vector->kvm_interrupt);
        } else {
            *pfd = event_notifier_get_fd(&vector->interrupt);
        }

        ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
        g_free(irq_set);
        if (ret) {
            error_report("vfio: failed to modify vector, %d", ret);
        }
    }

    /* Disable PBA emulation when nothing more is pending. */
    clear_bit(nr, vdev->msix->pending);
    if (find_first_bit(vdev->msix->pending,
                       vdev->nr_vectors) == vdev->nr_vectors) {
        memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, false);
        trace_vfio_msix_pba_disable(vdev->vbasedev.name);
    }

    return 0;
}

static int vfio_msix_vector_use(PCIDevice *pdev,
                                unsigned int nr, MSIMessage msg)
{
    return vfio_msix_vector_do_use(pdev, nr, &msg, vfio_msi_interrupt);
}

static void vfio_msix_vector_release(PCIDevice *pdev, unsigned int nr)
{
    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
    VFIOMSIVector *vector = &vdev->msi_vectors[nr];

    trace_vfio_msix_vector_release(vdev->vbasedev.name, nr);

    /*
     * There are still old guests that mask and unmask vectors on every
     * interrupt.  If we're using QEMU bypass with a KVM irqfd, leave all of
     * the KVM setup in place, simply switch VFIO to use the non-bypass
     * eventfd.  We'll then fire the interrupt through QEMU and the MSI-X
     * core will mask the interrupt and set pending bits, allowing it to
     * be re-asserted on unmask.  Nothing to do if already using QEMU mode.
     */
    if (vector->virq >= 0) {
        int argsz;
        struct vfio_irq_set *irq_set;
        int32_t *pfd;

        argsz = sizeof(*irq_set) + sizeof(*pfd);

        irq_set = g_malloc0(argsz);
        irq_set->argsz = argsz;
        irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
                         VFIO_IRQ_SET_ACTION_TRIGGER;
        irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
        irq_set->start = nr;
        irq_set->count = 1;
        pfd = (int32_t *)&irq_set->data;

        *pfd = event_notifier_get_fd(&vector->interrupt);

        ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);

        g_free(irq_set);
    }
}

static void vfio_msix_enable(VFIOPCIDevice *vdev)
{
    vfio_disable_interrupts(vdev);

    vdev->msi_vectors = g_new0(VFIOMSIVector, vdev->msix->entries);

    vdev->interrupt = VFIO_INT_MSIX;

    /*
     * Some communication channels between VF & PF or PF & fw rely on the
     * physical state of the device and expect that enabling MSI-X from the
     * guest enables the same on the host.  When our guest is Linux, the
     * guest driver call to pci_enable_msix() sets the enable bit in the
     * MSI-X capability, but leaves the vector table masked.  We therefore
     * can't rely on a vector_use callback (from request_irq() in the guest)
     * to switch the physical device into MSI-X mode because that may come a
     * long time after pci_enable_msix().  This code enables vector 0 with
     * triggering to userspace, then immediately releases the vector, leaving
     * the physical device with no vectors enabled, but MSI-X enabled, just
     * like the guest view.
     */
    vfio_msix_vector_do_use(&vdev->pdev, 0, NULL, NULL);
    vfio_msix_vector_release(&vdev->pdev, 0);

    if (msix_set_vector_notifiers(&vdev->pdev, vfio_msix_vector_use,
                                  vfio_msix_vector_release, NULL)) {
        error_report("vfio: msix_set_vector_notifiers failed");
    }

    trace_vfio_msix_enable(vdev->vbasedev.name);
}

static void vfio_msi_enable(VFIOPCIDevice *vdev)
{
    int ret, i;

    vfio_disable_interrupts(vdev);

    vdev->nr_vectors = msi_nr_vectors_allocated(&vdev->pdev);
retry:
    vdev->msi_vectors = g_new0(VFIOMSIVector, vdev->nr_vectors);

    for (i = 0; i < vdev->nr_vectors; i++) {
        VFIOMSIVector *vector = &vdev->msi_vectors[i];
        MSIMessage msg = msi_get_message(&vdev->pdev, i);

        vector->vdev = vdev;
        vector->virq = -1;
        vector->use = true;

        if (event_notifier_init(&vector->interrupt, 0)) {
            error_report("vfio: Error: event_notifier_init failed");
        }

        qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                            vfio_msi_interrupt, NULL, vector);

        /*
         * Attempt to enable route through KVM irqchip,
         * default to userspace handling if unavailable.
         */
        vfio_add_kvm_msi_virq(vdev, vector, &msg, false);
    }

    /* Set interrupt type prior to possible interrupts */
    vdev->interrupt = VFIO_INT_MSI;

    ret = vfio_enable_vectors(vdev, false);
    if (ret) {
        if (ret < 0) {
            error_report("vfio: Error: Failed to setup MSI fds: %m");
        } else if (ret != vdev->nr_vectors) {
            error_report("vfio: Error: Failed to enable %d "
                         "MSI vectors, retry with %d", vdev->nr_vectors, ret);
        }

        for (i = 0; i < vdev->nr_vectors; i++) {
            VFIOMSIVector *vector = &vdev->msi_vectors[i];
            if (vector->virq >= 0) {
                vfio_remove_kvm_msi_virq(vector);
            }
            qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                                NULL, NULL, NULL);
            event_notifier_cleanup(&vector->interrupt);
        }

        g_free(vdev->msi_vectors);

        if (ret > 0 && ret != vdev->nr_vectors) {
            vdev->nr_vectors = ret;
            goto retry;
        }
        vdev->nr_vectors = 0;

        /*
         * Failing to set up MSI doesn't really fall within any
         * specification.  Let's try leaving interrupts disabled and hope
         * the guest figures out to fall back to INTx for this device.
         */
        error_report("vfio: Error: Failed to enable MSI");
        vdev->interrupt = VFIO_INT_NONE;

        return;
    }

    trace_vfio_msi_enable(vdev->vbasedev.name, vdev->nr_vectors);
}
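
/*
 * Worked example, not part of the original QEMU source: the retry path
 * above in action (made-up numbers).  If the guest enables 16 MSI vectors
 * but VFIO_DEVICE_SET_IRQS reports it can only back 8 of them, the positive
 * return lands in ret, all 16 notifiers are torn down, nr_vectors is
 * clamped to 8, and we jump back to retry to set up exactly 8 vectors.
 */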

static void vfio_msi_disable_common(VFIOPCIDevice *vdev)
{
    int i;

    for (i = 0; i < vdev->nr_vectors; i++) {
        VFIOMSIVector *vector = &vdev->msi_vectors[i];
        if (vdev->msi_vectors[i].use) {
            if (vector->virq >= 0) {
                vfio_remove_kvm_msi_virq(vector);
            }
            qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                                NULL, NULL, NULL);
            event_notifier_cleanup(&vector->interrupt);
        }
    }

    g_free(vdev->msi_vectors);
    vdev->msi_vectors = NULL;
    vdev->nr_vectors = 0;
    vdev->interrupt = VFIO_INT_NONE;

    vfio_intx_enable(vdev);
}

static void vfio_msix_disable(VFIOPCIDevice *vdev)
{
    int i;

    msix_unset_vector_notifiers(&vdev->pdev);

    /*
     * MSI-X will only release vectors if MSI-X is still enabled on the
     * device; check through the rest and release them ourselves if
     * necessary.
     */
    for (i = 0; i < vdev->nr_vectors; i++) {
        if (vdev->msi_vectors[i].use) {
            vfio_msix_vector_release(&vdev->pdev, i);
            msix_vector_unuse(&vdev->pdev, i);
        }
    }

    if (vdev->nr_vectors) {
        vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX);
    }

    vfio_msi_disable_common(vdev);

    memset(vdev->msix->pending, 0,
           BITS_TO_LONGS(vdev->msix->entries) * sizeof(unsigned long));

    trace_vfio_msix_disable(vdev->vbasedev.name);
}

static void vfio_msi_disable(VFIOPCIDevice *vdev)
{
    vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSI_IRQ_INDEX);
    vfio_msi_disable_common(vdev);

    trace_vfio_msi_disable(vdev->vbasedev.name);
}

static void vfio_update_msi(VFIOPCIDevice *vdev)
{
    int i;

    for (i = 0; i < vdev->nr_vectors; i++) {
        VFIOMSIVector *vector = &vdev->msi_vectors[i];
        MSIMessage msg;

        if (!vector->use || vector->virq < 0) {
            continue;
        }

        msg = msi_get_message(&vdev->pdev, i);
        vfio_update_kvm_msi_virq(vector, msg, &vdev->pdev);
    }
}

static void vfio_pci_load_rom(VFIOPCIDevice *vdev)
{
    struct vfio_region_info *reg_info;
    uint64_t size;
    off_t off = 0;
    ssize_t bytes;

    if (vfio_get_region_info(&vdev->vbasedev,
                             VFIO_PCI_ROM_REGION_INDEX, &reg_info)) {
        error_report("vfio: Error getting ROM info: %m");
        return;
    }

    trace_vfio_pci_load_rom(vdev->vbasedev.name, (unsigned long)reg_info->size,
                            (unsigned long)reg_info->offset,
                            (unsigned long)reg_info->flags);

    vdev->rom_size = size = reg_info->size;
    vdev->rom_offset = reg_info->offset;

    g_free(reg_info);

    if (!vdev->rom_size) {
        vdev->rom_read_failed = true;
        error_report("vfio-pci: Cannot read device rom at "
                     "%s", vdev->vbasedev.name);
        error_printf("Device option ROM contents are probably invalid "
                     "(check dmesg).\nSkip option ROM probe with rombar=0, "
                     "or load from file with romfile=\n");
        return;
    }

    vdev->rom = g_malloc(size);
    memset(vdev->rom, 0xff, size);

    while (size) {
        bytes = pread(vdev->vbasedev.fd, vdev->rom + off,
                      size, vdev->rom_offset + off);
        if (bytes == 0) {
            break;
        } else if (bytes > 0) {
            off += bytes;
            size -= bytes;
        } else {
            if (errno == EINTR || errno == EAGAIN) {
                continue;
            }
            error_report("vfio: Error reading device ROM: %m");
            break;
        }
    }

    /*
     * Test the ROM signature against our device: if the vendor is correct
     * but the device ID doesn't match, store the correct device ID and
     * recompute the checksum.  Intel IGD devices need this and are known
     * to have bogus checksums, so we can't simply adjust the checksum.
     */
    if (pci_get_word(vdev->rom) == 0xaa55 &&
        pci_get_word(vdev->rom + 0x18) + 8 < vdev->rom_size &&
        !memcmp(vdev->rom + pci_get_word(vdev->rom + 0x18), "PCIR", 4)) {
        uint16_t vid, did;

        vid = pci_get_word(vdev->rom + pci_get_word(vdev->rom + 0x18) + 4);
        did = pci_get_word(vdev->rom + pci_get_word(vdev->rom + 0x18) + 6);

        if (vid == vdev->vendor_id && did != vdev->device_id) {
            int i;
            uint8_t csum, *data = vdev->rom;

            pci_set_word(vdev->rom + pci_get_word(vdev->rom + 0x18) + 6,
                         vdev->device_id);
            data[6] = 0;

            for (csum = 0, i = 0; i < vdev->rom_size; i++) {
                csum += data[i];
            }

            data[6] = -csum;
        }
    }
}
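
/*
 * Worked example, not part of the original QEMU source: the option ROM
 * checksum rule used above is that all bytes of the image sum to zero
 * modulo 256, with the byte at offset 6 acting as the correction value.
 * With made-up numbers: if the patched image sums to 0x7d while data[6] is
 * zeroed, storing data[6] = -0x7d = 0x83 brings the total to
 * 0x100 == 0 (mod 256).
 */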

static uint64_t vfio_rom_read(void *opaque, hwaddr addr, unsigned size)
{
    VFIOPCIDevice *vdev = opaque;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } val;
    uint64_t data = 0;

    /* Load the ROM lazily when the guest tries to read it */
    if (unlikely(!vdev->rom && !vdev->rom_read_failed)) {
        vfio_pci_load_rom(vdev);
    }

    memcpy(&val, vdev->rom + addr,
           (addr < vdev->rom_size) ? MIN(size, vdev->rom_size - addr) : 0);

    switch (size) {
    case 1:
        data = val.byte;
        break;
    case 2:
        data = le16_to_cpu(val.word);
        break;
    case 4:
        data = le32_to_cpu(val.dword);
        break;
    default:
        hw_error("vfio: unsupported read size, %d bytes\n", size);
        break;
    }

    trace_vfio_rom_read(vdev->vbasedev.name, addr, size, data);

    return data;
}

static void vfio_rom_write(void *opaque, hwaddr addr,
                           uint64_t data, unsigned size)
{
}

static const MemoryRegionOps vfio_rom_ops = {
    .read = vfio_rom_read,
    .write = vfio_rom_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static void vfio_pci_size_rom(VFIOPCIDevice *vdev)
{
    uint32_t orig, size = cpu_to_le32((uint32_t)PCI_ROM_ADDRESS_MASK);
    off_t offset = vdev->config_offset + PCI_ROM_ADDRESS;
    DeviceState *dev = DEVICE(vdev);
    char *name;
    int fd = vdev->vbasedev.fd;

    if (vdev->pdev.romfile || !vdev->pdev.rom_bar) {
        /* Since pci handles romfile, just print a message and return */
        if (vfio_blacklist_opt_rom(vdev) && vdev->pdev.romfile) {
            error_printf("Warning: Device at %s is known to cause system "
                         "instability issues during option ROM execution. "
                         "Proceeding anyway since user specified romfile\n",
                         vdev->vbasedev.name);
        }
        return;
    }

    /*
     * Use the same size ROM BAR as the physical device.  The contents
     * will get filled in later when the guest tries to read it.
     */
    if (pread(fd, &orig, 4, offset) != 4 ||
        pwrite(fd, &size, 4, offset) != 4 ||
        pread(fd, &size, 4, offset) != 4 ||
        pwrite(fd, &orig, 4, offset) != 4) {
        error_report("%s(%s) failed: %m", __func__, vdev->vbasedev.name);
        return;
    }

    size = ~(le32_to_cpu(size) & PCI_ROM_ADDRESS_MASK) + 1;
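
    /*
     * Worked example, not part of the original QEMU source (made-up value):
     * if reading back the ROM BAR after writing all 1s yields 0xfffe0000,
     * masking with PCI_ROM_ADDRESS_MASK leaves 0xfffe0000, and
     * ~0xfffe0000 + 1 = 0x20000, i.e. a 128 KiB ROM.
     */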

    if (!size) {
        return;
    }

    if (vfio_blacklist_opt_rom(vdev)) {
        if (dev->opts && qemu_opt_get(dev->opts, "rombar")) {
            error_printf("Warning: Device at %s is known to cause system "
                         "instability issues during option ROM execution. "
                         "Proceeding anyway since user specified a non-zero "
                         "value for rombar\n", vdev->vbasedev.name);
        } else {
            error_printf("Warning: ROM loading for device at %s has been "
                         "disabled due to system instability issues. "
                         "Specify rombar=1 or romfile to force\n",
                         vdev->vbasedev.name);
            return;
        }
    }

    trace_vfio_pci_size_rom(vdev->vbasedev.name, size);

    name = g_strdup_printf("vfio[%s].rom", vdev->vbasedev.name);

    memory_region_init_io(&vdev->pdev.rom, OBJECT(vdev),
                          &vfio_rom_ops, vdev, name, size);
    g_free(name);

    pci_register_bar(&vdev->pdev, PCI_ROM_SLOT,
                     PCI_BASE_ADDRESS_SPACE_MEMORY, &vdev->pdev.rom);

    vdev->pdev.has_rom = true;
    vdev->rom_read_failed = false;
}

void vfio_vga_write(void *opaque, hwaddr addr,
                    uint64_t data, unsigned size)
{
    VFIOVGARegion *region = opaque;
    VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]);
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    off_t offset = vga->fd_offset + region->offset + addr;

    switch (size) {
    case 1:
        buf.byte = data;
        break;
    case 2:
        buf.word = cpu_to_le16(data);
        break;
    case 4:
        buf.dword = cpu_to_le32(data);
        break;
    default:
        hw_error("vfio: unsupported write size, %d bytes", size);
        break;
    }

    if (pwrite(vga->fd, &buf, size, offset) != size) {
        error_report("%s(,0x%"HWADDR_PRIx", 0x%"PRIx64", %d) failed: %m",
                     __func__, region->offset + addr, data, size);
    }

    trace_vfio_vga_write(region->offset + addr, data, size);
}

uint64_t vfio_vga_read(void *opaque, hwaddr addr, unsigned size)
{
    VFIOVGARegion *region = opaque;
    VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]);
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    uint64_t data = 0;
    off_t offset = vga->fd_offset + region->offset + addr;

    if (pread(vga->fd, &buf, size, offset) != size) {
        error_report("%s(,0x%"HWADDR_PRIx", %d) failed: %m",
                     __func__, region->offset + addr, size);
        return (uint64_t)-1;
    }

    switch (size) {
    case 1:
        data = buf.byte;
        break;
    case 2:
        data = le16_to_cpu(buf.word);
        break;
    case 4:
        data = le32_to_cpu(buf.dword);
        break;
    default:
        hw_error("vfio: unsupported read size, %d bytes", size);
        break;
    }

    trace_vfio_vga_read(region->offset + addr, size, data);

    return data;
}

static const MemoryRegionOps vfio_vga_ops = {
    .read = vfio_vga_read,
    .write = vfio_vga_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

/*
 * PCI config space
 */
uint32_t vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len)
{
    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
    uint32_t emu_bits = 0, emu_val = 0, phys_val = 0, val;

    memcpy(&emu_bits, vdev->emulated_config_bits + addr, len);
    emu_bits = le32_to_cpu(emu_bits);

    if (emu_bits) {
        emu_val = pci_default_read_config(pdev, addr, len);
    }

    if (~emu_bits & (0xffffffffU >> (32 - len * 8))) {
        ssize_t ret;

        ret = pread(vdev->vbasedev.fd, &phys_val, len,
                    vdev->config_offset + addr);
        if (ret != len) {
            error_report("%s(%s, 0x%x, 0x%x) failed: %m",
                         __func__, vdev->vbasedev.name, addr, len);
            return -errno;
        }
        phys_val = le32_to_cpu(phys_val);
    }

    val = (emu_val & emu_bits) | (phys_val & ~emu_bits);

    trace_vfio_pci_read_config(vdev->vbasedev.name, addr, len, val);

    return val;
}
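
/*
 * Worked example, not part of the original QEMU source (made-up values):
 * for a 2-byte read where only the low byte is emulated, emu_bits is
 * 0x00ff, so the merge above computes
 *
 *   val = (emu_val & 0x00ff) | (phys_val & 0xff00);
 *
 * i.e. QEMU's view supplies the emulated byte and the physical device
 * supplies the other.
 */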

void vfio_pci_write_config(PCIDevice *pdev,
                           uint32_t addr, uint32_t val, int len)
{
    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
    uint32_t val_le = cpu_to_le32(val);

    trace_vfio_pci_write_config(vdev->vbasedev.name, addr, val, len);

    /* Write everything to VFIO, let it filter out what we can't write */
    if (pwrite(vdev->vbasedev.fd, &val_le, len, vdev->config_offset + addr)
        != len) {
        error_report("%s(%s, 0x%x, 0x%x, 0x%x) failed: %m",
                     __func__, vdev->vbasedev.name, addr, val, len);
    }

    /* MSI/MSI-X Enabling/Disabling */
    if (pdev->cap_present & QEMU_PCI_CAP_MSI &&
        ranges_overlap(addr, len, pdev->msi_cap, vdev->msi_cap_size)) {
        int is_enabled, was_enabled = msi_enabled(pdev);

        pci_default_write_config(pdev, addr, val, len);

        is_enabled = msi_enabled(pdev);

        if (!was_enabled) {
            if (is_enabled) {
                vfio_msi_enable(vdev);
            }
        } else {
            if (!is_enabled) {
                vfio_msi_disable(vdev);
            } else {
                vfio_update_msi(vdev);
            }
        }
    } else if (pdev->cap_present & QEMU_PCI_CAP_MSIX &&
               ranges_overlap(addr, len, pdev->msix_cap, MSIX_CAP_LENGTH)) {
        int is_enabled, was_enabled = msix_enabled(pdev);

        pci_default_write_config(pdev, addr, val, len);

        is_enabled = msix_enabled(pdev);

        if (!was_enabled && is_enabled) {
            vfio_msix_enable(vdev);
        } else if (was_enabled && !is_enabled) {
            vfio_msix_disable(vdev);
        }
    } else {
        /* Write everything to QEMU to keep emulated bits correct */
        pci_default_write_config(pdev, addr, val, len);
    }
}

/*
 * Interrupt setup
 */
static void vfio_disable_interrupts(VFIOPCIDevice *vdev)
{
    /*
     * More complicated than it looks.  Disabling MSI/X transitions the
     * device to INTx mode (if supported).  Therefore we need to first
     * disable MSI/X and then clean up by disabling INTx.
     */
    if (vdev->interrupt == VFIO_INT_MSIX) {
        vfio_msix_disable(vdev);
    } else if (vdev->interrupt == VFIO_INT_MSI) {
        vfio_msi_disable(vdev);
    }

    if (vdev->interrupt == VFIO_INT_INTx) {
        vfio_intx_disable(vdev);
    }
}

static int vfio_msi_setup(VFIOPCIDevice *vdev, int pos)
{
    uint16_t ctrl;
    bool msi_64bit, msi_maskbit;
    int ret, entries;

    if (pread(vdev->vbasedev.fd, &ctrl, sizeof(ctrl),
              vdev->config_offset + pos + PCI_CAP_FLAGS) != sizeof(ctrl)) {
        return -errno;
    }
    ctrl = le16_to_cpu(ctrl);

    msi_64bit = !!(ctrl & PCI_MSI_FLAGS_64BIT);
    msi_maskbit = !!(ctrl & PCI_MSI_FLAGS_MASKBIT);
    entries = 1 << ((ctrl & PCI_MSI_FLAGS_QMASK) >> 1);

    trace_vfio_msi_setup(vdev->vbasedev.name, pos);

    ret = msi_init(&vdev->pdev, pos, entries, msi_64bit, msi_maskbit);
    if (ret < 0) {
        if (ret == -ENOTSUP) {
            return 0;
        }
        error_report("vfio: msi_init failed");
        return ret;
    }
    vdev->msi_cap_size = 0xa + (msi_maskbit ? 0xa : 0) + (msi_64bit ? 0x4 : 0);

    return 0;
}
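
/*
 * Worked example, not part of the original QEMU source: the capability
 * size computed above matches the MSI layouts in the PCI spec.  A 64-bit
 * capable device with per-vector masking gets 0xa + 0xa + 0x4 = 0x18 (24)
 * bytes, while a plain 32-bit MSI capability is the minimum 0xa (10) bytes.
 */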

static void vfio_pci_fixup_msix_region(VFIOPCIDevice *vdev)
{
    off_t start, end;
    VFIORegion *region = &vdev->bars[vdev->msix->table_bar].region;

    /*
     * We expect to find a single mmap covering the whole BAR; anything else
     * means it's either unsupported or already set up.
     */
    if (region->nr_mmaps != 1 || region->mmaps[0].offset ||
        region->size != region->mmaps[0].size) {
        return;
    }

    /* MSI-X table start and end aligned to host page size */
    start = vdev->msix->table_offset & qemu_real_host_page_mask;
    end = REAL_HOST_PAGE_ALIGN((uint64_t)vdev->msix->table_offset +
                               (vdev->msix->entries * PCI_MSIX_ENTRY_SIZE));

    /*
     * Does the MSI-X table cover the beginning of the BAR?  The whole BAR?
     * NB - Host page size is necessarily a power of two and so is the PCI
     * BAR (not counting EA yet), therefore if we have host page aligned
     * @start and @end, then any remainder of the BAR before or after those
     * must be at least host page sized and therefore mmap'able.
     */
    if (!start) {
        if (end >= region->size) {
            region->nr_mmaps = 0;
            g_free(region->mmaps);
            region->mmaps = NULL;
            trace_vfio_msix_fixup(vdev->vbasedev.name,
                                  vdev->msix->table_bar, 0, 0);
        } else {
            region->mmaps[0].offset = end;
            region->mmaps[0].size = region->size - end;
            trace_vfio_msix_fixup(vdev->vbasedev.name,
                                  vdev->msix->table_bar, region->mmaps[0].offset,
                                  region->mmaps[0].offset + region->mmaps[0].size);
        }

    /* Maybe it's aligned at the end of the BAR */
    } else if (end >= region->size) {
        region->mmaps[0].size = start;
        trace_vfio_msix_fixup(vdev->vbasedev.name,
                              vdev->msix->table_bar, region->mmaps[0].offset,
                              region->mmaps[0].offset + region->mmaps[0].size);

    /* Otherwise it must split the BAR */
    } else {
        region->nr_mmaps = 2;
        region->mmaps = g_renew(VFIOMmap, region->mmaps, 2);

        memcpy(&region->mmaps[1], &region->mmaps[0], sizeof(VFIOMmap));

        region->mmaps[0].size = start;
        trace_vfio_msix_fixup(vdev->vbasedev.name,
                              vdev->msix->table_bar, region->mmaps[0].offset,
                              region->mmaps[0].offset + region->mmaps[0].size);

        region->mmaps[1].offset = end;
        region->mmaps[1].size = region->size - end;
        trace_vfio_msix_fixup(vdev->vbasedev.name,
                              vdev->msix->table_bar, region->mmaps[1].offset,
                              region->mmaps[1].offset + region->mmaps[1].size);
    }
}
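
/*
 * Worked example, not part of the original QEMU source (made-up geometry):
 * with 4 KiB host pages, a 16 KiB BAR and a 256-entry table at offset
 * 0x3000 (256 * PCI_MSIX_ENTRY_SIZE = 0x1000 bytes), start = 0x3000 and
 * end = 0x4000 = BAR size.  That is the "aligned at the end" case above,
 * so the single mmap is trimmed to cover [0x0, 0x3000).
 */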

/*
 * We don't have any control over how pci_add_capability() inserts
 * capabilities into the chain.  In order to setup MSI-X we need a
 * MemoryRegion for the BAR.  In order to setup the BAR and not
 * attempt to mmap the MSI-X table area, which VFIO won't allow, we
 * need to first look for where the MSI-X table lives.  So we
 * unfortunately split MSI-X setup across two functions.
 */
static int vfio_msix_early_setup(VFIOPCIDevice *vdev)
{
    uint8_t pos;
    uint16_t ctrl;
    uint32_t table, pba;
    int fd = vdev->vbasedev.fd;
    VFIOMSIXInfo *msix;

    pos = pci_find_capability(&vdev->pdev, PCI_CAP_ID_MSIX);
    if (!pos) {
        return 0;
    }

    if (pread(fd, &ctrl, sizeof(ctrl),
              vdev->config_offset + pos + PCI_MSIX_FLAGS) != sizeof(ctrl)) {
        return -errno;
    }

    if (pread(fd, &table, sizeof(table),
              vdev->config_offset + pos + PCI_MSIX_TABLE) != sizeof(table)) {
        return -errno;
    }

    if (pread(fd, &pba, sizeof(pba),
              vdev->config_offset + pos + PCI_MSIX_PBA) != sizeof(pba)) {
        return -errno;
    }

    ctrl = le16_to_cpu(ctrl);
    table = le32_to_cpu(table);
    pba = le32_to_cpu(pba);

    msix = g_malloc0(sizeof(*msix));
    msix->table_bar = table & PCI_MSIX_FLAGS_BIRMASK;
    msix->table_offset = table & ~PCI_MSIX_FLAGS_BIRMASK;
    msix->pba_bar = pba & PCI_MSIX_FLAGS_BIRMASK;
    msix->pba_offset = pba & ~PCI_MSIX_FLAGS_BIRMASK;
    msix->entries = (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;

    /*
     * Test the size of the pba_offset variable and catch if it extends
     * outside of the specified BAR.  If it does, we need to apply a
     * hardware-specific quirk if the device is known, or we have a broken
     * configuration.
     */
    if (msix->pba_offset >= vdev->bars[msix->pba_bar].region.size) {
        /*
         * Chelsio T5 Virtual Function devices are encoded as 0x58xx for T5
         * adapters.  The T5 hardware returns an incorrect value of 0x8000
         * for the VF PBA offset while the BAR itself is only 8k.  The
         * correct value is 0x1000, so we hard code that here.
         */
        if (vdev->vendor_id == PCI_VENDOR_ID_CHELSIO &&
            (vdev->device_id & 0xff00) == 0x5800) {
            msix->pba_offset = 0x1000;
        } else {
            error_report("vfio: Hardware reports invalid configuration, "
                         "MSIX PBA outside of specified BAR");
            g_free(msix);
            return -EINVAL;
        }
    }

    trace_vfio_msix_early_setup(vdev->vbasedev.name, pos, msix->table_bar,
                                msix->table_offset, msix->entries);
    vdev->msix = msix;

    vfio_pci_fixup_msix_region(vdev);

    return 0;
}
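
/*
 * Decode example, not part of the original QEMU source (made-up register
 * values): with ctrl = 0x001f, table = 0x00002003 and pba = 0x00003003,
 * the fields above work out to
 *
 *   entries      = (0x001f & PCI_MSIX_FLAGS_QSIZE) + 1 = 32
 *   table_bar    = 0x2003 & PCI_MSIX_FLAGS_BIRMASK     = 3
 *   table_offset = 0x2003 & ~PCI_MSIX_FLAGS_BIRMASK    = 0x2000
 *
 * and pba_bar/pba_offset decode the same way from pba.
 */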

static int vfio_msix_setup(VFIOPCIDevice *vdev, int pos)
{
    int ret;

    vdev->msix->pending = g_malloc0(BITS_TO_LONGS(vdev->msix->entries) *
                                    sizeof(unsigned long));
    ret = msix_init(&vdev->pdev, vdev->msix->entries,
                    vdev->bars[vdev->msix->table_bar].region.mem,
                    vdev->msix->table_bar, vdev->msix->table_offset,
                    vdev->bars[vdev->msix->pba_bar].region.mem,
                    vdev->msix->pba_bar, vdev->msix->pba_offset, pos);
    if (ret < 0) {
        if (ret == -ENOTSUP) {
            return 0;
        }
        error_report("vfio: msix_init failed");
        return ret;
    }

    /*
     * The PCI spec suggests that devices provide additional alignment for
     * MSI-X structures and avoid overlapping non-MSI-X related registers.
     * For an assigned device, this hopefully means that emulation of MSI-X
     * structures does not affect the performance of the device.  If devices
     * fail to provide that alignment, a significant performance penalty may
     * result, for instance Mellanox MT27500 VFs:
     * http://www.spinics.net/lists/kvm/msg125881.html
     *
     * The PBA is simply not that important for such a serious regression and
     * most drivers do not appear to look at it.  The solution for this is to
     * disable the PBA MemoryRegion unless it's being used.  We disable it
     * here and only enable it if a masked vector fires through QEMU.  As the
     * vector-use notifier is called, which occurs on unmask, we test whether
     * PBA emulation is needed and again disable if not.
     */
    memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, false);

    return 0;
}

static void vfio_teardown_msi(VFIOPCIDevice *vdev)
{
    msi_uninit(&vdev->pdev);

    if (vdev->msix) {
        msix_uninit(&vdev->pdev,
                    vdev->bars[vdev->msix->table_bar].region.mem,
                    vdev->bars[vdev->msix->pba_bar].region.mem);
        g_free(vdev->msix->pending);
    }
}

/*
 * Resource setup
 */
static void vfio_mmap_set_enabled(VFIOPCIDevice *vdev, bool enabled)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        vfio_region_mmaps_set_enabled(&vdev->bars[i].region, enabled);
    }
}

static void vfio_bar_setup(VFIOPCIDevice *vdev, int nr)
{
    VFIOBAR *bar = &vdev->bars[nr];
    uint32_t pci_bar;
    uint8_t type;
    int ret;

    /* Skip both unimplemented BARs and the upper half of 64-bit BARs. */
    if (!bar->region.size) {
        return;
    }

    /* Determine what type of BAR this is for registration */
    ret = pread(vdev->vbasedev.fd, &pci_bar, sizeof(pci_bar),
                vdev->config_offset + PCI_BASE_ADDRESS_0 + (4 * nr));
    if (ret != sizeof(pci_bar)) {
        error_report("vfio: Failed to read BAR %d (%m)", nr);
        return;
    }

    pci_bar = le32_to_cpu(pci_bar);
    bar->ioport = (pci_bar & PCI_BASE_ADDRESS_SPACE_IO);
    bar->mem64 = bar->ioport ? 0 : (pci_bar & PCI_BASE_ADDRESS_MEM_TYPE_64);
    type = pci_bar & (bar->ioport ? ~PCI_BASE_ADDRESS_IO_MASK :
                                    ~PCI_BASE_ADDRESS_MEM_MASK);

    if (vfio_region_mmap(&bar->region)) {
        error_report("Failed to mmap %s BAR %d. Performance may be slow",
                     vdev->vbasedev.name, nr);
    }

    pci_register_bar(&vdev->pdev, nr, type, bar->region.mem);
}

static void vfio_bars_setup(VFIOPCIDevice *vdev)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        vfio_bar_setup(vdev, i);
    }
}

static void vfio_bars_exit(VFIOPCIDevice *vdev)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        vfio_bar_quirk_exit(vdev, i);
        vfio_region_exit(&vdev->bars[i].region);
    }

    if (vdev->vga) {
        pci_unregister_vga(&vdev->pdev);
        vfio_vga_quirk_exit(vdev);
    }
}

static void vfio_bars_finalize(VFIOPCIDevice *vdev)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        vfio_bar_quirk_finalize(vdev, i);
        vfio_region_finalize(&vdev->bars[i].region);
    }

    if (vdev->vga) {
        vfio_vga_quirk_finalize(vdev);
        for (i = 0; i < ARRAY_SIZE(vdev->vga->region); i++) {
            object_unparent(OBJECT(&vdev->vga->region[i].mem));
        }
        g_free(vdev->vga);
    }
}

/*
 * General setup
 */
static uint8_t vfio_std_cap_max_size(PCIDevice *pdev, uint8_t pos)
{
    uint8_t tmp;
    uint16_t next = PCI_CONFIG_SPACE_SIZE;

    for (tmp = pdev->config[PCI_CAPABILITY_LIST]; tmp;
         tmp = pdev->config[tmp + PCI_CAP_LIST_NEXT]) {
        if (tmp > pos && tmp < next) {
            next = tmp;
        }
    }

    return next - pos;
}
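
/*
 * Worked example, not part of the original QEMU source (made-up layout):
 * with standard capabilities at 0x40, 0x50 and 0x60, the capability at
 * 0x50 is sized 0x60 - 0x50 = 0x10 bytes, and the last one, 0x60, gets
 * 0x100 - 0x60 = 0xa0 bytes since nothing follows it before
 * PCI_CONFIG_SPACE_SIZE.
 */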

static uint16_t vfio_ext_cap_max_size(const uint8_t *config, uint16_t pos)
{
    uint16_t tmp, next = PCIE_CONFIG_SPACE_SIZE;

    for (tmp = PCI_CONFIG_SPACE_SIZE; tmp;
         tmp = PCI_EXT_CAP_NEXT(pci_get_long(config + tmp))) {
        if (tmp > pos && tmp < next) {
            next = tmp;
        }
    }

    return next - pos;
}

static void vfio_set_word_bits(uint8_t *buf, uint16_t val, uint16_t mask)
{
    pci_set_word(buf, (pci_get_word(buf) & ~mask) | val);
}

static void vfio_add_emulated_word(VFIOPCIDevice *vdev, int pos,
                                   uint16_t val, uint16_t mask)
{
    vfio_set_word_bits(vdev->pdev.config + pos, val, mask);
    vfio_set_word_bits(vdev->pdev.wmask + pos, ~mask, mask);
    vfio_set_word_bits(vdev->emulated_config_bits + pos, mask, mask);
}

static void vfio_set_long_bits(uint8_t *buf, uint32_t val, uint32_t mask)
{
    pci_set_long(buf, (pci_get_long(buf) & ~mask) | val);
}

static void vfio_add_emulated_long(VFIOPCIDevice *vdev, int pos,
                                   uint32_t val, uint32_t mask)
{
    vfio_set_long_bits(vdev->pdev.config + pos, val, mask);
    vfio_set_long_bits(vdev->pdev.wmask + pos, ~mask, mask);
    vfio_set_long_bits(vdev->emulated_config_bits + pos, mask, mask);
}

static int vfio_setup_pcie_cap(VFIOPCIDevice *vdev, int pos, uint8_t size)
{
    uint16_t flags;
    uint8_t type;

    flags = pci_get_word(vdev->pdev.config + pos + PCI_CAP_FLAGS);
    type = (flags & PCI_EXP_FLAGS_TYPE) >> 4;

    if (type != PCI_EXP_TYPE_ENDPOINT &&
        type != PCI_EXP_TYPE_LEG_END &&
        type != PCI_EXP_TYPE_RC_END) {

        error_report("vfio: Assignment of PCIe type 0x%x "
                     "devices is not currently supported", type);
        return -EINVAL;
    }

    if (!pci_bus_is_express(vdev->pdev.bus)) {
        PCIBus *bus = vdev->pdev.bus;
        PCIDevice *bridge;

        /*
         * Traditionally PCI device assignment exposes the PCIe capability
         * as-is on non-express buses, the reason being that some drivers,
         * for example tg3, simply assume that it's there.  However, when
         * we're running on a native PCIe machine type, like Q35, we need
         * to hide the PCIe capability.  The reason for this is twofold:
         * first, Windows guests get a Code 10 error when the PCIe capability
         * is exposed in this configuration, so express devices won't work
         * at all unless they're attached to express buses in the VM.
         * Second, a native PCIe machine introduces the possibility of fine
         * granularity IOMMUs supporting both translation and isolation.
         * Guest code to discover the IOMMU visibility of a device, such as
         * IOMMU grouping code on Linux, is very aware of device types and
         * valid transitions between bus types.  An express device on a non-
         * express bus is not a valid combination on bare metal systems.
         *
         * Drivers that require a PCIe capability to make the device
         * functional are simply going to need to have their devices placed
         * on a PCIe bus in the VM.
         */
        while (!pci_bus_is_root(bus)) {
            bridge = pci_bridge_get_device(bus);
            bus = bridge->bus;
        }

        if (pci_bus_is_express(bus)) {
            return 0;
        }

    } else if (pci_bus_is_root(vdev->pdev.bus)) {
        /*
         * On a Root Complex bus Endpoints become Root Complex Integrated
         * Endpoints, which changes the type and clears the LNK & LNK2
         * fields.
         */
        if (type == PCI_EXP_TYPE_ENDPOINT) {
            vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
                                   PCI_EXP_TYPE_RC_END << 4,
                                   PCI_EXP_FLAGS_TYPE);

            /* Link Capabilities, Status, and Control go away */
            if (size > PCI_EXP_LNKCTL) {
                vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP, 0, ~0);
                vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0);
                vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA, 0, ~0);

#ifndef PCI_EXP_LNKCAP2
#define PCI_EXP_LNKCAP2 44
#endif
#ifndef PCI_EXP_LNKSTA2
#define PCI_EXP_LNKSTA2 50
#endif
                /* Link 2 Capabilities, Status, and Control go away */
                if (size > PCI_EXP_LNKCAP2) {
                    vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP2, 0, ~0);
                    vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL2, 0, ~0);
                    vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA2, 0, ~0);
                }
            }

        } else if (type == PCI_EXP_TYPE_LEG_END) {
            /*
             * Legacy endpoints don't belong on the root complex.  Windows
             * seems to be happier with devices if we skip the capability.
             */
            return 0;
        }

    } else {
        /*
         * Convert Root Complex Integrated Endpoints to regular endpoints.
         * These devices don't support LNK/LNK2 capabilities, so make them up.
         */
        if (type == PCI_EXP_TYPE_RC_END) {
            vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
                                   PCI_EXP_TYPE_ENDPOINT << 4,
                                   PCI_EXP_FLAGS_TYPE);
            vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP,
                                   PCI_EXP_LNK_MLW_1 | PCI_EXP_LNK_LS_25, ~0);
            vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0);
        }

        /* Mark the Link Status bits as emulated to allow virtual negotiation */
        vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA,
                               pci_get_word(vdev->pdev.config + pos +
                                            PCI_EXP_LNKSTA),
                               PCI_EXP_LNKCAP_MLW | PCI_EXP_LNKCAP_SLS);
    }

    pos = pci_add_capability(&vdev->pdev, PCI_CAP_ID_EXP, pos, size);
    if (pos >= 0) {
        vdev->pdev.exp.exp_cap = pos;
    }

    return pos;
}

static void vfio_check_pcie_flr(VFIOPCIDevice *vdev, uint8_t pos)
{
    uint32_t cap = pci_get_long(vdev->pdev.config + pos + PCI_EXP_DEVCAP);

    if (cap & PCI_EXP_DEVCAP_FLR) {
        trace_vfio_check_pcie_flr(vdev->vbasedev.name);
        vdev->has_flr = true;
    }
}

static void vfio_check_pm_reset(VFIOPCIDevice *vdev, uint8_t pos)
{
    uint16_t csr = pci_get_word(vdev->pdev.config + pos + PCI_PM_CTRL);

    if (!(csr & PCI_PM_CTRL_NO_SOFT_RESET)) {
        trace_vfio_check_pm_reset(vdev->vbasedev.name);
        vdev->has_pm_reset = true;
    }
}

static void vfio_check_af_flr(VFIOPCIDevice *vdev, uint8_t pos)
{
    uint8_t cap = pci_get_byte(vdev->pdev.config + pos + PCI_AF_CAP);

    if ((cap & PCI_AF_CAP_TP) && (cap & PCI_AF_CAP_FLR)) {
        trace_vfio_check_af_flr(vdev->vbasedev.name);
        vdev->has_flr = true;
    }
}

static int vfio_add_std_cap(VFIOPCIDevice *vdev, uint8_t pos)
{
    PCIDevice *pdev = &vdev->pdev;
    uint8_t cap_id, next, size;
    int ret;

    cap_id = pdev->config[pos];
    next = pdev->config[pos + PCI_CAP_LIST_NEXT];

    /*
     * If it becomes important to configure capabilities to their actual
     * size, use this as the default when it's something we don't recognize.
     * Since QEMU doesn't actually handle many of the config accesses,
     * exact size doesn't seem worthwhile.
     */
    size = vfio_std_cap_max_size(pdev, pos);

    /*
     * pci_add_capability always inserts the new capability at the head
     * of the chain.  Therefore to end up with a chain that matches the
     * physical device, we insert from the end by making this recursive.
     * This is also why we pre-calculate size above as cached config space
     * will be changed as we unwind the stack.
     */
    if (next) {
        ret = vfio_add_std_cap(vdev, next);
        if (ret) {
            return ret;
        }
    } else {
        /* Begin the rebuild, use QEMU emulated list bits */
        pdev->config[PCI_CAPABILITY_LIST] = 0;
        vdev->emulated_config_bits[PCI_CAPABILITY_LIST] = 0xff;
        vdev->emulated_config_bits[PCI_STATUS] |= PCI_STATUS_CAP_LIST;
    }

    /* Use emulated next pointer to allow dropping caps */
    pci_set_byte(vdev->emulated_config_bits + pos + PCI_CAP_LIST_NEXT, 0xff);

    switch (cap_id) {
    case PCI_CAP_ID_MSI:
        ret = vfio_msi_setup(vdev, pos);
        break;
    case PCI_CAP_ID_EXP:
        vfio_check_pcie_flr(vdev, pos);
        ret = vfio_setup_pcie_cap(vdev, pos, size);
        break;
    case PCI_CAP_ID_MSIX:
        ret = vfio_msix_setup(vdev, pos);
        break;
    case PCI_CAP_ID_PM:
        vfio_check_pm_reset(vdev, pos);
        vdev->pm_cap = pos;
        ret = pci_add_capability(pdev, cap_id, pos, size);
        break;
    case PCI_CAP_ID_AF:
        vfio_check_af_flr(vdev, pos);
        ret = pci_add_capability(pdev, cap_id, pos, size);
        break;
    default:
        ret = pci_add_capability(pdev, cap_id, pos, size);
        break;
    }

    if (ret < 0) {
        error_report("vfio: %s Error adding PCI capability "
                     "0x%x[0x%x]@0x%x: %d", vdev->vbasedev.name,
                     cap_id, size, pos, ret);
        return ret;
    }

    return 0;
}

static int vfio_add_ext_cap(VFIOPCIDevice *vdev)
{
    PCIDevice *pdev = &vdev->pdev;
    uint32_t header;
    uint16_t cap_id, next, size;
    uint8_t cap_ver;
    uint8_t *config;

    /* Only add extended caps if we have them and the guest can see them */
    if (!pci_is_express(pdev) || !pci_bus_is_express(pdev->bus) ||
        !pci_get_long(pdev->config + PCI_CONFIG_SPACE_SIZE)) {
        return 0;
    }

    /*
     * pcie_add_capability always inserts the new capability at the tail
     * of the chain.  Therefore to end up with a chain that matches the
     * physical device, we cache the config space to avoid overwriting
     * the original config space when we parse the extended capabilities.
     */
    config = g_memdup(pdev->config, vdev->config_size);

    /*
     * Extended capabilities are chained with each pointing to the next, so
     * we can drop anything other than the head of the chain simply by
     * modifying the previous next pointer.  For the head of the chain, we
     * can modify the capability ID to something that cannot match a valid
     * capability.  ID 0 is reserved for this since absence of capabilities
     * is indicated by 0 for the ID, version, AND next pointer.  However,
     * pcie_add_capability() uses ID 0 as reserved for list management and
     * will incorrectly match and assert if we attempt to pre-load the head
     * of the chain with this ID.  Use ID 0xFFFF temporarily since it also
     * seems to be reserved, in part, for identifying absence of
     * capabilities in a root complex register block.  If the ID still
     * exists after adding capabilities, switch back to zero.  We'll mark
     * this entire first dword as emulated for this purpose.
     */
    pci_set_long(pdev->config + PCI_CONFIG_SPACE_SIZE,
                 PCI_EXT_CAP(0xFFFF, 0, 0));
    pci_set_long(pdev->wmask + PCI_CONFIG_SPACE_SIZE, 0);
    pci_set_long(vdev->emulated_config_bits + PCI_CONFIG_SPACE_SIZE, ~0);

    for (next = PCI_CONFIG_SPACE_SIZE; next;
         next = PCI_EXT_CAP_NEXT(pci_get_long(config + next))) {
        header = pci_get_long(config + next);
        cap_id = PCI_EXT_CAP_ID(header);
        cap_ver = PCI_EXT_CAP_VER(header);

        /*
         * If it becomes important to configure extended capabilities to
         * their actual size, use this as the default when it's something
         * we don't recognize.  Since QEMU doesn't actually handle many of
         * the config accesses, exact size doesn't seem worthwhile.
         */
        size = vfio_ext_cap_max_size(config, next);

        /* Use emulated next pointer to allow dropping extended caps */
        pci_long_test_and_set_mask(vdev->emulated_config_bits + next,
                                   PCI_EXT_CAP_NEXT_MASK);

        switch (cap_id) {
        case PCI_EXT_CAP_ID_SRIOV: /* Read-only VF BARs confuse OVMF */
            trace_vfio_add_ext_cap_dropped(vdev->vbasedev.name, cap_id, next);
            break;
        default:
            pcie_add_capability(pdev, cap_id, cap_ver, next, size);
        }
    }

    /* Cleanup chain head ID if necessary */
    if (pci_get_word(pdev->config + PCI_CONFIG_SPACE_SIZE) == 0xFFFF) {
        pci_set_word(pdev->config + PCI_CONFIG_SPACE_SIZE, 0);
    }

    g_free(config);
    return 0;
}
1844
1845 static int vfio_add_capabilities(VFIOPCIDevice *vdev)
1846 {
1847 PCIDevice *pdev = &vdev->pdev;
1848 int ret;
1849
1850 if (!(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST) ||
1851 !pdev->config[PCI_CAPABILITY_LIST]) {
1852 return 0; /* Nothing to add */
1853 }
1854
1855 ret = vfio_add_std_cap(vdev, pdev->config[PCI_CAPABILITY_LIST]);
1856 if (ret) {
1857 return ret;
1858 }
1859
1860 return vfio_add_ext_cap(vdev);
1861 }
1862
1863 static void vfio_pci_pre_reset(VFIOPCIDevice *vdev)
1864 {
1865 PCIDevice *pdev = &vdev->pdev;
1866 uint16_t cmd;
1867
1868 vfio_disable_interrupts(vdev);
1869
1870 /* Make sure the device is in D0 */
1871 if (vdev->pm_cap) {
1872 uint16_t pmcsr;
1873 uint8_t state;
1874
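/* PMCSR bits 1:0 encode the power state (0 == D0); clearing them wakes the device */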
1875 pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2);
1876 state = pmcsr & PCI_PM_CTRL_STATE_MASK;
1877 if (state) {
1878 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
1879 vfio_pci_write_config(pdev, vdev->pm_cap + PCI_PM_CTRL, pmcsr, 2);
1880 /* vfio handles the necessary delay here */
1881 pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2);
1882 state = pmcsr & PCI_PM_CTRL_STATE_MASK;
1883 if (state) {
1884 error_report("vfio: Unable to power on device, stuck in D%d",
1885 state);
1886 }
1887 }
1888 }
1889
1890 /*
1891 * Stop any ongoing DMA by disconnecting I/O, MMIO, and bus master.
1892 * Also put INTx Disable in a known state.
1893 */
1894 cmd = vfio_pci_read_config(pdev, PCI_COMMAND, 2);
1895 cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
1896 PCI_COMMAND_INTX_DISABLE);
1897 vfio_pci_write_config(pdev, PCI_COMMAND, cmd, 2);
1898 }
1899
1900 static void vfio_pci_post_reset(VFIOPCIDevice *vdev)
1901 {
1902 vfio_intx_enable(vdev);
1903 }
1904
1905 static bool vfio_pci_host_match(PCIHostDeviceAddress *addr, const char *name)
1906 {
1907 char tmp[13];
1908
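/* Render as "dddd:bb:ss.f" (12 chars + NUL exactly fills tmp[13]) for comparison */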
1909 sprintf(tmp, "%04x:%02x:%02x.%1x", addr->domain,
1910 addr->bus, addr->slot, addr->function);
1911
1912 return (strcmp(tmp, name) == 0);
1913 }
1914
1915 static int vfio_pci_hot_reset(VFIOPCIDevice *vdev, bool single)
1916 {
1917 VFIOGroup *group;
1918 struct vfio_pci_hot_reset_info *info;
1919 struct vfio_pci_dependent_device *devices;
1920 struct vfio_pci_hot_reset *reset;
1921 int32_t *fds;
1922 int ret, i, count;
1923 bool multi = false;
1924
1925 trace_vfio_pci_hot_reset(vdev->vbasedev.name, single ? "one" : "multi");
1926
1927 vfio_pci_pre_reset(vdev);
1928 vdev->vbasedev.needs_reset = false;
1929
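/*
 * Probe with an empty device array first; the kernel reports the
 * required count and fails with ENOSPC when the array is too small,
 * which sizes the second, full call below.
 */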
1930 info = g_malloc0(sizeof(*info));
1931 info->argsz = sizeof(*info);
1932
1933 ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info);
1934 if (ret && errno != ENOSPC) {
1935 ret = -errno;
1936 if (!vdev->has_pm_reset) {
1937 error_report("vfio: Cannot reset device %s, "
1938 "no available reset mechanism.", vdev->vbasedev.name);
1939 }
1940 goto out_single;
1941 }
1942
1943 count = info->count;
1944 info = g_realloc(info, sizeof(*info) + (count * sizeof(*devices)));
1945 info->argsz = sizeof(*info) + (count * sizeof(*devices));
1946 devices = &info->devices[0];
1947
1948 ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info);
1949 if (ret) {
1950 ret = -errno;
1951 error_report("vfio: hot reset info failed: %m");
1952 goto out_single;
1953 }
1954
1955 trace_vfio_pci_hot_reset_has_dep_devices(vdev->vbasedev.name);
1956
1957 /* Verify that we have all the groups required */
1958 for (i = 0; i < info->count; i++) {
1959 PCIHostDeviceAddress host;
1960 VFIOPCIDevice *tmp;
1961 VFIODevice *vbasedev_iter;
1962
1963 host.domain = devices[i].segment;
1964 host.bus = devices[i].bus;
1965 host.slot = PCI_SLOT(devices[i].devfn);
1966 host.function = PCI_FUNC(devices[i].devfn);
1967
1968 trace_vfio_pci_hot_reset_dep_devices(host.domain,
1969 host.bus, host.slot, host.function, devices[i].group_id);
1970
1971 if (vfio_pci_host_match(&host, vdev->vbasedev.name)) {
1972 continue;
1973 }
1974
1975 QLIST_FOREACH(group, &vfio_group_list, next) {
1976 if (group->groupid == devices[i].group_id) {
1977 break;
1978 }
1979 }
1980
1981 if (!group) {
1982 if (!vdev->has_pm_reset) {
1983 error_report("vfio: Cannot reset device %s, "
1984 "depends on group %d which is not owned.",
1985 vdev->vbasedev.name, devices[i].group_id);
1986 }
1987 ret = -EPERM;
1988 goto out;
1989 }
1990
1991 /* Prep dependent devices for reset and clear our marker. */
1992 QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
1993 if (vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) {
1994 continue;
1995 }
1996 tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev);
1997 if (vfio_pci_host_match(&host, tmp->vbasedev.name)) {
1998 if (single) {
1999 ret = -EINVAL;
2000 goto out_single;
2001 }
2002 vfio_pci_pre_reset(tmp);
2003 tmp->vbasedev.needs_reset = false;
2004 multi = true;
2005 break;
2006 }
2007 }
2008 }
2009
2010 if (!single && !multi) {
2011 ret = -EINVAL;
2012 goto out_single;
2013 }
2014
2015 /* Determine how many group fds need to be passed */
2016 count = 0;
2017 QLIST_FOREACH(group, &vfio_group_list, next) {
2018 for (i = 0; i < info->count; i++) {
2019 if (group->groupid == devices[i].group_id) {
2020 count++;
2021 break;
2022 }
2023 }
2024 }
2025
2026 reset = g_malloc0(sizeof(*reset) + (count * sizeof(*fds)));
2027 reset->argsz = sizeof(*reset) + (count * sizeof(*fds));
2028 fds = &reset->group_fds[0];
2029
2030 /* Fill in group fds */
2031 QLIST_FOREACH(group, &vfio_group_list, next) {
2032 for (i = 0; i < info->count; i++) {
2033 if (group->groupid == devices[i].group_id) {
2034 fds[reset->count++] = group->fd;
2035 break;
2036 }
2037 }
2038 }
2039
2040 /* Bus reset! */
2041 ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_PCI_HOT_RESET, reset);
2042 g_free(reset);
2043
2044 trace_vfio_pci_hot_reset_result(vdev->vbasedev.name,
2045 ret ? "%m" : "Success");
2046
2047 out:
2048 /* Re-enable INTx on affected devices */
2049 for (i = 0; i < info->count; i++) {
2050 PCIHostDeviceAddress host;
2051 VFIOPCIDevice *tmp;
2052 VFIODevice *vbasedev_iter;
2053
2054 host.domain = devices[i].segment;
2055 host.bus = devices[i].bus;
2056 host.slot = PCI_SLOT(devices[i].devfn);
2057 host.function = PCI_FUNC(devices[i].devfn);
2058
2059 if (vfio_pci_host_match(&host, vdev->vbasedev.name)) {
2060 continue;
2061 }
2062
2063 QLIST_FOREACH(group, &vfio_group_list, next) {
2064 if (group->groupid == devices[i].group_id) {
2065 break;
2066 }
2067 }
2068
2069 if (!group) {
2070 break;
2071 }
2072
2073 QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
2074 if (vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) {
2075 continue;
2076 }
2077 tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev);
2078 if (vfio_pci_host_match(&host, tmp->vbasedev.name)) {
2079 vfio_pci_post_reset(tmp);
2080 break;
2081 }
2082 }
2083 }
2084 out_single:
2085 vfio_pci_post_reset(vdev);
2086 g_free(info);
2087
2088 return ret;
2089 }
2090
2091 /*
2092 * We want to differentiate hot reset of multiple in-use devices vs. hot reset
2093 * of a single in-use device. VFIO_DEVICE_RESET will already handle the case
2094 * of doing hot resets when there is only a single device per bus. The in-use
2095 * here refers to how many VFIODevices are affected. A hot reset that affects
2096 * multiple devices, but only a single in-use device, means that we can call
2097 * it from our bus ->reset() callback since the extent is effectively a single
2098 * device. This allows us to make use of it in the hotplug path. When there
2099 * are multiple in-use devices, we can only trigger the hot reset during a
2100 * system reset and thus from our reset handler. We separate _one vs _multi
2101 * here so that we don't overlap and do a double reset on the system reset
2102 * path where both our reset handler and ->reset() callback are used. Calling
2103 * _one() will only do a hot reset for the single in-use device case; calling
2104 * _multi() will do nothing if a _one() would have been sufficient.
2105 */
2106 static int vfio_pci_hot_reset_one(VFIOPCIDevice *vdev)
2107 {
2108 return vfio_pci_hot_reset(vdev, true);
2109 }
2110
2111 static int vfio_pci_hot_reset_multi(VFIODevice *vbasedev)
2112 {
2113 VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
2114 return vfio_pci_hot_reset(vdev, false);
2115 }
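/*
 * For illustration, the wiring is: vfio_pci_reset() (the DeviceState
 * reset callback below) tries vfio_pci_hot_reset_one(), while
 * vfio_pci_hot_reset_multi() is exposed through VFIODeviceOps for the
 * system reset handler in the vfio common code.
 */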
2116
2117 static void vfio_pci_compute_needs_reset(VFIODevice *vbasedev)
2118 {
2119 VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
2120 if (!vbasedev->reset_works || (!vdev->has_flr && vdev->has_pm_reset)) {
2121 vbasedev->needs_reset = true;
2122 }
2123 }
2124
2125 static VFIODeviceOps vfio_pci_ops = {
2126 .vfio_compute_needs_reset = vfio_pci_compute_needs_reset,
2127 .vfio_hot_reset_multi = vfio_pci_hot_reset_multi,
2128 .vfio_eoi = vfio_intx_eoi,
2129 };
2130
2131 int vfio_populate_vga(VFIOPCIDevice *vdev)
2132 {
2133 VFIODevice *vbasedev = &vdev->vbasedev;
2134 struct vfio_region_info *reg_info;
2135 int ret;
2136
2137 ret = vfio_get_region_info(vbasedev, VFIO_PCI_VGA_REGION_INDEX, &reg_info);
2138 if (ret) {
2139 return ret;
2140 }
2141
2142 if (!(reg_info->flags & VFIO_REGION_INFO_FLAG_READ) ||
2143 !(reg_info->flags & VFIO_REGION_INFO_FLAG_WRITE) ||
2144 reg_info->size < 0xbffff + 1) {
2145 error_report("vfio: Unexpected VGA info, flags 0x%lx, size 0x%lx",
2146 (unsigned long)reg_info->flags,
2147 (unsigned long)reg_info->size);
2148 g_free(reg_info);
2149 return -EINVAL;
2150 }
2151
2152 vdev->vga = g_new0(VFIOVGA, 1);
2153
2154 vdev->vga->fd_offset = reg_info->offset;
2155 vdev->vga->fd = vdev->vbasedev.fd;
2156
2157 g_free(reg_info);
2158
2159 vdev->vga->region[QEMU_PCI_VGA_MEM].offset = QEMU_PCI_VGA_MEM_BASE;
2160 vdev->vga->region[QEMU_PCI_VGA_MEM].nr = QEMU_PCI_VGA_MEM;
2161 QLIST_INIT(&vdev->vga->region[QEMU_PCI_VGA_MEM].quirks);
2162
2163 memory_region_init_io(&vdev->vga->region[QEMU_PCI_VGA_MEM].mem,
2164 OBJECT(vdev), &vfio_vga_ops,
2165 &vdev->vga->region[QEMU_PCI_VGA_MEM],
2166 "vfio-vga-mmio@0xa0000",
2167 QEMU_PCI_VGA_MEM_SIZE);
2168
2169 vdev->vga->region[QEMU_PCI_VGA_IO_LO].offset = QEMU_PCI_VGA_IO_LO_BASE;
2170 vdev->vga->region[QEMU_PCI_VGA_IO_LO].nr = QEMU_PCI_VGA_IO_LO;
2171 QLIST_INIT(&vdev->vga->region[QEMU_PCI_VGA_IO_LO].quirks);
2172
2173 memory_region_init_io(&vdev->vga->region[QEMU_PCI_VGA_IO_LO].mem,
2174 OBJECT(vdev), &vfio_vga_ops,
2175 &vdev->vga->region[QEMU_PCI_VGA_IO_LO],
2176 "vfio-vga-io@0x3b0",
2177 QEMU_PCI_VGA_IO_LO_SIZE);
2178
2179 vdev->vga->region[QEMU_PCI_VGA_IO_HI].offset = QEMU_PCI_VGA_IO_HI_BASE;
2180 vdev->vga->region[QEMU_PCI_VGA_IO_HI].nr = QEMU_PCI_VGA_IO_HI;
2181 QLIST_INIT(&vdev->vga->region[QEMU_PCI_VGA_IO_HI].quirks);
2182
2183 memory_region_init_io(&vdev->vga->region[QEMU_PCI_VGA_IO_HI].mem,
2184 OBJECT(vdev), &vfio_vga_ops,
2185 &vdev->vga->region[QEMU_PCI_VGA_IO_HI],
2186 "vfio-vga-io@0x3c0",
2187 QEMU_PCI_VGA_IO_HI_SIZE);
2188
2189 pci_register_vga(&vdev->pdev, &vdev->vga->region[QEMU_PCI_VGA_MEM].mem,
2190 &vdev->vga->region[QEMU_PCI_VGA_IO_LO].mem,
2191 &vdev->vga->region[QEMU_PCI_VGA_IO_HI].mem);
2192
2193 return 0;
2194 }
2195
2196 static int vfio_populate_device(VFIOPCIDevice *vdev)
2197 {
2198 VFIODevice *vbasedev = &vdev->vbasedev;
2199 struct vfio_region_info *reg_info;
2200 struct vfio_irq_info irq_info = { .argsz = sizeof(irq_info) };
2201 int i, ret = -1;
2202
2203 /* Sanity check device */
2204 if (!(vbasedev->flags & VFIO_DEVICE_FLAGS_PCI)) {
2205 error_report("vfio: %s is not a PCI device", vbasedev->name);
2206 goto error;
2207 }
2208
2209 if (vbasedev->num_regions < VFIO_PCI_CONFIG_REGION_INDEX + 1) {
2210 error_report("vfio: unexpected number of io regions %u",
2211 vbasedev->num_regions);
2212 goto error;
2213 }
2214
2215 if (vbasedev->num_irqs < VFIO_PCI_MSIX_IRQ_INDEX + 1) {
2216 error_report("vfio: unexpected number of irqs %u", vbasedev->num_irqs);
2217 goto error;
2218 }
2219
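/* vfio-pci uses fixed region indices: BARs 0-5, then ROM, config, and VGA */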
2220 for (i = VFIO_PCI_BAR0_REGION_INDEX; i < VFIO_PCI_ROM_REGION_INDEX; i++) {
2221 char *name = g_strdup_printf("%s BAR %d", vbasedev->name, i);
2222
2223 ret = vfio_region_setup(OBJECT(vdev), vbasedev,
2224 &vdev->bars[i].region, i, name);
2225 g_free(name);
2226
2227 if (ret) {
2228 error_report("vfio: Error getting region %d info: %m", i);
2229 goto error;
2230 }
2231
2232 QLIST_INIT(&vdev->bars[i].quirks);
2233 }
2234
2235 ret = vfio_get_region_info(vbasedev,
2236 VFIO_PCI_CONFIG_REGION_INDEX, &reg_info);
2237 if (ret) {
2238 error_report("vfio: Error getting config info: %m");
2239 goto error;
2240 }
2241
2242 trace_vfio_populate_device_config(vdev->vbasedev.name,
2243 (unsigned long)reg_info->size,
2244 (unsigned long)reg_info->offset,
2245 (unsigned long)reg_info->flags);
2246
2247 vdev->config_size = reg_info->size;
2248 if (vdev->config_size == PCI_CONFIG_SPACE_SIZE) {
2249 vdev->pdev.cap_present &= ~QEMU_PCI_CAP_EXPRESS;
2250 }
2251 vdev->config_offset = reg_info->offset;
2252
2253 g_free(reg_info);
2254
2255 if (vdev->features & VFIO_FEATURE_ENABLE_VGA) {
2256 ret = vfio_populate_vga(vdev);
2257 if (ret) {
2258 error_report(
2259 "vfio: Device does not support requested feature x-vga");
2260 goto error;
2261 }
2262 }
2263
2264 irq_info.index = VFIO_PCI_ERR_IRQ_INDEX;
2265
2266 ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_IRQ_INFO, &irq_info);
2267 if (ret) {
2268 /* This can fail for an old kernel or legacy PCI dev */
2269 trace_vfio_populate_device_get_irq_info_failure();
2270 ret = 0;
2271 } else if (irq_info.count == 1) {
2272 vdev->pci_aer = true;
2273 } else {
2274 error_report("vfio: %s "
2275 "Could not enable error recovery for the device",
2276 vbasedev->name);
2277 }
2278
2279 error:
2280 return ret;
2281 }
2282
2283 static void vfio_put_device(VFIOPCIDevice *vdev)
2284 {
2285 g_free(vdev->vbasedev.name);
2286 g_free(vdev->msix);
2287
2288 vfio_put_base_device(&vdev->vbasedev);
2289 }
2290
2291 static void vfio_err_notifier_handler(void *opaque)
2292 {
2293 VFIOPCIDevice *vdev = opaque;
2294
2295 if (!event_notifier_test_and_clear(&vdev->err_notifier)) {
2296 return;
2297 }
2298
2299 /*
2300 * TBD. Retrieve the error details and decide what action
2301 * needs to be taken. One of the actions could be to pass
2302 * the error to the guest and have the guest driver recover
2303 * from the error. This requires that PCIe capabilities be
2304 * exposed to the guest. For now, we just terminate the
2305 * guest to contain the error.
2306 */
2307
2308 error_report("%s(%s) Unrecoverable error detected. Please collect any data "
"possible and then kill the guest", __func__, vdev->vbasedev.name);
2309
2310 vm_stop(RUN_STATE_INTERNAL_ERROR);
2311 }
2312
2313 /*
2314 * Registers error notifier for devices supporting error recovery.
2315 * If we encounter a failure in this function, we report an error
2316 * and continue after disabling error recovery support for the
2317 * device.
2318 */
2319 static void vfio_register_err_notifier(VFIOPCIDevice *vdev)
2320 {
2321 int ret;
2322 int argsz;
2323 struct vfio_irq_set *irq_set;
2324 int32_t *pfd;
2325
2326 if (!vdev->pci_aer) {
2327 return;
2328 }
2329
2330 if (event_notifier_init(&vdev->err_notifier, 0)) {
2331 error_report("vfio: Unable to init event notifier for error detection");
2332 vdev->pci_aer = false;
2333 return;
2334 }
2335
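/*
 * Wire the eventfd to the device's error IRQ: vfio_irq_set carries a
 * variable-length data payload, here a single int32_t eventfd that the
 * host kernel signals when it detects a device error.
 */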
2336 argsz = sizeof(*irq_set) + sizeof(*pfd);
2337
2338 irq_set = g_malloc0(argsz);
2339 irq_set->argsz = argsz;
2340 irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
2341 VFIO_IRQ_SET_ACTION_TRIGGER;
2342 irq_set->index = VFIO_PCI_ERR_IRQ_INDEX;
2343 irq_set->start = 0;
2344 irq_set->count = 1;
2345 pfd = (int32_t *)&irq_set->data;
2346
2347 *pfd = event_notifier_get_fd(&vdev->err_notifier);
2348 qemu_set_fd_handler(*pfd, vfio_err_notifier_handler, NULL, vdev);
2349
2350 ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
2351 if (ret) {
2352 error_report("vfio: Failed to set up error notification");
2353 qemu_set_fd_handler(*pfd, NULL, NULL, vdev);
2354 event_notifier_cleanup(&vdev->err_notifier);
2355 vdev->pci_aer = false;
2356 }
2357 g_free(irq_set);
2358 }
2359
2360 static void vfio_unregister_err_notifier(VFIOPCIDevice *vdev)
2361 {
2362 int argsz;
2363 struct vfio_irq_set *irq_set;
2364 int32_t *pfd;
2365 int ret;
2366
2367 if (!vdev->pci_aer) {
2368 return;
2369 }
2370
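/* An eventfd of -1 tells the kernel to de-assign the trigger */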
2371 argsz = sizeof(*irq_set) + sizeof(*pfd);
2372
2373 irq_set = g_malloc0(argsz);
2374 irq_set->argsz = argsz;
2375 irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
2376 VFIO_IRQ_SET_ACTION_TRIGGER;
2377 irq_set->index = VFIO_PCI_ERR_IRQ_INDEX;
2378 irq_set->start = 0;
2379 irq_set->count = 1;
2380 pfd = (int32_t *)&irq_set->data;
2381 *pfd = -1;
2382
2383 ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
2384 if (ret) {
2385 error_report("vfio: Failed to de-assign error fd: %m");
2386 }
2387 g_free(irq_set);
2388 qemu_set_fd_handler(event_notifier_get_fd(&vdev->err_notifier),
2389 NULL, NULL, vdev);
2390 event_notifier_cleanup(&vdev->err_notifier);
2391 }
2392
2393 static void vfio_req_notifier_handler(void *opaque)
2394 {
2395 VFIOPCIDevice *vdev = opaque;
2396
2397 if (!event_notifier_test_and_clear(&vdev->req_notifier)) {
2398 return;
2399 }
2400
2401 qdev_unplug(&vdev->pdev.qdev, NULL);
2402 }
2403
2404 static void vfio_register_req_notifier(VFIOPCIDevice *vdev)
2405 {
2406 struct vfio_irq_info irq_info = { .argsz = sizeof(irq_info),
2407 .index = VFIO_PCI_REQ_IRQ_INDEX };
2408 int argsz;
2409 struct vfio_irq_set *irq_set;
2410 int32_t *pfd;
2411
2412 if (!(vdev->features & VFIO_FEATURE_ENABLE_REQ)) {
2413 return;
2414 }
2415
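/* Probe whether the kernel exposes a device request IRQ before wiring it up */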
2416 if (ioctl(vdev->vbasedev.fd,
2417 VFIO_DEVICE_GET_IRQ_INFO, &irq_info) < 0 || irq_info.count < 1) {
2418 return;
2419 }
2420
2421 if (event_notifier_init(&vdev->req_notifier, 0)) {
2422 error_report("vfio: Unable to init event notifier for device request");
2423 return;
2424 }
2425
2426 argsz = sizeof(*irq_set) + sizeof(*pfd);
2427
2428 irq_set = g_malloc0(argsz);
2429 irq_set->argsz = argsz;
2430 irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
2431 VFIO_IRQ_SET_ACTION_TRIGGER;
2432 irq_set->index = VFIO_PCI_REQ_IRQ_INDEX;
2433 irq_set->start = 0;
2434 irq_set->count = 1;
2435 pfd = (int32_t *)&irq_set->data;
2436
2437 *pfd = event_notifier_get_fd(&vdev->req_notifier);
2438 qemu_set_fd_handler(*pfd, vfio_req_notifier_handler, NULL, vdev);
2439
2440 if (ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set)) {
2441 error_report("vfio: Failed to set up device request notification");
2442 qemu_set_fd_handler(*pfd, NULL, NULL, vdev);
2443 event_notifier_cleanup(&vdev->req_notifier);
2444 } else {
2445 vdev->req_enabled = true;
2446 }
2447
2448 g_free(irq_set);
2449 }
2450
2451 static void vfio_unregister_req_notifier(VFIOPCIDevice *vdev)
2452 {
2453 int argsz;
2454 struct vfio_irq_set *irq_set;
2455 int32_t *pfd;
2456
2457 if (!vdev->req_enabled) {
2458 return;
2459 }
2460
2461 argsz = sizeof(*irq_set) + sizeof(*pfd);
2462
2463 irq_set = g_malloc0(argsz);
2464 irq_set->argsz = argsz;
2465 irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
2466 VFIO_IRQ_SET_ACTION_TRIGGER;
2467 irq_set->index = VFIO_PCI_REQ_IRQ_INDEX;
2468 irq_set->start = 0;
2469 irq_set->count = 1;
2470 pfd = (int32_t *)&irq_set->data;
2471 *pfd = -1;
2472
2473 if (ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set)) {
2474 error_report("vfio: Failed to de-assign device request fd: %m");
2475 }
2476 g_free(irq_set);
2477 qemu_set_fd_handler(event_notifier_get_fd(&vdev->req_notifier),
2478 NULL, NULL, vdev);
2479 event_notifier_cleanup(&vdev->req_notifier);
2480
2481 vdev->req_enabled = false;
2482 }
2483
2484 static int vfio_initfn(PCIDevice *pdev)
2485 {
2486 VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
2487 VFIODevice *vbasedev_iter;
2488 VFIOGroup *group;
2489 char *tmp, group_path[PATH_MAX], *group_name;
2490 ssize_t len;
2491 struct stat st;
2492 int groupid;
2493 int i, ret;
2494
2495 if (!vdev->vbasedev.sysfsdev) {
2496 vdev->vbasedev.sysfsdev =
2497 g_strdup_printf("/sys/bus/pci/devices/%04x:%02x:%02x.%01x",
2498 vdev->host.domain, vdev->host.bus,
2499 vdev->host.slot, vdev->host.function);
2500 }
2501
2502 if (stat(vdev->vbasedev.sysfsdev, &st) < 0) {
2503 error_report("vfio: error: no such host device: %s",
2504 vdev->vbasedev.sysfsdev);
2505 return -errno;
2506 }
2507
2508 vdev->vbasedev.name = g_strdup(basename(vdev->vbasedev.sysfsdev));
2509 vdev->vbasedev.ops = &vfio_pci_ops;
2510 vdev->vbasedev.type = VFIO_DEVICE_TYPE_PCI;
2511
2512 tmp = g_strdup_printf("%s/iommu_group", vdev->vbasedev.sysfsdev);
2513 len = readlink(tmp, group_path, sizeof(group_path));
2514 g_free(tmp);
2515
2516 if (len <= 0 || len >= sizeof(group_path)) {
2517 error_report("vfio: error: no iommu_group for device");
2518 return len < 0 ? -errno : -ENAMETOOLONG;
2519 }
2520
2521 group_path[len] = 0;
2522
2523 group_name = basename(group_path);
2524 if (sscanf(group_name, "%d", &groupid) != 1) {
2525 error_report("vfio: error reading %s: %m", group_path);
2526 return -errno;
2527 }
2528
2529 trace_vfio_initfn(vdev->vbasedev.name, groupid);
2530
2531 group = vfio_get_group(groupid, pci_device_iommu_address_space(pdev));
2532 if (!group) {
2533 error_report("vfio: failed to get group %d", groupid);
2534 return -ENOENT;
2535 }
2536
2537 QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
2538 if (strcmp(vbasedev_iter->name, vdev->vbasedev.name) == 0) {
2539 error_report("vfio: error: device %s is already attached",
2540 vdev->vbasedev.name);
2541 vfio_put_group(group);
2542 return -EBUSY;
2543 }
2544 }
2545
2546 ret = vfio_get_device(group, vdev->vbasedev.name, &vdev->vbasedev);
2547 if (ret) {
2548 error_report("vfio: failed to get device %s", vdev->vbasedev.name);
2549 vfio_put_group(group);
2550 return ret;
2551 }
2552
2553 ret = vfio_populate_device(vdev);
2554 if (ret) {
2555 return ret;
2556 }
2557
2558 /* Get a copy of config space */
2559 ret = pread(vdev->vbasedev.fd, vdev->pdev.config,
2560 MIN(pci_config_size(&vdev->pdev), vdev->config_size),
2561 vdev->config_offset);
2562 if (ret < (int)MIN(pci_config_size(&vdev->pdev), vdev->config_size)) {
2563 ret = ret < 0 ? -errno : -EFAULT;
2564 error_report("vfio: Failed to read device config space");
2565 return ret;
2566 }
2567
2568 /* vfio emulates a lot for us, but some bits need extra love */
2569 vdev->emulated_config_bits = g_malloc0(vdev->config_size);
2570
2571 /* QEMU can choose to expose the ROM or not */
2572 memset(vdev->emulated_config_bits + PCI_ROM_ADDRESS, 0xff, 4);
2573
2574 /*
2575 * The PCI spec reserves vendor ID 0xffff as an invalid value. The
2576 * device ID is managed by the vendor and need only be a 16-bit value.
2577 * Allow any 16-bit value for subsystem so they can be hidden or changed.
2578 */
2579 if (vdev->vendor_id != PCI_ANY_ID) {
2580 if (vdev->vendor_id >= 0xffff) {
2581 error_report("vfio: Invalid PCI vendor ID provided");
2582 return -EINVAL;
2583 }
2584 vfio_add_emulated_word(vdev, PCI_VENDOR_ID, vdev->vendor_id, ~0);
2585 trace_vfio_pci_emulated_vendor_id(vdev->vbasedev.name, vdev->vendor_id);
2586 } else {
2587 vdev->vendor_id = pci_get_word(pdev->config + PCI_VENDOR_ID);
2588 }
2589
2590 if (vdev->device_id != PCI_ANY_ID) {
2591 if (vdev->device_id > 0xffff) {
2592 error_report("vfio: Invalid PCI device ID provided");
2593 return -EINVAL;
2594 }
2595 vfio_add_emulated_word(vdev, PCI_DEVICE_ID, vdev->device_id, ~0);
2596 trace_vfio_pci_emulated_device_id(vdev->vbasedev.name, vdev->device_id);
2597 } else {
2598 vdev->device_id = pci_get_word(pdev->config + PCI_DEVICE_ID);
2599 }
2600
2601 if (vdev->sub_vendor_id != PCI_ANY_ID) {
2602 if (vdev->sub_vendor_id > 0xffff) {
2603 error_report("vfio: Invalid PCI subsystem vendor ID provided");
2604 return -EINVAL;
2605 }
2606 vfio_add_emulated_word(vdev, PCI_SUBSYSTEM_VENDOR_ID,
2607 vdev->sub_vendor_id, ~0);
2608 trace_vfio_pci_emulated_sub_vendor_id(vdev->vbasedev.name,
2609 vdev->sub_vendor_id);
2610 }
2611
2612 if (vdev->sub_device_id != PCI_ANY_ID) {
2613 if (vdev->sub_device_id > 0xffff) {
2614 error_report("vfio: Invalid PCI subsystem device ID provided");
2615 return -EINVAL;
2616 }
2617 vfio_add_emulated_word(vdev, PCI_SUBSYSTEM_ID, vdev->sub_device_id, ~0);
2618 trace_vfio_pci_emulated_sub_device_id(vdev->vbasedev.name,
2619 vdev->sub_device_id);
2620 }
2621
2622 /* QEMU can change multi-function devices to single function, or reverse */
2623 vdev->emulated_config_bits[PCI_HEADER_TYPE] =
2624 PCI_HEADER_TYPE_MULTI_FUNCTION;
2625
2626 /* Restore or clear multifunction, this is always controlled by QEMU */
2627 if (vdev->pdev.cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
2628 vdev->pdev.config[PCI_HEADER_TYPE] |= PCI_HEADER_TYPE_MULTI_FUNCTION;
2629 } else {
2630 vdev->pdev.config[PCI_HEADER_TYPE] &= ~PCI_HEADER_TYPE_MULTI_FUNCTION;
2631 }
2632
2633 /*
2634 * Clear host resource mapping info. If we choose not to register a
2635 * BAR, such as might be the case with the option ROM, we can get
2636 * confusing, unwritable, residual addresses from the host here.
2637 */
2638 memset(&vdev->pdev.config[PCI_BASE_ADDRESS_0], 0, 24);
2639 memset(&vdev->pdev.config[PCI_ROM_ADDRESS], 0, 4);
2640
2641 vfio_pci_size_rom(vdev);
2642
2643 ret = vfio_msix_early_setup(vdev);
2644 if (ret) {
2645 return ret;
2646 }
2647
2648 vfio_bars_setup(vdev);
2649
2650 ret = vfio_add_capabilities(vdev);
2651 if (ret) {
2652 goto out_teardown;
2653 }
2654
2655 if (vdev->vga) {
2656 vfio_vga_quirk_setup(vdev);
2657 }
2658
2659 for (i = 0; i < PCI_ROM_SLOT; i++) {
2660 vfio_bar_quirk_setup(vdev, i);
2661 }
2662
2663 if (!vdev->igd_opregion &&
2664 vdev->features & VFIO_FEATURE_ENABLE_IGD_OPREGION) {
2665 struct vfio_region_info *opregion;
2666
2667 if (vdev->pdev.qdev.hotplugged) {
2668 error_report("Cannot support IGD OpRegion feature on hotplugged "
2669 "device %s", vdev->vbasedev.name);
2670 ret = -EINVAL;
2671 goto out_teardown;
2672 }
2673
2674 ret = vfio_get_dev_region_info(&vdev->vbasedev,
2675 VFIO_REGION_TYPE_PCI_VENDOR_TYPE | PCI_VENDOR_ID_INTEL,
2676 VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION, &opregion);
2677 if (ret) {
2678 error_report("Device %s does not support requested IGD OpRegion "
2679 "feature", vdev->vbasedev.name);
2680 goto out_teardown;
2681 }
2682
2683 ret = vfio_pci_igd_opregion_init(vdev, opregion);
2684 g_free(opregion);
2685 if (ret) {
2686 error_report("Device %s IGD OpRegion initialization failed",
2687 vdev->vbasedev.name);
2688 goto out_teardown;
2689 }
2690 }
2691
2692 /* QEMU emulates all of MSI & MSIX */
2693 if (pdev->cap_present & QEMU_PCI_CAP_MSIX) {
2694 memset(vdev->emulated_config_bits + pdev->msix_cap, 0xff,
2695 MSIX_CAP_LENGTH);
2696 }
2697
2698 if (pdev->cap_present & QEMU_PCI_CAP_MSI) {
2699 memset(vdev->emulated_config_bits + pdev->msi_cap, 0xff,
2700 vdev->msi_cap_size);
2701 }
2702
2703 if (vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1)) {
2704 vdev->intx.mmap_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
2705 vfio_intx_mmap_enable, vdev);
2706 pci_device_set_intx_routing_notifier(&vdev->pdev, vfio_intx_update);
2707 ret = vfio_intx_enable(vdev);
2708 if (ret) {
2709 goto out_teardown;
2710 }
2711 }
2712
2713 vfio_register_err_notifier(vdev);
2714 vfio_register_req_notifier(vdev);
2715 vfio_setup_resetfn_quirk(vdev);
2716
2717 return 0;
2718
2719 out_teardown:
2720 pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
2721 vfio_teardown_msi(vdev);
2722 vfio_bars_exit(vdev);
2723 return ret;
2724 }
2725
2726 static void vfio_instance_finalize(Object *obj)
2727 {
2728 PCIDevice *pci_dev = PCI_DEVICE(obj);
2729 VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pci_dev);
2730 VFIOGroup *group = vdev->vbasedev.group;
2731
2732 vfio_bars_finalize(vdev);
2733 g_free(vdev->emulated_config_bits);
2734 g_free(vdev->rom);
2735 /*
2736 * XXX Leaking igd_opregion is not an oversight; we can't remove the
2737 * fw_cfg entry, so leaking this allocation seems like the safest
2738 * option.
2739 *
2740 * g_free(vdev->igd_opregion);
2741 */
2742 vfio_put_device(vdev);
2743 vfio_put_group(group);
2744 }
2745
2746 static void vfio_exitfn(PCIDevice *pdev)
2747 {
2748 VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
2749
2750 vfio_unregister_req_notifier(vdev);
2751 vfio_unregister_err_notifier(vdev);
2752 pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
2753 vfio_disable_interrupts(vdev);
2754 if (vdev->intx.mmap_timer) {
2755 timer_free(vdev->intx.mmap_timer);
2756 }
2757 vfio_teardown_msi(vdev);
2758 vfio_bars_exit(vdev);
2759 }
2760
2761 static void vfio_pci_reset(DeviceState *dev)
2762 {
2763 PCIDevice *pdev = DO_UPCAST(PCIDevice, qdev, dev);
2764 VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
2765
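/*
 * Reset strategy, in order of preference: device-specific quirk,
 * function-level reset via VFIO_DEVICE_RESET, bus (hot) reset, and
 * finally PM reset as a last resort.
 */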
2766 trace_vfio_pci_reset(vdev->vbasedev.name);
2767
2768 vfio_pci_pre_reset(vdev);
2769
2770 if (vdev->resetfn && !vdev->resetfn(vdev)) {
2771 goto post_reset;
2772 }
2773
2774 if (vdev->vbasedev.reset_works &&
2775 (vdev->has_flr || !vdev->has_pm_reset) &&
2776 !ioctl(vdev->vbasedev.fd, VFIO_DEVICE_RESET)) {
2777 trace_vfio_pci_reset_flr(vdev->vbasedev.name);
2778 goto post_reset;
2779 }
2780
2781 /* See if we can do our own bus reset */
2782 if (!vfio_pci_hot_reset_one(vdev)) {
2783 goto post_reset;
2784 }
2785
2786 /* If nothing else works and the device supports PM reset, use it */
2787 if (vdev->vbasedev.reset_works && vdev->has_pm_reset &&
2788 !ioctl(vdev->vbasedev.fd, VFIO_DEVICE_RESET)) {
2789 trace_vfio_pci_reset_pm(vdev->vbasedev.name);
2790 goto post_reset;
2791 }
2792
2793 post_reset:
2794 vfio_pci_post_reset(vdev);
2795 }
2796
2797 static void vfio_instance_init(Object *obj)
2798 {
2799 PCIDevice *pci_dev = PCI_DEVICE(obj);
2800 VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, PCI_DEVICE(obj));
2801
2802 device_add_bootindex_property(obj, &vdev->bootindex,
2803 "bootindex", NULL,
2804 &pci_dev->qdev, NULL);
2805 }
2806
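/* Properties with an "x-" prefix are experimental and may change or disappear */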
2807 static Property vfio_pci_dev_properties[] = {
2808 DEFINE_PROP_PCI_HOST_DEVADDR("host", VFIOPCIDevice, host),
2809 DEFINE_PROP_STRING("sysfsdev", VFIOPCIDevice, vbasedev.sysfsdev),
2810 DEFINE_PROP_UINT32("x-intx-mmap-timeout-ms", VFIOPCIDevice,
2811 intx.mmap_timeout, 1100),
2812 DEFINE_PROP_BIT("x-vga", VFIOPCIDevice, features,
2813 VFIO_FEATURE_ENABLE_VGA_BIT, false),
2814 DEFINE_PROP_BIT("x-req", VFIOPCIDevice, features,
2815 VFIO_FEATURE_ENABLE_REQ_BIT, true),
2816 DEFINE_PROP_BIT("x-igd-opregion", VFIOPCIDevice, features,
2817 VFIO_FEATURE_ENABLE_IGD_OPREGION_BIT, false),
2818 DEFINE_PROP_BOOL("x-no-mmap", VFIOPCIDevice, vbasedev.no_mmap, false),
2819 DEFINE_PROP_BOOL("x-no-kvm-intx", VFIOPCIDevice, no_kvm_intx, false),
2820 DEFINE_PROP_BOOL("x-no-kvm-msi", VFIOPCIDevice, no_kvm_msi, false),
2821 DEFINE_PROP_BOOL("x-no-kvm-msix", VFIOPCIDevice, no_kvm_msix, false),
2822 DEFINE_PROP_UINT32("x-pci-vendor-id", VFIOPCIDevice, vendor_id, PCI_ANY_ID),
2823 DEFINE_PROP_UINT32("x-pci-device-id", VFIOPCIDevice, device_id, PCI_ANY_ID),
2824 DEFINE_PROP_UINT32("x-pci-sub-vendor-id", VFIOPCIDevice,
2825 sub_vendor_id, PCI_ANY_ID),
2826 DEFINE_PROP_UINT32("x-pci-sub-device-id", VFIOPCIDevice,
2827 sub_device_id, PCI_ANY_ID),
2828 DEFINE_PROP_UINT32("x-igd-gms", VFIOPCIDevice, igd_gms, 0),
2829 /*
2830 * TODO - support passed fds... is this necessary?
2831 * DEFINE_PROP_STRING("vfiofd", VFIOPCIDevice, vfiofd_name),
2832 * DEFINE_PROP_STRING("vfiogroupfd, VFIOPCIDevice, vfiogroupfd_name),
2833 */
2834 DEFINE_PROP_END_OF_LIST(),
2835 };
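
/*
 * Example usage (illustrative host address): assign a host device with
 *   -device vfio-pci,host=0000:02:00.0
 * or, equivalently, by sysfs path:
 *   -device vfio-pci,sysfsdev=/sys/bus/pci/devices/0000:02:00.0
 */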
2836
2837 static const VMStateDescription vfio_pci_vmstate = {
2838 .name = "vfio-pci",
2839 .unmigratable = 1,
2840 };
2841
2842 static void vfio_pci_dev_class_init(ObjectClass *klass, void *data)
2843 {
2844 DeviceClass *dc = DEVICE_CLASS(klass);
2845 PCIDeviceClass *pdc = PCI_DEVICE_CLASS(klass);
2846
2847 dc->reset = vfio_pci_reset;
2848 dc->props = vfio_pci_dev_properties;
2849 dc->vmsd = &vfio_pci_vmstate;
2850 dc->desc = "VFIO-based PCI device assignment";
2851 set_bit(DEVICE_CATEGORY_MISC, dc->categories);
2852 pdc->init = vfio_initfn;
2853 pdc->exit = vfio_exitfn;
2854 pdc->config_read = vfio_pci_read_config;
2855 pdc->config_write = vfio_pci_write_config;
2856 pdc->is_express = 1; /* We might be */
2857 }
2858
2859 static const TypeInfo vfio_pci_dev_info = {
2860 .name = "vfio-pci",
2861 .parent = TYPE_PCI_DEVICE,
2862 .instance_size = sizeof(VFIOPCIDevice),
2863 .class_init = vfio_pci_dev_class_init,
2864 .instance_init = vfio_instance_init,
2865 .instance_finalize = vfio_instance_finalize,
2866 };
2867
2868 static void register_vfio_pci_dev_type(void)
2869 {
2870 type_register_static(&vfio_pci_dev_info);
2871 }
2872
2873 type_init(register_vfio_pci_dev_type)