1 /*
2 * vfio based device assignment support
3 *
4 * Copyright Red Hat, Inc. 2012
5 *
6 * Authors:
7 * Alex Williamson <alex.williamson@redhat.com>
8 *
9 * This work is licensed under the terms of the GNU GPL, version 2. See
10 * the COPYING file in the top-level directory.
11 *
12 * Based on qemu-kvm device-assignment:
13 * Adapted for KVM by Qumranet.
14 * Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
15 * Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
16 * Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
17 * Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
18 * Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
19 */
20
21 #include <linux/vfio.h>
22 #include <sys/ioctl.h>
23 #include <sys/mman.h>
24 #include <sys/stat.h>
25 #include <sys/types.h>
26 #include <unistd.h>
27
28 #include "config.h"
29 #include "hw/pci/msi.h"
30 #include "hw/pci/msix.h"
31 #include "qemu/error-report.h"
32 #include "qemu/range.h"
33 #include "sysemu/kvm.h"
34 #include "sysemu/sysemu.h"
35 #include "pci.h"
36 #include "trace.h"
37
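/* Length of the MSI-X capability: 2-byte header, 2-byte Message Control,
 * 4-byte Table Offset/BIR, and 4-byte PBA Offset/BIR. */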
38 #define MSIX_CAP_LENGTH 12
39
40 static void vfio_disable_interrupts(VFIOPCIDevice *vdev);
41 static void vfio_mmap_set_enabled(VFIOPCIDevice *vdev, bool enabled);
42
43 /*
44 * Disabling BAR mmapping can be slow, but toggling it around INTx can
45 * also be a huge overhead. We try to get the best of both worlds by
46 * waiting until an interrupt to disable mmaps (subsequent transitions
47 * to the same state are effectively no overhead). If the interrupt has
48 * been serviced and the time gap is long enough, we re-enable mmaps for
49 * performance. This works well for things like graphics cards, which
50 * may not use their interrupt at all and are penalized to an unusable
51 * level by read/write BAR traps. Other devices, like NICs, have more
52 * regular interrupts and see much better latency by staying in non-mmap
53 * mode. We therefore set the default mmap_timeout such that a ping
54 * is just enough to keep the mmap disabled. Users can experiment with
55 * other options with the x-intx-mmap-timeout-ms parameter (a value of
56 * zero disables the timer).
57 */
58 static void vfio_intx_mmap_enable(void *opaque)
59 {
60 VFIOPCIDevice *vdev = opaque;
61
62 if (vdev->intx.pending) {
63 timer_mod(vdev->intx.mmap_timer,
64 qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vdev->intx.mmap_timeout);
65 return;
66 }
67
68 vfio_mmap_set_enabled(vdev, true);
69 }
70
71 static void vfio_intx_interrupt(void *opaque)
72 {
73 VFIOPCIDevice *vdev = opaque;
74
75 if (!event_notifier_test_and_clear(&vdev->intx.interrupt)) {
76 return;
77 }
78
79 trace_vfio_intx_interrupt(vdev->vbasedev.name, 'A' + vdev->intx.pin);
80
81 vdev->intx.pending = true;
82 pci_irq_assert(&vdev->pdev);
83 vfio_mmap_set_enabled(vdev, false);
84 if (vdev->intx.mmap_timeout) {
85 timer_mod(vdev->intx.mmap_timer,
86 qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vdev->intx.mmap_timeout);
87 }
88 }
89
90 static void vfio_intx_eoi(VFIODevice *vbasedev)
91 {
92 VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
93
94 if (!vdev->intx.pending) {
95 return;
96 }
97
98 trace_vfio_intx_eoi(vbasedev->name);
99
100 vdev->intx.pending = false;
101 pci_irq_deassert(&vdev->pdev);
102 vfio_unmask_single_irqindex(vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
103 }
104
105 static void vfio_intx_enable_kvm(VFIOPCIDevice *vdev)
106 {
107 #ifdef CONFIG_KVM
108 struct kvm_irqfd irqfd = {
109 .fd = event_notifier_get_fd(&vdev->intx.interrupt),
110 .gsi = vdev->intx.route.irq,
111 .flags = KVM_IRQFD_FLAG_RESAMPLE,
112 };
113 struct vfio_irq_set *irq_set;
114 int ret, argsz;
115 int32_t *pfd;
116
117 if (vdev->no_kvm_intx || !kvm_irqfds_enabled() ||
118 vdev->intx.route.mode != PCI_INTX_ENABLED ||
119 !kvm_resamplefds_enabled()) {
120 return;
121 }
122
123 /* Get to a known interrupt state */
124 qemu_set_fd_handler(irqfd.fd, NULL, NULL, vdev);
125 vfio_mask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
126 vdev->intx.pending = false;
127 pci_irq_deassert(&vdev->pdev);
128
129 /* Get an eventfd for resample/unmask */
130 if (event_notifier_init(&vdev->intx.unmask, 0)) {
131 error_report("vfio: Error: event_notifier_init failed eoi");
132 goto fail;
133 }
134
135 /* KVM triggers it, VFIO listens for it */
136 irqfd.resamplefd = event_notifier_get_fd(&vdev->intx.unmask);
137
138 if (kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd)) {
139 error_report("vfio: Error: Failed to setup resample irqfd: %m");
140 goto fail_irqfd;
141 }
142
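/* vfio_irq_set is a variable-length structure; the eventfd to use for the
 * unmask action is appended as a single int32_t in data[]. */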
143 argsz = sizeof(*irq_set) + sizeof(*pfd);
144
145 irq_set = g_malloc0(argsz);
146 irq_set->argsz = argsz;
147 irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_UNMASK;
148 irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
149 irq_set->start = 0;
150 irq_set->count = 1;
151 pfd = (int32_t *)&irq_set->data;
152
153 *pfd = irqfd.resamplefd;
154
155 ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
156 g_free(irq_set);
157 if (ret) {
158 error_report("vfio: Error: Failed to setup INTx unmask fd: %m");
159 goto fail_vfio;
160 }
161
162 /* Let'em rip */
163 vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
164
165 vdev->intx.kvm_accel = true;
166
167 trace_vfio_intx_enable_kvm(vdev->vbasedev.name);
168
169 return;
170
171 fail_vfio:
172 irqfd.flags = KVM_IRQFD_FLAG_DEASSIGN;
173 kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd);
174 fail_irqfd:
175 event_notifier_cleanup(&vdev->intx.unmask);
176 fail:
177 qemu_set_fd_handler(irqfd.fd, vfio_intx_interrupt, NULL, vdev);
178 vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
179 #endif
180 }
181
182 static void vfio_intx_disable_kvm(VFIOPCIDevice *vdev)
183 {
184 #ifdef CONFIG_KVM
185 struct kvm_irqfd irqfd = {
186 .fd = event_notifier_get_fd(&vdev->intx.interrupt),
187 .gsi = vdev->intx.route.irq,
188 .flags = KVM_IRQFD_FLAG_DEASSIGN,
189 };
190
191 if (!vdev->intx.kvm_accel) {
192 return;
193 }
194
195 /*
196 * Get to a known state, hardware masked, QEMU ready to accept new
197 * interrupts, QEMU IRQ de-asserted.
198 */
199 vfio_mask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
200 vdev->intx.pending = false;
201 pci_irq_deassert(&vdev->pdev);
202
203 /* Tell KVM to stop listening for an INTx irqfd */
204 if (kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd)) {
205 error_report("vfio: Error: Failed to disable INTx irqfd: %m");
206 }
207
208 /* We only need to close the eventfd for VFIO to clean up the kernel side */
209 event_notifier_cleanup(&vdev->intx.unmask);
210
211 /* QEMU starts listening for interrupt events. */
212 qemu_set_fd_handler(irqfd.fd, vfio_intx_interrupt, NULL, vdev);
213
214 vdev->intx.kvm_accel = false;
215
216 /* If we've missed an event, let it re-fire through QEMU */
217 vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
218
219 trace_vfio_intx_disable_kvm(vdev->vbasedev.name);
220 #endif
221 }
222
223 static void vfio_intx_update(PCIDevice *pdev)
224 {
225 VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
226 PCIINTxRoute route;
227
228 if (vdev->interrupt != VFIO_INT_INTx) {
229 return;
230 }
231
232 route = pci_device_route_intx_to_irq(&vdev->pdev, vdev->intx.pin);
233
234 if (!pci_intx_route_changed(&vdev->intx.route, &route)) {
235 return; /* Nothing changed */
236 }
237
238 trace_vfio_intx_update(vdev->vbasedev.name,
239 vdev->intx.route.irq, route.irq);
240
241 vfio_intx_disable_kvm(vdev);
242
243 vdev->intx.route = route;
244
245 if (route.mode != PCI_INTX_ENABLED) {
246 return;
247 }
248
249 vfio_intx_enable_kvm(vdev);
250
251 /* Re-enable the interrupt in case we missed an EOI */
252 vfio_intx_eoi(&vdev->vbasedev);
253 }
254
255 static int vfio_intx_enable(VFIOPCIDevice *vdev)
256 {
257 uint8_t pin = vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1);
258 int ret, argsz;
259 struct vfio_irq_set *irq_set;
260 int32_t *pfd;
261
262 if (!pin) {
263 return 0;
264 }
265
266 vfio_disable_interrupts(vdev);
267
268 vdev->intx.pin = pin - 1; /* Pin A (1) -> irq[0] */
269 pci_config_set_interrupt_pin(vdev->pdev.config, pin);
270
271 #ifdef CONFIG_KVM
272 /*
273 * This is conditional only to avoid generating error messages on platforms
274 * where we won't actually use the result anyway.
275 */
276 if (kvm_irqfds_enabled() && kvm_resamplefds_enabled()) {
277 vdev->intx.route = pci_device_route_intx_to_irq(&vdev->pdev,
278 vdev->intx.pin);
279 }
280 #endif
281
282 ret = event_notifier_init(&vdev->intx.interrupt, 0);
283 if (ret) {
284 error_report("vfio: Error: event_notifier_init failed");
285 return ret;
286 }
287
288 argsz = sizeof(*irq_set) + sizeof(*pfd);
289
290 irq_set = g_malloc0(argsz);
291 irq_set->argsz = argsz;
292 irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
293 irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
294 irq_set->start = 0;
295 irq_set->count = 1;
296 pfd = (int32_t *)&irq_set->data;
297
298 *pfd = event_notifier_get_fd(&vdev->intx.interrupt);
299 qemu_set_fd_handler(*pfd, vfio_intx_interrupt, NULL, vdev);
300
301 ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
302 g_free(irq_set);
303 if (ret) {
304 error_report("vfio: Error: Failed to setup INTx fd: %m");
305 qemu_set_fd_handler(*pfd, NULL, NULL, vdev);
306 event_notifier_cleanup(&vdev->intx.interrupt);
307 return -errno;
308 }
309
310 vfio_intx_enable_kvm(vdev);
311
312 vdev->interrupt = VFIO_INT_INTx;
313
314 trace_vfio_intx_enable(vdev->vbasedev.name);
315
316 return 0;
317 }
318
319 static void vfio_intx_disable(VFIOPCIDevice *vdev)
320 {
321 int fd;
322
323 timer_del(vdev->intx.mmap_timer);
324 vfio_intx_disable_kvm(vdev);
325 vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
326 vdev->intx.pending = false;
327 pci_irq_deassert(&vdev->pdev);
328 vfio_mmap_set_enabled(vdev, true);
329
330 fd = event_notifier_get_fd(&vdev->intx.interrupt);
331 qemu_set_fd_handler(fd, NULL, NULL, vdev);
332 event_notifier_cleanup(&vdev->intx.interrupt);
333
334 vdev->interrupt = VFIO_INT_NONE;
335
336 trace_vfio_intx_disable(vdev->vbasedev.name);
337 }
338
339 /*
340 * MSI/X
341 */
342 static void vfio_msi_interrupt(void *opaque)
343 {
344 VFIOMSIVector *vector = opaque;
345 VFIOPCIDevice *vdev = vector->vdev;
346 MSIMessage (*get_msg)(PCIDevice *dev, unsigned vector);
347 void (*notify)(PCIDevice *dev, unsigned vector);
348 MSIMessage msg;
349 int nr = vector - vdev->msi_vectors;
350
351 if (!event_notifier_test_and_clear(&vector->interrupt)) {
352 return;
353 }
354
355 if (vdev->interrupt == VFIO_INT_MSIX) {
356 get_msg = msix_get_message;
357 notify = msix_notify;
358 } else if (vdev->interrupt == VFIO_INT_MSI) {
359 get_msg = msi_get_message;
360 notify = msi_notify;
361 } else {
362 abort();
363 }
364
365 msg = get_msg(&vdev->pdev, nr);
366 trace_vfio_msi_interrupt(vdev->vbasedev.name, nr, msg.address, msg.data);
367 notify(&vdev->pdev, nr);
368 }
369
370 static int vfio_enable_vectors(VFIOPCIDevice *vdev, bool msix)
371 {
372 struct vfio_irq_set *irq_set;
373 int ret = 0, i, argsz;
374 int32_t *fds;
375
376 argsz = sizeof(*irq_set) + (vdev->nr_vectors * sizeof(*fds));
377
378 irq_set = g_malloc0(argsz);
379 irq_set->argsz = argsz;
380 irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
381 irq_set->index = msix ? VFIO_PCI_MSIX_IRQ_INDEX : VFIO_PCI_MSI_IRQ_INDEX;
382 irq_set->start = 0;
383 irq_set->count = vdev->nr_vectors;
384 fds = (int32_t *)&irq_set->data;
385
386 for (i = 0; i < vdev->nr_vectors; i++) {
387 int fd = -1;
388
389 /*
390 * MSI vs MSI-X - The guest has direct access to MSI mask and pending
391 * bits, therefore we always use the KVM signaling path once it is set up.
392 * MSI-X mask and pending bits are emulated, so we want to use the
393 * KVM signaling path only when configured and unmasked.
394 */
395 if (vdev->msi_vectors[i].use) {
396 if (vdev->msi_vectors[i].virq < 0 ||
397 (msix && msix_is_masked(&vdev->pdev, i))) {
398 fd = event_notifier_get_fd(&vdev->msi_vectors[i].interrupt);
399 } else {
400 fd = event_notifier_get_fd(&vdev->msi_vectors[i].kvm_interrupt);
401 }
402 }
403
404 fds[i] = fd;
405 }
406
407 ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
408
409 g_free(irq_set);
410
411 return ret;
412 }
413
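/* Route an MSI vector through the KVM irqchip: allocate a GSI for the MSI
 * message and attach an irqfd, so the kernel can inject the interrupt without
 * a round trip through QEMU. If any step fails, the vector silently falls
 * back to the userspace (vfio_msi_interrupt) signaling path. */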
414 static void vfio_add_kvm_msi_virq(VFIOPCIDevice *vdev, VFIOMSIVector *vector,
415 MSIMessage *msg, bool msix)
416 {
417 int virq;
418
419 if ((msix && vdev->no_kvm_msix) || (!msix && vdev->no_kvm_msi) || !msg) {
420 return;
421 }
422
423 if (event_notifier_init(&vector->kvm_interrupt, 0)) {
424 return;
425 }
426
427 virq = kvm_irqchip_add_msi_route(kvm_state, *msg);
428 if (virq < 0) {
429 event_notifier_cleanup(&vector->kvm_interrupt);
430 return;
431 }
432
433 if (kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, &vector->kvm_interrupt,
434 NULL, virq) < 0) {
435 kvm_irqchip_release_virq(kvm_state, virq);
436 event_notifier_cleanup(&vector->kvm_interrupt);
437 return;
438 }
439
440 vector->virq = virq;
441 }
442
443 static void vfio_remove_kvm_msi_virq(VFIOMSIVector *vector)
444 {
445 kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, &vector->kvm_interrupt,
446 vector->virq);
447 kvm_irqchip_release_virq(kvm_state, vector->virq);
448 vector->virq = -1;
449 event_notifier_cleanup(&vector->kvm_interrupt);
450 }
451
452 static void vfio_update_kvm_msi_virq(VFIOMSIVector *vector, MSIMessage msg)
453 {
454 kvm_irqchip_update_msi_route(kvm_state, vector->virq, msg);
455 }
456
457 static int vfio_msix_vector_do_use(PCIDevice *pdev, unsigned int nr,
458 MSIMessage *msg, IOHandler *handler)
459 {
460 VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
461 VFIOMSIVector *vector;
462 int ret;
463
464 trace_vfio_msix_vector_do_use(vdev->vbasedev.name, nr);
465
466 vector = &vdev->msi_vectors[nr];
467
468 if (!vector->use) {
469 vector->vdev = vdev;
470 vector->virq = -1;
471 if (event_notifier_init(&vector->interrupt, 0)) {
472 error_report("vfio: Error: event_notifier_init failed");
473 }
474 vector->use = true;
475 msix_vector_use(pdev, nr);
476 }
477
478 qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
479 handler, NULL, vector);
480
481 /*
482 * Attempt to enable route through KVM irqchip,
483 * default to userspace handling if unavailable.
484 */
485 if (vector->virq >= 0) {
486 if (!msg) {
487 vfio_remove_kvm_msi_virq(vector);
488 } else {
489 vfio_update_kvm_msi_virq(vector, *msg);
490 }
491 } else {
492 vfio_add_kvm_msi_virq(vdev, vector, msg, true);
493 }
494
495 /*
496 * We don't want to have the host allocate all possible MSI vectors
497 * for a device if they're not in use, so we shut down and incrementally
498 * increase them as needed.
499 */
500 if (vdev->nr_vectors < nr + 1) {
501 vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX);
502 vdev->nr_vectors = nr + 1;
503 ret = vfio_enable_vectors(vdev, true);
504 if (ret) {
505 error_report("vfio: failed to enable vectors, %d", ret);
506 }
507 } else {
508 int argsz;
509 struct vfio_irq_set *irq_set;
510 int32_t *pfd;
511
512 argsz = sizeof(*irq_set) + sizeof(*pfd);
513
514 irq_set = g_malloc0(argsz);
515 irq_set->argsz = argsz;
516 irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
517 VFIO_IRQ_SET_ACTION_TRIGGER;
518 irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
519 irq_set->start = nr;
520 irq_set->count = 1;
521 pfd = (int32_t *)&irq_set->data;
522
523 if (vector->virq >= 0) {
524 *pfd = event_notifier_get_fd(&vector->kvm_interrupt);
525 } else {
526 *pfd = event_notifier_get_fd(&vector->interrupt);
527 }
528
529 ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
530 g_free(irq_set);
531 if (ret) {
532 error_report("vfio: failed to modify vector, %d", ret);
533 }
534 }
535
536 return 0;
537 }
538
539 static int vfio_msix_vector_use(PCIDevice *pdev,
540 unsigned int nr, MSIMessage msg)
541 {
542 return vfio_msix_vector_do_use(pdev, nr, &msg, vfio_msi_interrupt);
543 }
544
545 static void vfio_msix_vector_release(PCIDevice *pdev, unsigned int nr)
546 {
547 VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
548 VFIOMSIVector *vector = &vdev->msi_vectors[nr];
549
550 trace_vfio_msix_vector_release(vdev->vbasedev.name, nr);
551
552 /*
553 * There are still old guests that mask and unmask vectors on every
554 * interrupt. If we're using QEMU bypass with a KVM irqfd, leave all of
555 * the KVM setup in place, simply switch VFIO to use the non-bypass
556 * eventfd. We'll then fire the interrupt through QEMU and the MSI-X
557 * core will mask the interrupt and set pending bits, allowing it to
558 * be re-asserted on unmask. Nothing to do if already using QEMU mode.
559 */
560 if (vector->virq >= 0) {
561 int argsz;
562 struct vfio_irq_set *irq_set;
563 int32_t *pfd;
564
565 argsz = sizeof(*irq_set) + sizeof(*pfd);
566
567 irq_set = g_malloc0(argsz);
568 irq_set->argsz = argsz;
569 irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
570 VFIO_IRQ_SET_ACTION_TRIGGER;
571 irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
572 irq_set->start = nr;
573 irq_set->count = 1;
574 pfd = (int32_t *)&irq_set->data;
575
576 *pfd = event_notifier_get_fd(&vector->interrupt);
577
578 ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
579
580 g_free(irq_set);
581 }
582 }
583
584 static void vfio_msix_enable(VFIOPCIDevice *vdev)
585 {
586 vfio_disable_interrupts(vdev);
587
588 vdev->msi_vectors = g_malloc0(vdev->msix->entries * sizeof(VFIOMSIVector));
589
590 vdev->interrupt = VFIO_INT_MSIX;
591
592 /*
593 * Some communication channels between VF & PF or PF & fw rely on the
594 * physical state of the device and expect that enabling MSI-X from the
595 * guest enables the same on the host. When our guest is Linux, the
596 * guest driver call to pci_enable_msix() sets the enabling bit in the
597 * MSI-X capability, but leaves the vector table masked. We therefore
598 * can't rely on a vector_use callback (from request_irq() in the guest)
599 * to switch the physical device into MSI-X mode because that may come a
600 * long time after pci_enable_msix(). This code enables vector 0 with
601 * triggering to userspace, then immediately releases the vector, leaving
602 * the physical device with no vectors enabled, but MSI-X enabled, just
603 * like the guest view.
604 */
605 vfio_msix_vector_do_use(&vdev->pdev, 0, NULL, NULL);
606 vfio_msix_vector_release(&vdev->pdev, 0);
607
608 if (msix_set_vector_notifiers(&vdev->pdev, vfio_msix_vector_use,
609 vfio_msix_vector_release, NULL)) {
610 error_report("vfio: msix_set_vector_notifiers failed");
611 }
612
613 trace_vfio_msix_enable(vdev->vbasedev.name);
614 }
615
616 static void vfio_msi_enable(VFIOPCIDevice *vdev)
617 {
618 int ret, i;
619
620 vfio_disable_interrupts(vdev);
621
622 vdev->nr_vectors = msi_nr_vectors_allocated(&vdev->pdev);
623 retry:
624 vdev->msi_vectors = g_malloc0(vdev->nr_vectors * sizeof(VFIOMSIVector));
625
626 for (i = 0; i < vdev->nr_vectors; i++) {
627 VFIOMSIVector *vector = &vdev->msi_vectors[i];
628 MSIMessage msg = msi_get_message(&vdev->pdev, i);
629
630 vector->vdev = vdev;
631 vector->virq = -1;
632 vector->use = true;
633
634 if (event_notifier_init(&vector->interrupt, 0)) {
635 error_report("vfio: Error: event_notifier_init failed");
636 }
637
638 qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
639 vfio_msi_interrupt, NULL, vector);
640
641 /*
642 * Attempt to enable route through KVM irqchip,
643 * default to userspace handling if unavailable.
644 */
645 vfio_add_kvm_msi_virq(vdev, vector, &msg, false);
646 }
647
648 /* Set interrupt type prior to possible interrupts */
649 vdev->interrupt = VFIO_INT_MSI;
650
651 ret = vfio_enable_vectors(vdev, false);
652 if (ret) {
653 if (ret < 0) {
654 error_report("vfio: Error: Failed to setup MSI fds: %m");
655 } else if (ret != vdev->nr_vectors) {
656 error_report("vfio: Error: Failed to enable %d "
657 "MSI vectors, retry with %d", vdev->nr_vectors, ret);
658 }
659
660 for (i = 0; i < vdev->nr_vectors; i++) {
661 VFIOMSIVector *vector = &vdev->msi_vectors[i];
662 if (vector->virq >= 0) {
663 vfio_remove_kvm_msi_virq(vector);
664 }
665 qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
666 NULL, NULL, NULL);
667 event_notifier_cleanup(&vector->interrupt);
668 }
669
670 g_free(vdev->msi_vectors);
671
672 if (ret > 0 && ret != vdev->nr_vectors) {
673 vdev->nr_vectors = ret;
674 goto retry;
675 }
676 vdev->nr_vectors = 0;
677
678 /*
679 * Failing to set up MSI doesn't really fall within any specification.
680 * Let's try leaving interrupts disabled and hope the guest figures
681 * out to fall back to INTx for this device.
682 */
683 error_report("vfio: Error: Failed to enable MSI");
684 vdev->interrupt = VFIO_INT_NONE;
685
686 return;
687 }
688
689 trace_vfio_msi_enable(vdev->vbasedev.name, vdev->nr_vectors);
690 }
691
692 static void vfio_msi_disable_common(VFIOPCIDevice *vdev)
693 {
694 int i;
695
696 for (i = 0; i < vdev->nr_vectors; i++) {
697 VFIOMSIVector *vector = &vdev->msi_vectors[i];
698 if (vdev->msi_vectors[i].use) {
699 if (vector->virq >= 0) {
700 vfio_remove_kvm_msi_virq(vector);
701 }
702 qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
703 NULL, NULL, NULL);
704 event_notifier_cleanup(&vector->interrupt);
705 }
706 }
707
708 g_free(vdev->msi_vectors);
709 vdev->msi_vectors = NULL;
710 vdev->nr_vectors = 0;
711 vdev->interrupt = VFIO_INT_NONE;
712
713 vfio_intx_enable(vdev);
714 }
715
716 static void vfio_msix_disable(VFIOPCIDevice *vdev)
717 {
718 int i;
719
720 msix_unset_vector_notifiers(&vdev->pdev);
721
722 /*
723 * MSI-X will only release vectors if MSI-X is still enabled on the
724 * device, so check through the rest and release them ourselves if necessary.
725 */
726 for (i = 0; i < vdev->nr_vectors; i++) {
727 if (vdev->msi_vectors[i].use) {
728 vfio_msix_vector_release(&vdev->pdev, i);
729 msix_vector_unuse(&vdev->pdev, i);
730 }
731 }
732
733 if (vdev->nr_vectors) {
734 vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX);
735 }
736
737 vfio_msi_disable_common(vdev);
738
739 trace_vfio_msix_disable(vdev->vbasedev.name);
740 }
741
742 static void vfio_msi_disable(VFIOPCIDevice *vdev)
743 {
744 vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSI_IRQ_INDEX);
745 vfio_msi_disable_common(vdev);
746
747 trace_vfio_msi_disable(vdev->vbasedev.name);
748 }
749
750 static void vfio_update_msi(VFIOPCIDevice *vdev)
751 {
752 int i;
753
754 for (i = 0; i < vdev->nr_vectors; i++) {
755 VFIOMSIVector *vector = &vdev->msi_vectors[i];
756 MSIMessage msg;
757
758 if (!vector->use || vector->virq < 0) {
759 continue;
760 }
761
762 msg = msi_get_message(&vdev->pdev, i);
763 vfio_update_kvm_msi_virq(vector, msg);
764 }
765 }
766
767 static void vfio_pci_load_rom(VFIOPCIDevice *vdev)
768 {
769 struct vfio_region_info reg_info = {
770 .argsz = sizeof(reg_info),
771 .index = VFIO_PCI_ROM_REGION_INDEX
772 };
773 uint64_t size;
774 off_t off = 0;
775 ssize_t bytes;
776
777 if (ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info)) {
778 error_report("vfio: Error getting ROM info: %m");
779 return;
780 }
781
782 trace_vfio_pci_load_rom(vdev->vbasedev.name, (unsigned long)reg_info.size,
783 (unsigned long)reg_info.offset,
784 (unsigned long)reg_info.flags);
785
786 vdev->rom_size = size = reg_info.size;
787 vdev->rom_offset = reg_info.offset;
788
789 if (!vdev->rom_size) {
790 vdev->rom_read_failed = true;
791 error_report("vfio-pci: Cannot read device rom at "
792 "%s", vdev->vbasedev.name);
793 error_printf("Device option ROM contents are probably invalid "
794 "(check dmesg).\nSkip option ROM probe with rombar=0, "
795 "or load from file with romfile=\n");
796 return;
797 }
798
799 vdev->rom = g_malloc(size);
800 memset(vdev->rom, 0xff, size);
801
802 while (size) {
803 bytes = pread(vdev->vbasedev.fd, vdev->rom + off,
804 size, vdev->rom_offset + off);
805 if (bytes == 0) {
806 break;
807 } else if (bytes > 0) {
808 off += bytes;
809 size -= bytes;
810 } else {
811 if (errno == EINTR || errno == EAGAIN) {
812 continue;
813 }
814 error_report("vfio: Error reading device ROM: %m");
815 break;
816 }
817 }
818 }
819
820 static uint64_t vfio_rom_read(void *opaque, hwaddr addr, unsigned size)
821 {
822 VFIOPCIDevice *vdev = opaque;
823 union {
824 uint8_t byte;
825 uint16_t word;
826 uint32_t dword;
827 uint64_t qword;
828 } val;
829 uint64_t data = 0;
830
831 /* Load the ROM lazily when the guest tries to read it */
832 if (unlikely(!vdev->rom && !vdev->rom_read_failed)) {
833 vfio_pci_load_rom(vdev);
834 }
835
836 memcpy(&val, vdev->rom + addr,
837 (addr < vdev->rom_size) ? MIN(size, vdev->rom_size - addr) : 0);
838
839 switch (size) {
840 case 1:
841 data = val.byte;
842 break;
843 case 2:
844 data = le16_to_cpu(val.word);
845 break;
846 case 4:
847 data = le32_to_cpu(val.dword);
848 break;
849 default:
850 hw_error("vfio: unsupported read size, %d bytes\n", size);
851 break;
852 }
853
854 trace_vfio_rom_read(vdev->vbasedev.name, addr, size, data);
855
856 return data;
857 }
858
859 static void vfio_rom_write(void *opaque, hwaddr addr,
860 uint64_t data, unsigned size)
861 {
862 }
863
864 static const MemoryRegionOps vfio_rom_ops = {
865 .read = vfio_rom_read,
866 .write = vfio_rom_write,
867 .endianness = DEVICE_LITTLE_ENDIAN,
868 };
869
870 static void vfio_pci_size_rom(VFIOPCIDevice *vdev)
871 {
872 uint32_t orig, size = cpu_to_le32((uint32_t)PCI_ROM_ADDRESS_MASK);
873 off_t offset = vdev->config_offset + PCI_ROM_ADDRESS;
874 DeviceState *dev = DEVICE(vdev);
875 char name[32];
876 int fd = vdev->vbasedev.fd;
877
878 if (vdev->pdev.romfile || !vdev->pdev.rom_bar) {
879 /* Since pci handles romfile, just print a message and return */
880 if (vfio_blacklist_opt_rom(vdev) && vdev->pdev.romfile) {
881 error_printf("Warning : Device at %04x:%02x:%02x.%x "
882 "is known to cause system instability issues during "
883 "option rom execution. "
884 "Proceeding anyway since user specified romfile\n",
885 vdev->host.domain, vdev->host.bus, vdev->host.slot,
886 vdev->host.function);
887 }
888 return;
889 }
890
891 /*
892 * Use the same size ROM BAR as the physical device. The contents
893 * will get filled in later when the guest tries to read it.
894 */
895 if (pread(fd, &orig, 4, offset) != 4 ||
896 pwrite(fd, &size, 4, offset) != 4 ||
897 pread(fd, &size, 4, offset) != 4 ||
898 pwrite(fd, &orig, 4, offset) != 4) {
899 error_report("%s(%04x:%02x:%02x.%x) failed: %m",
900 __func__, vdev->host.domain, vdev->host.bus,
901 vdev->host.slot, vdev->host.function);
902 return;
903 }
904
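/* Standard PCI BAR sizing: after writing all 1s, the bits the device
 * hardwires to zero read back as zero, so masking, inverting, and adding one
 * yields the ROM BAR size. */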
905 size = ~(le32_to_cpu(size) & PCI_ROM_ADDRESS_MASK) + 1;
906
907 if (!size) {
908 return;
909 }
910
911 if (vfio_blacklist_opt_rom(vdev)) {
912 if (dev->opts && qemu_opt_get(dev->opts, "rombar")) {
913 error_printf("Warning : Device at %04x:%02x:%02x.%x "
914 "is known to cause system instability issues during "
915 "option rom execution. "
916 "Proceeding anyway since user specified non zero value for "
917 "rombar\n",
918 vdev->host.domain, vdev->host.bus, vdev->host.slot,
919 vdev->host.function);
920 } else {
921 error_printf("Warning : Rom loading for device at "
922 "%04x:%02x:%02x.%x has been disabled due to "
923 "system instability issues. "
924 "Specify rombar=1 or romfile to force\n",
925 vdev->host.domain, vdev->host.bus, vdev->host.slot,
926 vdev->host.function);
927 return;
928 }
929 }
930
931 trace_vfio_pci_size_rom(vdev->vbasedev.name, size);
932
933 snprintf(name, sizeof(name), "vfio[%04x:%02x:%02x.%x].rom",
934 vdev->host.domain, vdev->host.bus, vdev->host.slot,
935 vdev->host.function);
936
937 memory_region_init_io(&vdev->pdev.rom, OBJECT(vdev),
938 &vfio_rom_ops, vdev, name, size);
939
940 pci_register_bar(&vdev->pdev, PCI_ROM_SLOT,
941 PCI_BASE_ADDRESS_SPACE_MEMORY, &vdev->pdev.rom);
942
943 vdev->pdev.has_rom = true;
944 vdev->rom_read_failed = false;
945 }
946
947 void vfio_vga_write(void *opaque, hwaddr addr,
948 uint64_t data, unsigned size)
949 {
950 VFIOVGARegion *region = opaque;
951 VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]);
952 union {
953 uint8_t byte;
954 uint16_t word;
955 uint32_t dword;
956 uint64_t qword;
957 } buf;
958 off_t offset = vga->fd_offset + region->offset + addr;
959
960 switch (size) {
961 case 1:
962 buf.byte = data;
963 break;
964 case 2:
965 buf.word = cpu_to_le16(data);
966 break;
967 case 4:
968 buf.dword = cpu_to_le32(data);
969 break;
970 default:
971 hw_error("vfio: unsupported write size, %d bytes", size);
972 break;
973 }
974
975 if (pwrite(vga->fd, &buf, size, offset) != size) {
976 error_report("%s(,0x%"HWADDR_PRIx", 0x%"PRIx64", %d) failed: %m",
977 __func__, region->offset + addr, data, size);
978 }
979
980 trace_vfio_vga_write(region->offset + addr, data, size);
981 }
982
983 uint64_t vfio_vga_read(void *opaque, hwaddr addr, unsigned size)
984 {
985 VFIOVGARegion *region = opaque;
986 VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]);
987 union {
988 uint8_t byte;
989 uint16_t word;
990 uint32_t dword;
991 uint64_t qword;
992 } buf;
993 uint64_t data = 0;
994 off_t offset = vga->fd_offset + region->offset + addr;
995
996 if (pread(vga->fd, &buf, size, offset) != size) {
997 error_report("%s(,0x%"HWADDR_PRIx", %d) failed: %m",
998 __func__, region->offset + addr, size);
999 return (uint64_t)-1;
1000 }
1001
1002 switch (size) {
1003 case 1:
1004 data = buf.byte;
1005 break;
1006 case 2:
1007 data = le16_to_cpu(buf.word);
1008 break;
1009 case 4:
1010 data = le32_to_cpu(buf.dword);
1011 break;
1012 default:
1013 hw_error("vfio: unsupported read size, %d bytes", size);
1014 break;
1015 }
1016
1017 trace_vfio_vga_read(region->offset + addr, size, data);
1018
1019 return data;
1020 }
1021
1022 static const MemoryRegionOps vfio_vga_ops = {
1023 .read = vfio_vga_read,
1024 .write = vfio_vga_write,
1025 .endianness = DEVICE_LITTLE_ENDIAN,
1026 };
1027
1028 /*
1029 * PCI config space
1030 */
1031 uint32_t vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len)
1032 {
1033 VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
1034 uint32_t emu_bits = 0, emu_val = 0, phys_val = 0, val;
1035
1036 memcpy(&emu_bits, vdev->emulated_config_bits + addr, len);
1037 emu_bits = le32_to_cpu(emu_bits);
1038
1039 if (emu_bits) {
1040 emu_val = pci_default_read_config(pdev, addr, len);
1041 }
1042
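/* Any bits of the access that are not emulated must come from the physical
 * device; the result below merges the emulated and physical values. */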
1043 if (~emu_bits & (0xffffffffU >> (32 - len * 8))) {
1044 ssize_t ret;
1045
1046 ret = pread(vdev->vbasedev.fd, &phys_val, len,
1047 vdev->config_offset + addr);
1048 if (ret != len) {
1049 error_report("%s(%04x:%02x:%02x.%x, 0x%x, 0x%x) failed: %m",
1050 __func__, vdev->host.domain, vdev->host.bus,
1051 vdev->host.slot, vdev->host.function, addr, len);
1052 return -errno;
1053 }
1054 phys_val = le32_to_cpu(phys_val);
1055 }
1056
1057 val = (emu_val & emu_bits) | (phys_val & ~emu_bits);
1058
1059 trace_vfio_pci_read_config(vdev->vbasedev.name, addr, len, val);
1060
1061 return val;
1062 }
1063
1064 void vfio_pci_write_config(PCIDevice *pdev,
1065 uint32_t addr, uint32_t val, int len)
1066 {
1067 VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
1068 uint32_t val_le = cpu_to_le32(val);
1069
1070 trace_vfio_pci_write_config(vdev->vbasedev.name, addr, val, len);
1071
1072 /* Write everything to VFIO, let it filter out what we can't write */
1073 if (pwrite(vdev->vbasedev.fd, &val_le, len, vdev->config_offset + addr)
1074 != len) {
1075 error_report("%s(%04x:%02x:%02x.%x, 0x%x, 0x%x, 0x%x) failed: %m",
1076 __func__, vdev->host.domain, vdev->host.bus,
1077 vdev->host.slot, vdev->host.function, addr, val, len);
1078 }
1079
1080 /* MSI/MSI-X Enabling/Disabling */
1081 if (pdev->cap_present & QEMU_PCI_CAP_MSI &&
1082 ranges_overlap(addr, len, pdev->msi_cap, vdev->msi_cap_size)) {
1083 int is_enabled, was_enabled = msi_enabled(pdev);
1084
1085 pci_default_write_config(pdev, addr, val, len);
1086
1087 is_enabled = msi_enabled(pdev);
1088
1089 if (!was_enabled) {
1090 if (is_enabled) {
1091 vfio_msi_enable(vdev);
1092 }
1093 } else {
1094 if (!is_enabled) {
1095 vfio_msi_disable(vdev);
1096 } else {
1097 vfio_update_msi(vdev);
1098 }
1099 }
1100 } else if (pdev->cap_present & QEMU_PCI_CAP_MSIX &&
1101 ranges_overlap(addr, len, pdev->msix_cap, MSIX_CAP_LENGTH)) {
1102 int is_enabled, was_enabled = msix_enabled(pdev);
1103
1104 pci_default_write_config(pdev, addr, val, len);
1105
1106 is_enabled = msix_enabled(pdev);
1107
1108 if (!was_enabled && is_enabled) {
1109 vfio_msix_enable(vdev);
1110 } else if (was_enabled && !is_enabled) {
1111 vfio_msix_disable(vdev);
1112 }
1113 } else {
1114 /* Write everything to QEMU to keep emulated bits correct */
1115 pci_default_write_config(pdev, addr, val, len);
1116 }
1117 }
1118
1119 /*
1120 * Interrupt setup
1121 */
1122 static void vfio_disable_interrupts(VFIOPCIDevice *vdev)
1123 {
1124 /*
1125 * More complicated than it looks. Disabling MSI/X transitions the
1126 * device to INTx mode (if supported). Therefore we need to first
1127 * disable MSI/X and then clean up by disabling INTx.
1128 */
1129 if (vdev->interrupt == VFIO_INT_MSIX) {
1130 vfio_msix_disable(vdev);
1131 } else if (vdev->interrupt == VFIO_INT_MSI) {
1132 vfio_msi_disable(vdev);
1133 }
1134
1135 if (vdev->interrupt == VFIO_INT_INTx) {
1136 vfio_intx_disable(vdev);
1137 }
1138 }
1139
1140 static int vfio_msi_setup(VFIOPCIDevice *vdev, int pos)
1141 {
1142 uint16_t ctrl;
1143 bool msi_64bit, msi_maskbit;
1144 int ret, entries;
1145
1146 if (pread(vdev->vbasedev.fd, &ctrl, sizeof(ctrl),
1147 vdev->config_offset + pos + PCI_CAP_FLAGS) != sizeof(ctrl)) {
1148 return -errno;
1149 }
1150 ctrl = le16_to_cpu(ctrl);
1151
1152 msi_64bit = !!(ctrl & PCI_MSI_FLAGS_64BIT);
1153 msi_maskbit = !!(ctrl & PCI_MSI_FLAGS_MASKBIT);
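/* The Multiple Message Capable field holds log2 of the number of vectors the
 * device can request. */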
1154 entries = 1 << ((ctrl & PCI_MSI_FLAGS_QMASK) >> 1);
1155
1156 trace_vfio_msi_setup(vdev->vbasedev.name, pos);
1157
1158 ret = msi_init(&vdev->pdev, pos, entries, msi_64bit, msi_maskbit);
1159 if (ret < 0) {
1160 if (ret == -ENOTSUP) {
1161 return 0;
1162 }
1163 error_report("vfio: msi_init failed");
1164 return ret;
1165 }
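/* MSI capability length: 0xa bytes for the basic 32-bit capability, +0x4 for
 * a 64-bit address, +0xa when per-vector masking adds the mask and pending
 * registers (10/14/20/24 bytes per the PCI spec). */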
1166 vdev->msi_cap_size = 0xa + (msi_maskbit ? 0xa : 0) + (msi_64bit ? 0x4 : 0);
1167
1168 return 0;
1169 }
1170
1171 /*
1172 * We don't have any control over how pci_add_capability() inserts
1173 * capabilities into the chain. In order to set up MSI-X we need a
1174 * MemoryRegion for the BAR. In order to setup the BAR and not
1175 * attempt to mmap the MSI-X table area, which VFIO won't allow, we
1176 * need to first look for where the MSI-X table lives. So we
1177 * unfortunately split MSI-X setup across two functions.
1178 */
1179 static int vfio_msix_early_setup(VFIOPCIDevice *vdev)
1180 {
1181 uint8_t pos;
1182 uint16_t ctrl;
1183 uint32_t table, pba;
1184 int fd = vdev->vbasedev.fd;
1185 VFIOMSIXInfo *msix;
1186
1187 pos = pci_find_capability(&vdev->pdev, PCI_CAP_ID_MSIX);
1188 if (!pos) {
1189 return 0;
1190 }
1191
1192 if (pread(fd, &ctrl, sizeof(ctrl),
1193 vdev->config_offset + pos + PCI_CAP_FLAGS) != sizeof(ctrl)) {
1194 return -errno;
1195 }
1196
1197 if (pread(fd, &table, sizeof(table),
1198 vdev->config_offset + pos + PCI_MSIX_TABLE) != sizeof(table)) {
1199 return -errno;
1200 }
1201
1202 if (pread(fd, &pba, sizeof(pba),
1203 vdev->config_offset + pos + PCI_MSIX_PBA) != sizeof(pba)) {
1204 return -errno;
1205 }
1206
1207 ctrl = le16_to_cpu(ctrl);
1208 table = le32_to_cpu(table);
1209 pba = le32_to_cpu(pba);
1210
1211 msix = g_malloc0(sizeof(*msix));
1212 msix->table_bar = table & PCI_MSIX_FLAGS_BIRMASK;
1213 msix->table_offset = table & ~PCI_MSIX_FLAGS_BIRMASK;
1214 msix->pba_bar = pba & PCI_MSIX_FLAGS_BIRMASK;
1215 msix->pba_offset = pba & ~PCI_MSIX_FLAGS_BIRMASK;
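/* The MSI-X Table Size field is encoded as N-1; add one for the number of
 * entries. */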
1216 msix->entries = (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
1217
1218 /*
1219 * Check whether the reported pba_offset extends outside of the specified
1220 * BAR. If so, we need to apply a hardware-specific quirk if the device is
1221 * known, or we have a broken configuration.
1222 */
1223 if (msix->pba_offset >= vdev->bars[msix->pba_bar].region.size) {
1224 PCIDevice *pdev = &vdev->pdev;
1225 uint16_t vendor = pci_get_word(pdev->config + PCI_VENDOR_ID);
1226 uint16_t device = pci_get_word(pdev->config + PCI_DEVICE_ID);
1227
1228 /*
1229 * Chelsio T5 Virtual Function devices are encoded as 0x58xx for T5
1230 * adapters. The T5 hardware returns an incorrect value of 0x8000 for
1231 * the VF PBA offset while the BAR itself is only 8k. The correct value
1232 * is 0x1000, so we hard code that here.
1233 */
1234 if (vendor == PCI_VENDOR_ID_CHELSIO && (device & 0xff00) == 0x5800) {
1235 msix->pba_offset = 0x1000;
1236 } else {
1237 error_report("vfio: Hardware reports invalid configuration, "
1238 "MSIX PBA outside of specified BAR");
1239 g_free(msix);
1240 return -EINVAL;
1241 }
1242 }
1243
1244 trace_vfio_msix_early_setup(vdev->vbasedev.name, pos, msix->table_bar,
1245 msix->table_offset, msix->entries);
1246 vdev->msix = msix;
1247
1248 return 0;
1249 }
1250
1251 static int vfio_msix_setup(VFIOPCIDevice *vdev, int pos)
1252 {
1253 int ret;
1254
1255 ret = msix_init(&vdev->pdev, vdev->msix->entries,
1256 &vdev->bars[vdev->msix->table_bar].region.mem,
1257 vdev->msix->table_bar, vdev->msix->table_offset,
1258 &vdev->bars[vdev->msix->pba_bar].region.mem,
1259 vdev->msix->pba_bar, vdev->msix->pba_offset, pos);
1260 if (ret < 0) {
1261 if (ret == -ENOTSUP) {
1262 return 0;
1263 }
1264 error_report("vfio: msix_init failed");
1265 return ret;
1266 }
1267
1268 return 0;
1269 }
1270
1271 static void vfio_teardown_msi(VFIOPCIDevice *vdev)
1272 {
1273 msi_uninit(&vdev->pdev);
1274
1275 if (vdev->msix) {
1276 msix_uninit(&vdev->pdev,
1277 &vdev->bars[vdev->msix->table_bar].region.mem,
1278 &vdev->bars[vdev->msix->pba_bar].region.mem);
1279 }
1280 }
1281
1282 /*
1283 * Resource setup
1284 */
1285 static void vfio_mmap_set_enabled(VFIOPCIDevice *vdev, bool enabled)
1286 {
1287 int i;
1288
1289 for (i = 0; i < PCI_ROM_SLOT; i++) {
1290 VFIOBAR *bar = &vdev->bars[i];
1291
1292 if (!bar->region.size) {
1293 continue;
1294 }
1295
1296 memory_region_set_enabled(&bar->region.mmap_mem, enabled);
1297 if (vdev->msix && vdev->msix->table_bar == i) {
1298 memory_region_set_enabled(&vdev->msix->mmap_mem, enabled);
1299 }
1300 }
1301 }
1302
1303 static void vfio_unregister_bar(VFIOPCIDevice *vdev, int nr)
1304 {
1305 VFIOBAR *bar = &vdev->bars[nr];
1306
1307 if (!bar->region.size) {
1308 return;
1309 }
1310
1311 vfio_bar_quirk_teardown(vdev, nr);
1312
1313 memory_region_del_subregion(&bar->region.mem, &bar->region.mmap_mem);
1314
1315 if (vdev->msix && vdev->msix->table_bar == nr) {
1316 memory_region_del_subregion(&bar->region.mem, &vdev->msix->mmap_mem);
1317 }
1318 }
1319
1320 static void vfio_unmap_bar(VFIOPCIDevice *vdev, int nr)
1321 {
1322 VFIOBAR *bar = &vdev->bars[nr];
1323
1324 if (!bar->region.size) {
1325 return;
1326 }
1327
1328 vfio_bar_quirk_free(vdev, nr);
1329
1330 munmap(bar->region.mmap, memory_region_size(&bar->region.mmap_mem));
1331
1332 if (vdev->msix && vdev->msix->table_bar == nr) {
1333 munmap(vdev->msix->mmap, memory_region_size(&vdev->msix->mmap_mem));
1334 }
1335 }
1336
1337 static void vfio_map_bar(VFIOPCIDevice *vdev, int nr)
1338 {
1339 VFIOBAR *bar = &vdev->bars[nr];
1340 uint64_t size = bar->region.size;
1341 char name[64];
1342 uint32_t pci_bar;
1343 uint8_t type;
1344 int ret;
1345
1346 /* Skip both unimplemented BARs and the upper half of 64-bit BARs. */
1347 if (!size) {
1348 return;
1349 }
1350
1351 snprintf(name, sizeof(name), "VFIO %04x:%02x:%02x.%x BAR %d",
1352 vdev->host.domain, vdev->host.bus, vdev->host.slot,
1353 vdev->host.function, nr);
1354
1355 /* Determine what type of BAR this is for registration */
1356 ret = pread(vdev->vbasedev.fd, &pci_bar, sizeof(pci_bar),
1357 vdev->config_offset + PCI_BASE_ADDRESS_0 + (4 * nr));
1358 if (ret != sizeof(pci_bar)) {
1359 error_report("vfio: Failed to read BAR %d (%m)", nr);
1360 return;
1361 }
1362
1363 pci_bar = le32_to_cpu(pci_bar);
1364 bar->ioport = (pci_bar & PCI_BASE_ADDRESS_SPACE_IO);
1365 bar->mem64 = bar->ioport ? 0 : (pci_bar & PCI_BASE_ADDRESS_MEM_TYPE_64);
1366 type = pci_bar & (bar->ioport ? ~PCI_BASE_ADDRESS_IO_MASK :
1367 ~PCI_BASE_ADDRESS_MEM_MASK);
1368
1369 /* A "slow" read/write mapping underlies all BARs */
1370 memory_region_init_io(&bar->region.mem, OBJECT(vdev), &vfio_region_ops,
1371 bar, name, size);
1372 pci_register_bar(&vdev->pdev, nr, type, &bar->region.mem);
1373
1374 /*
1375 * We can't mmap areas overlapping the MSIX vector table, so we
1376 * potentially insert a direct-mapped subregion before and after it.
1377 */
1378 if (vdev->msix && vdev->msix->table_bar == nr) {
1379 size = vdev->msix->table_offset & qemu_real_host_page_mask;
1380 }
1381
1382 strncat(name, " mmap", sizeof(name) - strlen(name) - 1);
1383 if (vfio_mmap_region(OBJECT(vdev), &bar->region, &bar->region.mem,
1384 &bar->region.mmap_mem, &bar->region.mmap,
1385 size, 0, name)) {
1386 error_report("%s unsupported. Performance may be slow", name);
1387 }
1388
1389 if (vdev->msix && vdev->msix->table_bar == nr) {
1390 uint64_t start;
1391
1392 start = REAL_HOST_PAGE_ALIGN((uint64_t)vdev->msix->table_offset +
1393 (vdev->msix->entries *
1394 PCI_MSIX_ENTRY_SIZE));
1395
1396 size = start < bar->region.size ? bar->region.size - start : 0;
1397 strncat(name, " msix-hi", sizeof(name) - strlen(name) - 1);
1398 /* VFIOMSIXInfo contains another MemoryRegion for this mapping */
1399 if (vfio_mmap_region(OBJECT(vdev), &bar->region, &bar->region.mem,
1400 &vdev->msix->mmap_mem,
1401 &vdev->msix->mmap, size, start, name)) {
1402 error_report("%s unsupported. Performance may be slow", name);
1403 }
1404 }
1405
1406 vfio_bar_quirk_setup(vdev, nr);
1407 }
1408
1409 static void vfio_map_bars(VFIOPCIDevice *vdev)
1410 {
1411 int i;
1412
1413 for (i = 0; i < PCI_ROM_SLOT; i++) {
1414 vfio_map_bar(vdev, i);
1415 }
1416
1417 if (vdev->has_vga) {
1418 memory_region_init_io(&vdev->vga.region[QEMU_PCI_VGA_MEM].mem,
1419 OBJECT(vdev), &vfio_vga_ops,
1420 &vdev->vga.region[QEMU_PCI_VGA_MEM],
1421 "vfio-vga-mmio@0xa0000",
1422 QEMU_PCI_VGA_MEM_SIZE);
1423 memory_region_init_io(&vdev->vga.region[QEMU_PCI_VGA_IO_LO].mem,
1424 OBJECT(vdev), &vfio_vga_ops,
1425 &vdev->vga.region[QEMU_PCI_VGA_IO_LO],
1426 "vfio-vga-io@0x3b0",
1427 QEMU_PCI_VGA_IO_LO_SIZE);
1428 memory_region_init_io(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem,
1429 OBJECT(vdev), &vfio_vga_ops,
1430 &vdev->vga.region[QEMU_PCI_VGA_IO_HI],
1431 "vfio-vga-io@0x3c0",
1432 QEMU_PCI_VGA_IO_HI_SIZE);
1433
1434 pci_register_vga(&vdev->pdev, &vdev->vga.region[QEMU_PCI_VGA_MEM].mem,
1435 &vdev->vga.region[QEMU_PCI_VGA_IO_LO].mem,
1436 &vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem);
1437 vfio_vga_quirk_setup(vdev);
1438 }
1439 }
1440
1441 static void vfio_unregister_bars(VFIOPCIDevice *vdev)
1442 {
1443 int i;
1444
1445 for (i = 0; i < PCI_ROM_SLOT; i++) {
1446 vfio_unregister_bar(vdev, i);
1447 }
1448
1449 if (vdev->has_vga) {
1450 vfio_vga_quirk_teardown(vdev);
1451 pci_unregister_vga(&vdev->pdev);
1452 }
1453 }
1454
1455 static void vfio_unmap_bars(VFIOPCIDevice *vdev)
1456 {
1457 int i;
1458
1459 for (i = 0; i < PCI_ROM_SLOT; i++) {
1460 vfio_unmap_bar(vdev, i);
1461 }
1462
1463 if (vdev->has_vga) {
1464 vfio_vga_quirk_free(vdev);
1465 }
1466 }
1467
1468 /*
1469 * General setup
1470 */
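/* Bound a standard capability's size by the distance from its offset to the
 * next-highest capability in the chain, or to the end of standard config
 * space (0xff) if it is the last one. */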
1471 static uint8_t vfio_std_cap_max_size(PCIDevice *pdev, uint8_t pos)
1472 {
1473 uint8_t tmp, next = 0xff;
1474
1475 for (tmp = pdev->config[PCI_CAPABILITY_LIST]; tmp;
1476 tmp = pdev->config[tmp + 1]) {
1477 if (tmp > pos && tmp < next) {
1478 next = tmp;
1479 }
1480 }
1481
1482 return next - pos;
1483 }
1484
1485 static void vfio_set_word_bits(uint8_t *buf, uint16_t val, uint16_t mask)
1486 {
1487 pci_set_word(buf, (pci_get_word(buf) & ~mask) | val);
1488 }
1489
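/* Emulate selected bits of a config word: write the value into QEMU's copy of
 * config space, clear the corresponding write-mask bits so the guest can't
 * change them, and mark the bits emulated so reads are served from QEMU
 * rather than the device. A long (32-bit) variant follows below. */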
1490 static void vfio_add_emulated_word(VFIOPCIDevice *vdev, int pos,
1491 uint16_t val, uint16_t mask)
1492 {
1493 vfio_set_word_bits(vdev->pdev.config + pos, val, mask);
1494 vfio_set_word_bits(vdev->pdev.wmask + pos, ~mask, mask);
1495 vfio_set_word_bits(vdev->emulated_config_bits + pos, mask, mask);
1496 }
1497
1498 static void vfio_set_long_bits(uint8_t *buf, uint32_t val, uint32_t mask)
1499 {
1500 pci_set_long(buf, (pci_get_long(buf) & ~mask) | val);
1501 }
1502
1503 static void vfio_add_emulated_long(VFIOPCIDevice *vdev, int pos,
1504 uint32_t val, uint32_t mask)
1505 {
1506 vfio_set_long_bits(vdev->pdev.config + pos, val, mask);
1507 vfio_set_long_bits(vdev->pdev.wmask + pos, ~mask, mask);
1508 vfio_set_long_bits(vdev->emulated_config_bits + pos, mask, mask);
1509 }
1510
1511 static int vfio_setup_pcie_cap(VFIOPCIDevice *vdev, int pos, uint8_t size)
1512 {
1513 uint16_t flags;
1514 uint8_t type;
1515
1516 flags = pci_get_word(vdev->pdev.config + pos + PCI_CAP_FLAGS);
1517 type = (flags & PCI_EXP_FLAGS_TYPE) >> 4;
1518
1519 if (type != PCI_EXP_TYPE_ENDPOINT &&
1520 type != PCI_EXP_TYPE_LEG_END &&
1521 type != PCI_EXP_TYPE_RC_END) {
1522
1523 error_report("vfio: Assignment of PCIe type 0x%x "
1524 "devices is not currently supported", type);
1525 return -EINVAL;
1526 }
1527
1528 if (!pci_bus_is_express(vdev->pdev.bus)) {
1529 /*
1530 * Use the express capability as-is on a PCI bus. It doesn't make much
1531 * sense to even expose it, but some drivers (e.g. tg3) depend on it
1532 * and guests don't seem to be particular about it. We'll need
1533 * to revisit this or force express devices to express buses if we
1534 * ever expose an IOMMU to the guest.
1535 */
1536 } else if (pci_bus_is_root(vdev->pdev.bus)) {
1537 /*
1538 * On a Root Complex bus Endpoints become Root Complex Integrated
1539 * Endpoints, which changes the type and clears the LNK & LNK2 fields.
1540 */
1541 if (type == PCI_EXP_TYPE_ENDPOINT) {
1542 vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
1543 PCI_EXP_TYPE_RC_END << 4,
1544 PCI_EXP_FLAGS_TYPE);
1545
1546 /* Link Capabilities, Status, and Control go away */
1547 if (size > PCI_EXP_LNKCTL) {
1548 vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP, 0, ~0);
1549 vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0);
1550 vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA, 0, ~0);
1551
1552 #ifndef PCI_EXP_LNKCAP2
1553 #define PCI_EXP_LNKCAP2 44
1554 #endif
1555 #ifndef PCI_EXP_LNKSTA2
1556 #define PCI_EXP_LNKSTA2 50
1557 #endif
1558 /* Link 2 Capabilities, Status, and Control go away */
1559 if (size > PCI_EXP_LNKCAP2) {
1560 vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP2, 0, ~0);
1561 vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL2, 0, ~0);
1562 vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA2, 0, ~0);
1563 }
1564 }
1565
1566 } else if (type == PCI_EXP_TYPE_LEG_END) {
1567 /*
1568 * Legacy endpoints don't belong on the root complex. Windows
1569 * seems to be happier with devices if we skip the capability.
1570 */
1571 return 0;
1572 }
1573
1574 } else {
1575 /*
1576 * Convert Root Complex Integrated Endpoints to regular endpoints.
1577 * These devices don't support LNK/LNK2 capabilities, so make them up.
1578 */
1579 if (type == PCI_EXP_TYPE_RC_END) {
1580 vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
1581 PCI_EXP_TYPE_ENDPOINT << 4,
1582 PCI_EXP_FLAGS_TYPE);
1583 vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP,
1584 PCI_EXP_LNK_MLW_1 | PCI_EXP_LNK_LS_25, ~0);
1585 vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0);
1586 }
1587
1588 /* Mark the Link Status bits as emulated to allow virtual negotiation */
1589 vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA,
1590 pci_get_word(vdev->pdev.config + pos +
1591 PCI_EXP_LNKSTA),
1592 PCI_EXP_LNKCAP_MLW | PCI_EXP_LNKCAP_SLS);
1593 }
1594
1595 pos = pci_add_capability(&vdev->pdev, PCI_CAP_ID_EXP, pos, size);
1596 if (pos >= 0) {
1597 vdev->pdev.exp.exp_cap = pos;
1598 }
1599
1600 return pos;
1601 }
1602
1603 static void vfio_check_pcie_flr(VFIOPCIDevice *vdev, uint8_t pos)
1604 {
1605 uint32_t cap = pci_get_long(vdev->pdev.config + pos + PCI_EXP_DEVCAP);
1606
1607 if (cap & PCI_EXP_DEVCAP_FLR) {
1608 trace_vfio_check_pcie_flr(vdev->vbasedev.name);
1609 vdev->has_flr = true;
1610 }
1611 }
1612
1613 static void vfio_check_pm_reset(VFIOPCIDevice *vdev, uint8_t pos)
1614 {
1615 uint16_t csr = pci_get_word(vdev->pdev.config + pos + PCI_PM_CTRL);
1616
1617 if (!(csr & PCI_PM_CTRL_NO_SOFT_RESET)) {
1618 trace_vfio_check_pm_reset(vdev->vbasedev.name);
1619 vdev->has_pm_reset = true;
1620 }
1621 }
1622
1623 static void vfio_check_af_flr(VFIOPCIDevice *vdev, uint8_t pos)
1624 {
1625 uint8_t cap = pci_get_byte(vdev->pdev.config + pos + PCI_AF_CAP);
1626
1627 if ((cap & PCI_AF_CAP_TP) && (cap & PCI_AF_CAP_FLR)) {
1628 trace_vfio_check_af_flr(vdev->vbasedev.name);
1629 vdev->has_flr = true;
1630 }
1631 }
1632
1633 static int vfio_add_std_cap(VFIOPCIDevice *vdev, uint8_t pos)
1634 {
1635 PCIDevice *pdev = &vdev->pdev;
1636 uint8_t cap_id, next, size;
1637 int ret;
1638
1639 cap_id = pdev->config[pos];
1640 next = pdev->config[pos + 1];
1641
1642 /*
1643 * If it becomes important to configure capabilities to their actual
1644 * size, use this as the default when it's something we don't recognize.
1645 * Since QEMU doesn't actually handle many of the config accesses,
1646 * exact size doesn't seem worthwhile.
1647 */
1648 size = vfio_std_cap_max_size(pdev, pos);
1649
1650 /*
1651 * pci_add_capability always inserts the new capability at the head
1652 * of the chain. Therefore to end up with a chain that matches the
1653 * physical device, we insert from the end by making this recursive.
1654 * This is also why we pre-calculate size above, as cached config space
1655 * will be changed as we unwind the stack.
1656 */
1657 if (next) {
1658 ret = vfio_add_std_cap(vdev, next);
1659 if (ret) {
1660 return ret;
1661 }
1662 } else {
1663 /* Begin the rebuild, use QEMU emulated list bits */
1664 pdev->config[PCI_CAPABILITY_LIST] = 0;
1665 vdev->emulated_config_bits[PCI_CAPABILITY_LIST] = 0xff;
1666 vdev->emulated_config_bits[PCI_STATUS] |= PCI_STATUS_CAP_LIST;
1667 }
1668
1669 /* Use emulated next pointer to allow dropping caps */
1670 pci_set_byte(vdev->emulated_config_bits + pos + 1, 0xff);
1671
1672 switch (cap_id) {
1673 case PCI_CAP_ID_MSI:
1674 ret = vfio_msi_setup(vdev, pos);
1675 break;
1676 case PCI_CAP_ID_EXP:
1677 vfio_check_pcie_flr(vdev, pos);
1678 ret = vfio_setup_pcie_cap(vdev, pos, size);
1679 break;
1680 case PCI_CAP_ID_MSIX:
1681 ret = vfio_msix_setup(vdev, pos);
1682 break;
1683 case PCI_CAP_ID_PM:
1684 vfio_check_pm_reset(vdev, pos);
1685 vdev->pm_cap = pos;
1686 ret = pci_add_capability(pdev, cap_id, pos, size);
1687 break;
1688 case PCI_CAP_ID_AF:
1689 vfio_check_af_flr(vdev, pos);
1690 ret = pci_add_capability(pdev, cap_id, pos, size);
1691 break;
1692 default:
1693 ret = pci_add_capability(pdev, cap_id, pos, size);
1694 break;
1695 }
1696
1697 if (ret < 0) {
1698 error_report("vfio: %04x:%02x:%02x.%x Error adding PCI capability "
1699 "0x%x[0x%x]@0x%x: %d", vdev->host.domain,
1700 vdev->host.bus, vdev->host.slot, vdev->host.function,
1701 cap_id, size, pos, ret);
1702 return ret;
1703 }
1704
1705 return 0;
1706 }
1707
1708 static int vfio_add_capabilities(VFIOPCIDevice *vdev)
1709 {
1710 PCIDevice *pdev = &vdev->pdev;
1711
1712 if (!(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST) ||
1713 !pdev->config[PCI_CAPABILITY_LIST]) {
1714 return 0; /* Nothing to add */
1715 }
1716
1717 return vfio_add_std_cap(vdev, pdev->config[PCI_CAPABILITY_LIST]);
1718 }
1719
1720 static void vfio_pci_pre_reset(VFIOPCIDevice *vdev)
1721 {
1722 PCIDevice *pdev = &vdev->pdev;
1723 uint16_t cmd;
1724
1725 vfio_disable_interrupts(vdev);
1726
1727 /* Make sure the device is in D0 */
1728 if (vdev->pm_cap) {
1729 uint16_t pmcsr;
1730 uint8_t state;
1731
1732 pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2);
1733 state = pmcsr & PCI_PM_CTRL_STATE_MASK;
1734 if (state) {
1735 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
1736 vfio_pci_write_config(pdev, vdev->pm_cap + PCI_PM_CTRL, pmcsr, 2);
1737 /* vfio handles the necessary delay here */
1738 pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2);
1739 state = pmcsr & PCI_PM_CTRL_STATE_MASK;
1740 if (state) {
1741 error_report("vfio: Unable to power on device, stuck in D%d",
1742 state);
1743 }
1744 }
1745 }
1746
1747 /*
1748 * Stop any ongoing DMA by disconnecting I/O, MMIO, and bus master.
1749 * Also put INTx Disable in a known state.
1750 */
1751 cmd = vfio_pci_read_config(pdev, PCI_COMMAND, 2);
1752 cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
1753 PCI_COMMAND_INTX_DISABLE);
1754 vfio_pci_write_config(pdev, PCI_COMMAND, cmd, 2);
1755 }
1756
1757 static void vfio_pci_post_reset(VFIOPCIDevice *vdev)
1758 {
1759 vfio_intx_enable(vdev);
1760 }
1761
1762 static bool vfio_pci_host_match(PCIHostDeviceAddress *host1,
1763 PCIHostDeviceAddress *host2)
1764 {
1765 return (host1->domain == host2->domain && host1->bus == host2->bus &&
1766 host1->slot == host2->slot && host1->function == host2->function);
1767 }
1768
1769 static int vfio_pci_hot_reset(VFIOPCIDevice *vdev, bool single)
1770 {
1771 VFIOGroup *group;
1772 struct vfio_pci_hot_reset_info *info;
1773 struct vfio_pci_dependent_device *devices;
1774 struct vfio_pci_hot_reset *reset;
1775 int32_t *fds;
1776 int ret, i, count;
1777 bool multi = false;
1778
1779 trace_vfio_pci_hot_reset(vdev->vbasedev.name, single ? "one" : "multi");
1780
1781 vfio_pci_pre_reset(vdev);
1782 vdev->vbasedev.needs_reset = false;
1783
1784 info = g_malloc0(sizeof(*info));
1785 info->argsz = sizeof(*info);
1786
1787 ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info);
1788 if (ret && errno != ENOSPC) {
1789 ret = -errno;
1790 if (!vdev->has_pm_reset) {
1791 error_report("vfio: Cannot reset device %04x:%02x:%02x.%x, "
1792 "no available reset mechanism.", vdev->host.domain,
1793 vdev->host.bus, vdev->host.slot, vdev->host.function);
1794 }
1795 goto out_single;
1796 }
1797
1798 count = info->count;
1799 info = g_realloc(info, sizeof(*info) + (count * sizeof(*devices)));
1800 info->argsz = sizeof(*info) + (count * sizeof(*devices));
1801 devices = &info->devices[0];
1802
1803 ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info);
1804 if (ret) {
1805 ret = -errno;
1806 error_report("vfio: hot reset info failed: %m");
1807 goto out_single;
1808 }
1809
1810 trace_vfio_pci_hot_reset_has_dep_devices(vdev->vbasedev.name);
1811
1812 /* Verify that we have all the groups required */
1813 for (i = 0; i < info->count; i++) {
1814 PCIHostDeviceAddress host;
1815 VFIOPCIDevice *tmp;
1816 VFIODevice *vbasedev_iter;
1817
1818 host.domain = devices[i].segment;
1819 host.bus = devices[i].bus;
1820 host.slot = PCI_SLOT(devices[i].devfn);
1821 host.function = PCI_FUNC(devices[i].devfn);
1822
1823 trace_vfio_pci_hot_reset_dep_devices(host.domain,
1824 host.bus, host.slot, host.function, devices[i].group_id);
1825
1826 if (vfio_pci_host_match(&host, &vdev->host)) {
1827 continue;
1828 }
1829
1830 QLIST_FOREACH(group, &vfio_group_list, next) {
1831 if (group->groupid == devices[i].group_id) {
1832 break;
1833 }
1834 }
1835
1836 if (!group) {
1837 if (!vdev->has_pm_reset) {
1838 error_report("vfio: Cannot reset device %s, "
1839 "depends on group %d which is not owned.",
1840 vdev->vbasedev.name, devices[i].group_id);
1841 }
1842 ret = -EPERM;
1843 goto out;
1844 }
1845
1846 /* Prep dependent devices for reset and clear our marker. */
1847 QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
1848 if (vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) {
1849 continue;
1850 }
1851 tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev);
1852 if (vfio_pci_host_match(&host, &tmp->host)) {
1853 if (single) {
1854 ret = -EINVAL;
1855 goto out_single;
1856 }
1857 vfio_pci_pre_reset(tmp);
1858 tmp->vbasedev.needs_reset = false;
1859 multi = true;
1860 break;
1861 }
1862 }
1863 }
1864
1865 if (!single && !multi) {
1866 ret = -EINVAL;
1867 goto out_single;
1868 }
1869
1870 /* Determine how many group fds need to be passed */
1871 count = 0;
1872 QLIST_FOREACH(group, &vfio_group_list, next) {
1873 for (i = 0; i < info->count; i++) {
1874 if (group->groupid == devices[i].group_id) {
1875 count++;
1876 break;
1877 }
1878 }
1879 }
1880
1881 reset = g_malloc0(sizeof(*reset) + (count * sizeof(*fds)));
1882 reset->argsz = sizeof(*reset) + (count * sizeof(*fds));
1883 fds = &reset->group_fds[0];
1884
1885 /* Fill in group fds */
1886 QLIST_FOREACH(group, &vfio_group_list, next) {
1887 for (i = 0; i < info->count; i++) {
1888 if (group->groupid == devices[i].group_id) {
1889 fds[reset->count++] = group->fd;
1890 break;
1891 }
1892 }
1893 }
1894
1895 /* Bus reset! */
1896 ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_PCI_HOT_RESET, reset);
1897 g_free(reset);
1898
1899 trace_vfio_pci_hot_reset_result(vdev->vbasedev.name,
1900 ret ? "%m" : "Success");
1901
1902 out:
1903 /* Re-enable INTx on affected devices */
1904 for (i = 0; i < info->count; i++) {
1905 PCIHostDeviceAddress host;
1906 VFIOPCIDevice *tmp;
1907 VFIODevice *vbasedev_iter;
1908
1909 host.domain = devices[i].segment;
1910 host.bus = devices[i].bus;
1911 host.slot = PCI_SLOT(devices[i].devfn);
1912 host.function = PCI_FUNC(devices[i].devfn);
1913
1914 if (vfio_pci_host_match(&host, &vdev->host)) {
1915 continue;
1916 }
1917
1918 QLIST_FOREACH(group, &vfio_group_list, next) {
1919 if (group->groupid == devices[i].group_id) {
1920 break;
1921 }
1922 }
1923
1924 if (!group) {
1925 break;
1926 }
1927
1928 QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
1929 if (vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) {
1930 continue;
1931 }
1932 tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev);
1933 if (vfio_pci_host_match(&host, &tmp->host)) {
1934 vfio_pci_post_reset(tmp);
1935 break;
1936 }
1937 }
1938 }
1939 out_single:
1940 vfio_pci_post_reset(vdev);
1941 g_free(info);
1942
1943 return ret;
1944 }
1945
1946 /*
1947 * We want to differentiate hot reset of multiple in-use devices vs hot reset
1948 * of a single in-use device. VFIO_DEVICE_RESET will already handle the case
1949 * of doing hot resets when there is only a single device per bus. The in-use
1950 * here refers to how many VFIODevices are affected. A hot reset that affects
1951 * multiple devices, but only a single in-use device, means that we can call
1952 * it from our bus ->reset() callback since the extent is effectively a single
1953 * device. This allows us to make use of it in the hotplug path. When there
1954 * are multiple in-use devices, we can only trigger the hot reset during a
1955 * system reset and thus from our reset handler. We separate _one vs _multi
1956 * here so that we don't overlap and do a double reset on the system reset
1957 * path where both our reset handler and ->reset() callback are used. Calling
1958 * _one() will only do a hot reset for the single in-use device case; calling
1959 * _multi() will do nothing if a _one() would have been sufficient.
1960 */
1961 static int vfio_pci_hot_reset_one(VFIOPCIDevice *vdev)
1962 {
1963 return vfio_pci_hot_reset(vdev, true);
1964 }
1965
1966 static int vfio_pci_hot_reset_multi(VFIODevice *vbasedev)
1967 {
1968 VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
1969 return vfio_pci_hot_reset(vdev, false);
1970 }
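/*
 * Illustrative note, not part of the original source: how the two entry
 * points above are expected to be reached (caller names outside this file
 * are assumptions).
 *
 *   hotplug / bus ->reset() path, single in-use device:
 *       vfio_pci_reset() -> vfio_pci_hot_reset_one() -> vfio_pci_hot_reset(vdev, true)
 *
 *   system reset path, possibly multiple in-use devices:
 *       common reset handler -> ops->vfio_hot_reset_multi()
 *                            -> vfio_pci_hot_reset(vdev, false)
 *
 * _one() fails with -EINVAL as soon as a second in-use device is found, and
 * _multi() fails with -EINVAL when no second in-use device exists, so the
 * same event never triggers the hot reset through both paths.
 */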
1971
1972 static void vfio_pci_compute_needs_reset(VFIODevice *vbasedev)
1973 {
1974 VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
1975 if (!vbasedev->reset_works || (!vdev->has_flr && vdev->has_pm_reset)) {
1976 vbasedev->needs_reset = true;
1977 }
1978 }
1979
1980 static VFIODeviceOps vfio_pci_ops = {
1981 .vfio_compute_needs_reset = vfio_pci_compute_needs_reset,
1982 .vfio_hot_reset_multi = vfio_pci_hot_reset_multi,
1983 .vfio_eoi = vfio_intx_eoi,
1984 };
1985
1986 static int vfio_populate_device(VFIOPCIDevice *vdev)
1987 {
1988 VFIODevice *vbasedev = &vdev->vbasedev;
1989 struct vfio_region_info reg_info = { .argsz = sizeof(reg_info) };
1990 struct vfio_irq_info irq_info = { .argsz = sizeof(irq_info) };
1991 int i, ret = -1;
1992
1993 /* Sanity check device */
1994 if (!(vbasedev->flags & VFIO_DEVICE_FLAGS_PCI)) {
1995 error_report("vfio: Um, this isn't a PCI device");
1996 goto error;
1997 }
1998
1999 if (vbasedev->num_regions < VFIO_PCI_CONFIG_REGION_INDEX + 1) {
2000 error_report("vfio: unexpected number of io regions %u",
2001 vbasedev->num_regions);
2002 goto error;
2003 }
2004
2005 if (vbasedev->num_irqs < VFIO_PCI_MSIX_IRQ_INDEX + 1) {
2006 error_report("vfio: unexpected number of irqs %u", vbasedev->num_irqs);
2007 goto error;
2008 }
2009
2010 for (i = VFIO_PCI_BAR0_REGION_INDEX; i < VFIO_PCI_ROM_REGION_INDEX; i++) {
2011 reg_info.index = i;
2012
2013 ret = ioctl(vbasedev->fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info);
2014 if (ret) {
2015 error_report("vfio: Error getting region %d info: %m", i);
2016 goto error;
2017 }
2018
2019 trace_vfio_populate_device_region(vbasedev->name, i,
2020 (unsigned long)reg_info.size,
2021 (unsigned long)reg_info.offset,
2022 (unsigned long)reg_info.flags);
2023
2024 vdev->bars[i].region.vbasedev = vbasedev;
2025 vdev->bars[i].region.flags = reg_info.flags;
2026 vdev->bars[i].region.size = reg_info.size;
2027 vdev->bars[i].region.fd_offset = reg_info.offset;
2028 vdev->bars[i].region.nr = i;
2029 QLIST_INIT(&vdev->bars[i].quirks);
2030 }
2031
2032 reg_info.index = VFIO_PCI_CONFIG_REGION_INDEX;
2033
2034 ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info);
2035 if (ret) {
2036 error_report("vfio: Error getting config info: %m");
2037 goto error;
2038 }
2039
2040 trace_vfio_populate_device_config(vdev->vbasedev.name,
2041 (unsigned long)reg_info.size,
2042 (unsigned long)reg_info.offset,
2043 (unsigned long)reg_info.flags);
2044
2045 vdev->config_size = reg_info.size;
2046 if (vdev->config_size == PCI_CONFIG_SPACE_SIZE) {
2047 vdev->pdev.cap_present &= ~QEMU_PCI_CAP_EXPRESS;
2048 }
2049 vdev->config_offset = reg_info.offset;
2050
2051 if ((vdev->features & VFIO_FEATURE_ENABLE_VGA) &&
2052 vbasedev->num_regions > VFIO_PCI_VGA_REGION_INDEX) {
2053 struct vfio_region_info vga_info = {
2054 .argsz = sizeof(vga_info),
2055 .index = VFIO_PCI_VGA_REGION_INDEX,
2056 };
2057
2058 ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_REGION_INFO, &vga_info);
2059 if (ret) {
2060 error_report(
2061 "vfio: Device does not support requested feature x-vga");
2062 goto error;
2063 }
2064
2065 if (!(vga_info.flags & VFIO_REGION_INFO_FLAG_READ) ||
2066 !(vga_info.flags & VFIO_REGION_INFO_FLAG_WRITE) ||
2067 vga_info.size < 0xbffff + 1) {
2068 error_report("vfio: Unexpected VGA info, flags 0x%lx, size 0x%lx",
2069 (unsigned long)vga_info.flags,
2070 (unsigned long)vga_info.size);
2071 goto error;
2072 }
2073
2074 vdev->vga.fd_offset = vga_info.offset;
2075 vdev->vga.fd = vdev->vbasedev.fd;
2076
2077 vdev->vga.region[QEMU_PCI_VGA_MEM].offset = QEMU_PCI_VGA_MEM_BASE;
2078 vdev->vga.region[QEMU_PCI_VGA_MEM].nr = QEMU_PCI_VGA_MEM;
2079 QLIST_INIT(&vdev->vga.region[QEMU_PCI_VGA_MEM].quirks);
2080
2081 vdev->vga.region[QEMU_PCI_VGA_IO_LO].offset = QEMU_PCI_VGA_IO_LO_BASE;
2082 vdev->vga.region[QEMU_PCI_VGA_IO_LO].nr = QEMU_PCI_VGA_IO_LO;
2083 QLIST_INIT(&vdev->vga.region[QEMU_PCI_VGA_IO_LO].quirks);
2084
2085 vdev->vga.region[QEMU_PCI_VGA_IO_HI].offset = QEMU_PCI_VGA_IO_HI_BASE;
2086 vdev->vga.region[QEMU_PCI_VGA_IO_HI].nr = QEMU_PCI_VGA_IO_HI;
2087 QLIST_INIT(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].quirks);
2088
2089 vdev->has_vga = true;
2090 }
2091
2092 irq_info.index = VFIO_PCI_ERR_IRQ_INDEX;
2093
2094 ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_IRQ_INFO, &irq_info);
2095 if (ret) {
2096 /* This can fail for an old kernel or legacy PCI dev */
2097 trace_vfio_populate_device_get_irq_info_failure();
2098 ret = 0;
2099 } else if (irq_info.count == 1) {
2100 vdev->pci_aer = true;
2101 } else {
2102 error_report("vfio: %s "
2103 "Could not enable error recovery for the device",
2104 vbasedev->name);
2105 }
2106
2107 error:
2108 return ret;
2109 }
2110
2111 static void vfio_put_device(VFIOPCIDevice *vdev)
2112 {
2113 g_free(vdev->vbasedev.name);
2114 if (vdev->msix) {
2115 object_unparent(OBJECT(&vdev->msix->mmap_mem));
2116 g_free(vdev->msix);
2117 vdev->msix = NULL;
2118 }
2119 vfio_put_base_device(&vdev->vbasedev);
2120 }
2121
2122 static void vfio_err_notifier_handler(void *opaque)
2123 {
2124 VFIOPCIDevice *vdev = opaque;
2125
2126 if (!event_notifier_test_and_clear(&vdev->err_notifier)) {
2127 return;
2128 }
2129
2130 /*
2131 * TBD. Retrieve the error details and decide what action
2132 * needs to be taken. One of the actions could be to pass
2133 * the error to the guest and have the guest driver recover
2134 * from the error. This requires that PCIe capabilities be
2135 * exposed to the guest. For now, we just terminate the
2136 * guest to contain the error.
2137 */
2138
2139 error_report("%s(%04x:%02x:%02x.%x) Unrecoverable error detected. "
2140 "Please collect any data possible and then kill the guest",
2141 __func__, vdev->host.domain, vdev->host.bus,
2142 vdev->host.slot, vdev->host.function);
2143
2144 vm_stop(RUN_STATE_INTERNAL_ERROR);
2145 }
2146
2147 /*
2148 * Registers error notifier for devices supporting error recovery.
2149 * If we encounter a failure in this function, we report an error
2150 * and continue after disabling error recovery support for the
2151 * device.
2152 */
2153 static void vfio_register_err_notifier(VFIOPCIDevice *vdev)
2154 {
2155 int ret;
2156 int argsz;
2157 struct vfio_irq_set *irq_set;
2158 int32_t *pfd;
2159
2160 if (!vdev->pci_aer) {
2161 return;
2162 }
2163
2164 if (event_notifier_init(&vdev->err_notifier, 0)) {
2165 error_report("vfio: Unable to init event notifier for error detection");
2166 vdev->pci_aer = false;
2167 return;
2168 }
2169
2170 argsz = sizeof(*irq_set) + sizeof(*pfd);
2171
2172 irq_set = g_malloc0(argsz);
2173 irq_set->argsz = argsz;
2174 irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
2175 VFIO_IRQ_SET_ACTION_TRIGGER;
2176 irq_set->index = VFIO_PCI_ERR_IRQ_INDEX;
2177 irq_set->start = 0;
2178 irq_set->count = 1;
2179 pfd = (int32_t *)&irq_set->data;
2180
2181 *pfd = event_notifier_get_fd(&vdev->err_notifier);
2182 qemu_set_fd_handler(*pfd, vfio_err_notifier_handler, NULL, vdev);
2183
2184 ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
2185 if (ret) {
2186 error_report("vfio: Failed to set up error notification");
2187 qemu_set_fd_handler(*pfd, NULL, NULL, vdev);
2188 event_notifier_cleanup(&vdev->err_notifier);
2189 vdev->pci_aer = false;
2190 }
2191 g_free(irq_set);
2192 }
2193
2194 static void vfio_unregister_err_notifier(VFIOPCIDevice *vdev)
2195 {
2196 int argsz;
2197 struct vfio_irq_set *irq_set;
2198 int32_t *pfd;
2199 int ret;
2200
2201 if (!vdev->pci_aer) {
2202 return;
2203 }
2204
2205 argsz = sizeof(*irq_set) + sizeof(*pfd);
2206
2207 irq_set = g_malloc0(argsz);
2208 irq_set->argsz = argsz;
2209 irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
2210 VFIO_IRQ_SET_ACTION_TRIGGER;
2211 irq_set->index = VFIO_PCI_ERR_IRQ_INDEX;
2212 irq_set->start = 0;
2213 irq_set->count = 1;
2214 pfd = (int32_t *)&irq_set->data;
2215 *pfd = -1;
2216
2217 ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
2218 if (ret) {
2219 error_report("vfio: Failed to de-assign error fd: %m");
2220 }
2221 g_free(irq_set);
2222 qemu_set_fd_handler(event_notifier_get_fd(&vdev->err_notifier),
2223 NULL, NULL, vdev);
2224 event_notifier_cleanup(&vdev->err_notifier);
2225 }
2226
2227 static void vfio_req_notifier_handler(void *opaque)
2228 {
2229 VFIOPCIDevice *vdev = opaque;
2230
2231 if (!event_notifier_test_and_clear(&vdev->req_notifier)) {
2232 return;
2233 }
2234
2235 qdev_unplug(&vdev->pdev.qdev, NULL);
2236 }
2237
2238 static void vfio_register_req_notifier(VFIOPCIDevice *vdev)
2239 {
2240 struct vfio_irq_info irq_info = { .argsz = sizeof(irq_info),
2241 .index = VFIO_PCI_REQ_IRQ_INDEX };
2242 int argsz;
2243 struct vfio_irq_set *irq_set;
2244 int32_t *pfd;
2245
2246 if (!(vdev->features & VFIO_FEATURE_ENABLE_REQ)) {
2247 return;
2248 }
2249
2250 if (ioctl(vdev->vbasedev.fd,
2251 VFIO_DEVICE_GET_IRQ_INFO, &irq_info) < 0 || irq_info.count < 1) {
2252 return;
2253 }
2254
2255 if (event_notifier_init(&vdev->req_notifier, 0)) {
2256 error_report("vfio: Unable to init event notifier for device request");
2257 return;
2258 }
2259
2260 argsz = sizeof(*irq_set) + sizeof(*pfd);
2261
2262 irq_set = g_malloc0(argsz);
2263 irq_set->argsz = argsz;
2264 irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
2265 VFIO_IRQ_SET_ACTION_TRIGGER;
2266 irq_set->index = VFIO_PCI_REQ_IRQ_INDEX;
2267 irq_set->start = 0;
2268 irq_set->count = 1;
2269 pfd = (int32_t *)&irq_set->data;
2270
2271 *pfd = event_notifier_get_fd(&vdev->req_notifier);
2272 qemu_set_fd_handler(*pfd, vfio_req_notifier_handler, NULL, vdev);
2273
2274 if (ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set)) {
2275 error_report("vfio: Failed to set up device request notification");
2276 qemu_set_fd_handler(*pfd, NULL, NULL, vdev);
2277 event_notifier_cleanup(&vdev->req_notifier);
2278 } else {
2279 vdev->req_enabled = true;
2280 }
2281
2282 g_free(irq_set);
2283 }
2284
2285 static void vfio_unregister_req_notifier(VFIOPCIDevice *vdev)
2286 {
2287 int argsz;
2288 struct vfio_irq_set *irq_set;
2289 int32_t *pfd;
2290
2291 if (!vdev->req_enabled) {
2292 return;
2293 }
2294
2295 argsz = sizeof(*irq_set) + sizeof(*pfd);
2296
2297 irq_set = g_malloc0(argsz);
2298 irq_set->argsz = argsz;
2299 irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
2300 VFIO_IRQ_SET_ACTION_TRIGGER;
2301 irq_set->index = VFIO_PCI_REQ_IRQ_INDEX;
2302 irq_set->start = 0;
2303 irq_set->count = 1;
2304 pfd = (int32_t *)&irq_set->data;
2305 *pfd = -1;
2306
2307 if (ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set)) {
2308 error_report("vfio: Failed to de-assign device request fd: %m");
2309 }
2310 g_free(irq_set);
2311 qemu_set_fd_handler(event_notifier_get_fd(&vdev->req_notifier),
2312 NULL, NULL, vdev);
2313 event_notifier_cleanup(&vdev->req_notifier);
2314
2315 vdev->req_enabled = false;
2316 }
2317
2318 /*
2319 * AMD Radeon PCI config reset, based on Linux:
2320 * drivers/gpu/drm/radeon/ci_smc.c:ci_is_smc_running()
2321 * drivers/gpu/drm/radeon/radeon_device.c:radeon_pci_config_reset
2322 * drivers/gpu/drm/radeon/ci_smc.c:ci_reset_smc()
2323 * drivers/gpu/drm/radeon/ci_smc.c:ci_stop_smc_clock()
2324 * IDs: include/drm/drm_pciids.h
2325 * Registers: http://cgit.freedesktop.org/~agd5f/linux/commit/?id=4e2aa447f6f0
2326 *
2327 * Bonaire and Hawaii GPUs do not respond to a bus reset. This is a bug in the
2328 * hardware that should be fixed on future ASICs. The symptom of this is that
2329 * once the accelerated driver loads, Windows guests will BSOD on subsequent
2330 * attempts to load the driver, such as after VM reset or shutdown/restart. To
2331 * work around this, we do an AMD specific PCI config reset, followed by an SMC
2332 * reset. The PCI config reset only works if SMC firmware is running, so we
2333 * have a dependency on the state of the device as to whether this reset will
2334 * be effective. There are still cases where we won't be able to kick the
2335 * device into working, but this greatly improves the usability overall. The
2336 * config reset magic is relatively common on AMD GPUs, but the setup and SMC
2337 * poking is largely ASIC specific.
2338 */
2339 static bool vfio_radeon_smc_is_running(VFIOPCIDevice *vdev)
2340 {
2341 uint32_t clk, pc_c;
2342
2343 /*
2344 * Registers 200h and 204h are index and data registers for accessing
2345 * indirect configuration registers within the device.
2346 */
2347 vfio_region_write(&vdev->bars[5].region, 0x200, 0x80000004, 4);
2348 clk = vfio_region_read(&vdev->bars[5].region, 0x204, 4);
2349 vfio_region_write(&vdev->bars[5].region, 0x200, 0x80000370, 4);
2350 pc_c = vfio_region_read(&vdev->bars[5].region, 0x204, 4);
2351
2352 return (!(clk & 1) && (0x20100 <= pc_c));
2353 }
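#if 0
/*
 * Illustrative sketch only, not part of the original source: the indirect
 * register access pattern described in the comment above, factored into a
 * hypothetical helper.  Offset 0x200 selects the indirect register and
 * offset 0x204 returns its contents, exactly as open-coded in
 * vfio_radeon_smc_is_running().
 */
static uint32_t vfio_radeon_indirect_read(VFIOPCIDevice *vdev, uint32_t reg)
{
    /* Write the register number to the index port in BAR5... */
    vfio_region_write(&vdev->bars[5].region, 0x200, reg, 4);
    /* ...then read the selected register through the data port. */
    return vfio_region_read(&vdev->bars[5].region, 0x204, 4);
}
#endif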
2354
2355 /*
2356 * The scope of a config reset is controlled by a mode bit in the misc register
2357 * and a fuse, exposed as a bit in another register. The fuse is the default
2358 * (0 = GFX, 1 = whole GPU), the misc bit is a toggle, with the formula
2359 * scope = !(misc ^ fuse), where the resulting scope is defined the same as
2360 * the fuse. A truth table therefore tells us that if misc == fuse, we need
2361 * to flip the value of the bit in the misc register.
2362 */
2363 static void vfio_radeon_set_gfx_only_reset(VFIOPCIDevice *vdev)
2364 {
2365 uint32_t misc, fuse;
2366 bool a, b;
2367
2368 vfio_region_write(&vdev->bars[5].region, 0x200, 0xc00c0000, 4);
2369 fuse = vfio_region_read(&vdev->bars[5].region, 0x204, 4);
2370 b = fuse & 64;
2371
2372 vfio_region_write(&vdev->bars[5].region, 0x200, 0xc0000010, 4);
2373 misc = vfio_region_read(&vdev->bars[5].region, 0x204, 4);
2374 a = misc & 2;
2375
2376 if (a == b) {
2377 vfio_region_write(&vdev->bars[5].region, 0x204, misc ^ 2, 4);
2378 vfio_region_read(&vdev->bars[5].region, 0x204, 4); /* flush */
2379 }
2380 }
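/*
 * Worked example of the scope formula above (added for illustration, not in
 * the original source).  With scope encoded like the fuse (0 = GFX only,
 * 1 = whole GPU):
 *
 *   misc  fuse | misc ^ fuse | scope = !(misc ^ fuse)
 *    0     0   |      0      |   1  (whole GPU)  -> flip misc for GFX-only
 *    0     1   |      1      |   0  (GFX only)   -> leave misc alone
 *    1     0   |      1      |   0  (GFX only)   -> leave misc alone
 *    1     1   |      0      |   1  (whole GPU)  -> flip misc for GFX-only
 *
 * Hence the (a == b) test in vfio_radeon_set_gfx_only_reset() above.
 */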
2381
2382 static int vfio_radeon_reset(VFIOPCIDevice *vdev)
2383 {
2384 PCIDevice *pdev = &vdev->pdev;
2385 int i, ret = 0;
2386 uint32_t data;
2387
2388 /* Defer to a kernel implemented reset */
2389 if (vdev->vbasedev.reset_works) {
2390 return -ENODEV;
2391 }
2392
2393 /* Enable only memory BAR access */
2394 vfio_pci_write_config(pdev, PCI_COMMAND, PCI_COMMAND_MEMORY, 2);
2395
2396 /* Reset only works if SMC firmware is loaded and running */
2397 if (!vfio_radeon_smc_is_running(vdev)) {
2398 ret = -EINVAL;
2399 goto out;
2400 }
2401
2402 /* Make sure only the GFX function is reset */
2403 vfio_radeon_set_gfx_only_reset(vdev);
2404
2405 /* AMD PCI config reset */
2406 vfio_pci_write_config(pdev, 0x7c, 0x39d5e86b, 4);
2407 usleep(100);
2408
2409 /* Read back the memory size to make sure we're out of reset */
2410 for (i = 0; i < 100000; i++) {
2411 if (vfio_region_read(&vdev->bars[5].region, 0x5428, 4) != 0xffffffff) {
2412 break;
2413 }
2414 usleep(1);
2415 }
2416
2417 /* Reset SMC */
2418 vfio_region_write(&vdev->bars[5].region, 0x200, 0x80000000, 4);
2419 data = vfio_region_read(&vdev->bars[5].region, 0x204, 4);
2420 data |= 1;
2421 vfio_region_write(&vdev->bars[5].region, 0x204, data, 4);
2422
2423 /* Disable SMC clock */
2424 vfio_region_write(&vdev->bars[5].region, 0x200, 0x80000004, 4);
2425 data = vfio_region_read(&vdev->bars[5].region, 0x204, 4);
2426 data |= 1;
2427 vfio_region_write(&vdev->bars[5].region, 0x204, data, 4);
2428
2429 out:
2430 /* Restore PCI command register */
2431 vfio_pci_write_config(pdev, PCI_COMMAND, 0, 2);
2432
2433 return ret;
2434 }
2435
2436 static void vfio_setup_resetfn(VFIOPCIDevice *vdev)
2437 {
2438 PCIDevice *pdev = &vdev->pdev;
2439 uint16_t vendor, device;
2440
2441 vendor = pci_get_word(pdev->config + PCI_VENDOR_ID);
2442 device = pci_get_word(pdev->config + PCI_DEVICE_ID);
2443
2444 switch (vendor) {
2445 case 0x1002:
2446 switch (device) {
2447 /* Bonaire */
2448 case 0x6649: /* Bonaire [FirePro W5100] */
2449 case 0x6650:
2450 case 0x6651:
2451 case 0x6658: /* Bonaire XTX [Radeon R7 260X] */
2452 case 0x665c: /* Bonaire XT [Radeon HD 7790/8770 / R9 260 OEM] */
2453 case 0x665d: /* Bonaire [Radeon R7 200 Series] */
2454 /* Hawaii */
2455 case 0x67A0: /* Hawaii XT GL [FirePro W9100] */
2456 case 0x67A1: /* Hawaii PRO GL [FirePro W8100] */
2457 case 0x67A2:
2458 case 0x67A8:
2459 case 0x67A9:
2460 case 0x67AA:
2461 case 0x67B0: /* Hawaii XT [Radeon R9 290X] */
2462 case 0x67B1: /* Hawaii PRO [Radeon R9 290] */
2463 case 0x67B8:
2464 case 0x67B9:
2465 case 0x67BA:
2466 case 0x67BE:
2467 vdev->resetfn = vfio_radeon_reset;
2468 break;
2469 }
2470 break;
2471 }
2472 }
2473
2474 static int vfio_initfn(PCIDevice *pdev)
2475 {
2476 VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
2477 VFIODevice *vbasedev_iter;
2478 VFIOGroup *group;
2479 char path[PATH_MAX], iommu_group_path[PATH_MAX], *group_name;
2480 ssize_t len;
2481 struct stat st;
2482 int groupid;
2483 int ret;
2484
2485 /* Check that the host device exists */
2486 snprintf(path, sizeof(path),
2487 "/sys/bus/pci/devices/%04x:%02x:%02x.%01x/",
2488 vdev->host.domain, vdev->host.bus, vdev->host.slot,
2489 vdev->host.function);
2490 if (stat(path, &st) < 0) {
2491 error_report("vfio: error: no such host device: %s", path);
2492 return -errno;
2493 }
2494
2495 vdev->vbasedev.ops = &vfio_pci_ops;
2496
2497 vdev->vbasedev.type = VFIO_DEVICE_TYPE_PCI;
2498 vdev->vbasedev.name = g_strdup_printf("%04x:%02x:%02x.%01x",
2499 vdev->host.domain, vdev->host.bus,
2500 vdev->host.slot, vdev->host.function);
2501
2502 strncat(path, "iommu_group", sizeof(path) - strlen(path) - 1);
2503
2504 len = readlink(path, iommu_group_path, sizeof(path));
2505 if (len <= 0 || len >= sizeof(path)) {
2506 error_report("vfio: error no iommu_group for device");
2507 return len < 0 ? -errno : -ENAMETOOLONG;
2508 }
2509
2510 iommu_group_path[len] = 0;
2511 group_name = basename(iommu_group_path);
2512
2513 if (sscanf(group_name, "%d", &groupid) != 1) {
2514 error_report("vfio: error reading %s: %m", path);
2515 return -errno;
2516 }
2517
2518 trace_vfio_initfn(vdev->vbasedev.name, groupid);
2519
2520 group = vfio_get_group(groupid, pci_device_iommu_address_space(pdev));
2521 if (!group) {
2522 error_report("vfio: failed to get group %d", groupid);
2523 return -ENOENT;
2524 }
2525
2526 snprintf(path, sizeof(path), "%04x:%02x:%02x.%01x",
2527 vdev->host.domain, vdev->host.bus, vdev->host.slot,
2528 vdev->host.function);
2529
2530 QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
2531 if (strcmp(vbasedev_iter->name, vdev->vbasedev.name) == 0) {
2532 error_report("vfio: error: device %s is already attached", path);
2533 vfio_put_group(group);
2534 return -EBUSY;
2535 }
2536 }
2537
2538 ret = vfio_get_device(group, path, &vdev->vbasedev);
2539 if (ret) {
2540 error_report("vfio: failed to get device %s", path);
2541 vfio_put_group(group);
2542 return ret;
2543 }
2544
2545 ret = vfio_populate_device(vdev);
2546 if (ret) {
2547 return ret;
2548 }
2549
2550 /* Get a copy of config space */
2551 ret = pread(vdev->vbasedev.fd, vdev->pdev.config,
2552 MIN(pci_config_size(&vdev->pdev), vdev->config_size),
2553 vdev->config_offset);
2554 if (ret < (int)MIN(pci_config_size(&vdev->pdev), vdev->config_size)) {
2555 ret = ret < 0 ? -errno : -EFAULT;
2556 error_report("vfio: Failed to read device config space");
2557 return ret;
2558 }
2559
2560 /* vfio emulates a lot for us, but some bits need extra love */
2561 vdev->emulated_config_bits = g_malloc0(vdev->config_size);
2562
2563 /* QEMU can choose to expose the ROM or not */
2564 memset(vdev->emulated_config_bits + PCI_ROM_ADDRESS, 0xff, 4);
2565
2566 /* QEMU can change multi-function devices to single function, or reverse */
2567 vdev->emulated_config_bits[PCI_HEADER_TYPE] =
2568 PCI_HEADER_TYPE_MULTI_FUNCTION;
2569
2570 /* Restore or clear multifunction, this is always controlled by QEMU */
2571 if (vdev->pdev.cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
2572 vdev->pdev.config[PCI_HEADER_TYPE] |= PCI_HEADER_TYPE_MULTI_FUNCTION;
2573 } else {
2574 vdev->pdev.config[PCI_HEADER_TYPE] &= ~PCI_HEADER_TYPE_MULTI_FUNCTION;
2575 }
2576
2577 /*
2578 * Clear host resource mapping info. If we choose not to register a
2579 * BAR, such as might be the case with the option ROM, we can get
2580 * confusing, unwritable, residual addresses from the host here.
2581 */
2582 memset(&vdev->pdev.config[PCI_BASE_ADDRESS_0], 0, 24);
2583 memset(&vdev->pdev.config[PCI_ROM_ADDRESS], 0, 4);
2584
2585 vfio_pci_size_rom(vdev);
2586
2587 ret = vfio_msix_early_setup(vdev);
2588 if (ret) {
2589 return ret;
2590 }
2591
2592 vfio_map_bars(vdev);
2593
2594 ret = vfio_add_capabilities(vdev);
2595 if (ret) {
2596 goto out_teardown;
2597 }
2598
2599 /* QEMU emulates all of MSI & MSIX */
2600 if (pdev->cap_present & QEMU_PCI_CAP_MSIX) {
2601 memset(vdev->emulated_config_bits + pdev->msix_cap, 0xff,
2602 MSIX_CAP_LENGTH);
2603 }
2604
2605 if (pdev->cap_present & QEMU_PCI_CAP_MSI) {
2606 memset(vdev->emulated_config_bits + pdev->msi_cap, 0xff,
2607 vdev->msi_cap_size);
2608 }
2609
2610 if (vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1)) {
2611 vdev->intx.mmap_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
2612 vfio_intx_mmap_enable, vdev);
2613 pci_device_set_intx_routing_notifier(&vdev->pdev, vfio_intx_update);
2614 ret = vfio_intx_enable(vdev);
2615 if (ret) {
2616 goto out_teardown;
2617 }
2618 }
2619
2620 vfio_register_err_notifier(vdev);
2621 vfio_register_req_notifier(vdev);
2622 vfio_setup_resetfn(vdev);
2623
2624 return 0;
2625
2626 out_teardown:
2627 pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
2628 vfio_teardown_msi(vdev);
2629 vfio_unregister_bars(vdev);
2630 return ret;
2631 }
2632
2633 static void vfio_instance_finalize(Object *obj)
2634 {
2635 PCIDevice *pci_dev = PCI_DEVICE(obj);
2636 VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pci_dev);
2637 VFIOGroup *group = vdev->vbasedev.group;
2638
2639 vfio_unmap_bars(vdev);
2640 g_free(vdev->emulated_config_bits);
2641 g_free(vdev->rom);
2642 vfio_put_device(vdev);
2643 vfio_put_group(group);
2644 }
2645
2646 static void vfio_exitfn(PCIDevice *pdev)
2647 {
2648 VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
2649
2650 vfio_unregister_req_notifier(vdev);
2651 vfio_unregister_err_notifier(vdev);
2652 pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
2653 vfio_disable_interrupts(vdev);
2654 if (vdev->intx.mmap_timer) {
2655 timer_free(vdev->intx.mmap_timer);
2656 }
2657 vfio_teardown_msi(vdev);
2658 vfio_unregister_bars(vdev);
2659 }
2660
2661 static void vfio_pci_reset(DeviceState *dev)
2662 {
2663 PCIDevice *pdev = DO_UPCAST(PCIDevice, qdev, dev);
2664 VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
2665
2666 trace_vfio_pci_reset(vdev->vbasedev.name);
2667
2668 vfio_pci_pre_reset(vdev);
2669
2670 if (vdev->resetfn && !vdev->resetfn(vdev)) {
2671 goto post_reset;
2672 }
2673
2674 if (vdev->vbasedev.reset_works &&
2675 (vdev->has_flr || !vdev->has_pm_reset) &&
2676 !ioctl(vdev->vbasedev.fd, VFIO_DEVICE_RESET)) {
2677 trace_vfio_pci_reset_flr(vdev->vbasedev.name);
2678 goto post_reset;
2679 }
2680
2681 /* See if we can do our own bus reset */
2682 if (!vfio_pci_hot_reset_one(vdev)) {
2683 goto post_reset;
2684 }
2685
2686 /* If nothing else works and the device supports PM reset, use it */
2687 if (vdev->vbasedev.reset_works && vdev->has_pm_reset &&
2688 !ioctl(vdev->vbasedev.fd, VFIO_DEVICE_RESET)) {
2689 trace_vfio_pci_reset_pm(vdev->vbasedev.name);
2690 goto post_reset;
2691 }
2692
2693 post_reset:
2694 vfio_pci_post_reset(vdev);
2695 }
2696
2697 static void vfio_instance_init(Object *obj)
2698 {
2699 PCIDevice *pci_dev = PCI_DEVICE(obj);
2700 VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, PCI_DEVICE(obj));
2701
2702 device_add_bootindex_property(obj, &vdev->bootindex,
2703 "bootindex", NULL,
2704 &pci_dev->qdev, NULL);
2705 }
2706
2707 static Property vfio_pci_dev_properties[] = {
2708 DEFINE_PROP_PCI_HOST_DEVADDR("host", VFIOPCIDevice, host),
2709 DEFINE_PROP_UINT32("x-intx-mmap-timeout-ms", VFIOPCIDevice,
2710 intx.mmap_timeout, 1100),
2711 DEFINE_PROP_BIT("x-vga", VFIOPCIDevice, features,
2712 VFIO_FEATURE_ENABLE_VGA_BIT, false),
2713 DEFINE_PROP_BIT("x-req", VFIOPCIDevice, features,
2714 VFIO_FEATURE_ENABLE_REQ_BIT, true),
2715 DEFINE_PROP_BOOL("x-no-mmap", VFIOPCIDevice, vbasedev.no_mmap, false),
2716 DEFINE_PROP_BOOL("x-no-kvm-intx", VFIOPCIDevice, no_kvm_intx, false),
2717 DEFINE_PROP_BOOL("x-no-kvm-msi", VFIOPCIDevice, no_kvm_msi, false),
2718 DEFINE_PROP_BOOL("x-no-kvm-msix", VFIOPCIDevice, no_kvm_msix, false),
2719 /*
2720 * TODO - support passed fds... is this necessary?
2721 * DEFINE_PROP_STRING("vfiofd", VFIOPCIDevice, vfiofd_name),
2722 * DEFINE_PROP_STRING("vfiogroupfd", VFIOPCIDevice, vfiogroupfd_name),
2723 */
2724 DEFINE_PROP_END_OF_LIST(),
2725 };
2726
2727 static const VMStateDescription vfio_pci_vmstate = {
2728 .name = "vfio-pci",
2729 .unmigratable = 1,
2730 };
2731
2732 static void vfio_pci_dev_class_init(ObjectClass *klass, void *data)
2733 {
2734 DeviceClass *dc = DEVICE_CLASS(klass);
2735 PCIDeviceClass *pdc = PCI_DEVICE_CLASS(klass);
2736
2737 dc->reset = vfio_pci_reset;
2738 dc->props = vfio_pci_dev_properties;
2739 dc->vmsd = &vfio_pci_vmstate;
2740 dc->desc = "VFIO-based PCI device assignment";
2741 set_bit(DEVICE_CATEGORY_MISC, dc->categories);
2742 pdc->init = vfio_initfn;
2743 pdc->exit = vfio_exitfn;
2744 pdc->config_read = vfio_pci_read_config;
2745 pdc->config_write = vfio_pci_write_config;
2746 pdc->is_express = 1; /* We might be */
2747 }
2748
2749 static const TypeInfo vfio_pci_dev_info = {
2750 .name = "vfio-pci",
2751 .parent = TYPE_PCI_DEVICE,
2752 .instance_size = sizeof(VFIOPCIDevice),
2753 .class_init = vfio_pci_dev_class_init,
2754 .instance_init = vfio_instance_init,
2755 .instance_finalize = vfio_instance_finalize,
2756 };
2757
2758 static void register_vfio_pci_dev_type(void)
2759 {
2760 type_register_static(&vfio_pci_dev_info);
2761 }
2762
2763 type_init(register_vfio_pci_dev_type)