/*
 * vfio based device assignment support
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Alex Williamson <alex.williamson@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Based on qemu-kvm device-assignment:
 *  Adapted for KVM by Qumranet.
 *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
 *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
 *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
 *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
 *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
 */

#include <linux/vfio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

#include "config.h"
#include "exec/address-spaces.h"
#include "exec/memory.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/pci/pci.h"
#include "qemu-common.h"
#include "qemu/error-report.h"
#include "qemu/event_notifier.h"
#include "qemu/queue.h"
#include "qemu/range.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "trace.h"
#include "hw/vfio/vfio.h"
#include "hw/vfio/vfio-common.h"

struct VFIOPCIDevice;

typedef struct VFIOQuirk {
    MemoryRegion mem;
    struct VFIOPCIDevice *vdev;
    QLIST_ENTRY(VFIOQuirk) next;
    struct {
        uint32_t base_offset:TARGET_PAGE_BITS;
        uint32_t address_offset:TARGET_PAGE_BITS;
        uint32_t address_size:3;
        uint32_t bar:3;

        uint32_t address_match;
        uint32_t address_mask;

        uint32_t address_val:TARGET_PAGE_BITS;
        uint32_t data_offset:TARGET_PAGE_BITS;
        uint32_t data_size:3;

        uint8_t flags;
        uint8_t read_flags;
        uint8_t write_flags;
    } data;
} VFIOQuirk;

typedef struct VFIOBAR {
    VFIORegion region;
    bool ioport;
    bool mem64;
    QLIST_HEAD(, VFIOQuirk) quirks;
} VFIOBAR;

typedef struct VFIOVGARegion {
    MemoryRegion mem;
    off_t offset;
    int nr;
    QLIST_HEAD(, VFIOQuirk) quirks;
} VFIOVGARegion;

typedef struct VFIOVGA {
    off_t fd_offset;
    int fd;
    VFIOVGARegion region[QEMU_PCI_VGA_NUM_REGIONS];
} VFIOVGA;

typedef struct VFIOINTx {
    bool pending; /* interrupt pending */
    bool kvm_accel; /* set when QEMU bypass through KVM enabled */
    uint8_t pin; /* which pin to pull for qemu_set_irq */
    EventNotifier interrupt; /* eventfd triggered on interrupt */
    EventNotifier unmask; /* eventfd for unmask on QEMU bypass */
    PCIINTxRoute route; /* routing info for QEMU bypass */
    uint32_t mmap_timeout; /* delay to re-enable mmaps after interrupt */
    QEMUTimer *mmap_timer; /* enable mmaps after periods w/o interrupts */
} VFIOINTx;

typedef struct VFIOMSIVector {
    /*
     * Two interrupt paths are configured per vector. The first is only used
     * for interrupts injected via QEMU. This is typically the non-accel path,
     * but may also be used when we want QEMU to handle masking and pending
     * bits. The KVM path bypasses QEMU and is therefore higher performance,
     * but requires masking at the device. virq is used to track the MSI route
     * through KVM, thus kvm_interrupt is only available when virq is set to a
     * valid (>= 0) value.
     */
    EventNotifier interrupt;
    EventNotifier kvm_interrupt;
    struct VFIOPCIDevice *vdev; /* back pointer to device */
    int virq;
    bool use;
} VFIOMSIVector;

enum {
    VFIO_INT_NONE = 0,
    VFIO_INT_INTx = 1,
    VFIO_INT_MSI = 2,
    VFIO_INT_MSIX = 3,
};

/* Cache of MSI-X setup plus extra mmap and memory region for split BAR map */
typedef struct VFIOMSIXInfo {
    uint8_t table_bar;
    uint8_t pba_bar;
    uint16_t entries;
    uint32_t table_offset;
    uint32_t pba_offset;
    MemoryRegion mmap_mem;
    void *mmap;
} VFIOMSIXInfo;

typedef struct VFIOPCIDevice {
    PCIDevice pdev;
    VFIODevice vbasedev;
    VFIOINTx intx;
    unsigned int config_size;
    uint8_t *emulated_config_bits; /* QEMU emulated bits, little-endian */
    off_t config_offset; /* Offset of config space region within device fd */
    unsigned int rom_size;
    off_t rom_offset; /* Offset of ROM region within device fd */
    void *rom;
    int msi_cap_size;
    VFIOMSIVector *msi_vectors;
    VFIOMSIXInfo *msix;
    int nr_vectors; /* Number of MSI/MSIX vectors currently in use */
    int interrupt; /* Current interrupt type */
    VFIOBAR bars[PCI_NUM_REGIONS - 1]; /* No ROM */
    VFIOVGA vga; /* 0xa0000, 0x3b0, 0x3c0 */
    PCIHostDeviceAddress host;
    EventNotifier err_notifier;
    EventNotifier req_notifier;
    int (*resetfn)(struct VFIOPCIDevice *);
    uint32_t features;
#define VFIO_FEATURE_ENABLE_VGA_BIT 0
#define VFIO_FEATURE_ENABLE_VGA (1 << VFIO_FEATURE_ENABLE_VGA_BIT)
#define VFIO_FEATURE_ENABLE_REQ_BIT 1
#define VFIO_FEATURE_ENABLE_REQ (1 << VFIO_FEATURE_ENABLE_REQ_BIT)
    int32_t bootindex;
    uint8_t pm_cap;
    bool has_vga;
    bool pci_aer;
    bool req_enabled;
    bool has_flr;
    bool has_pm_reset;
    bool rom_read_failed;
} VFIOPCIDevice;

typedef struct VFIORomBlacklistEntry {
    uint16_t vendor_id;
    uint16_t device_id;
} VFIORomBlacklistEntry;

/*
 * List of device ids/vendor ids for which to disable
 * option rom loading. This avoids guest hangs during rom
 * execution, as noticed with the BCM 57810 card, for lack of
 * a better way to handle such issues.
 * The user can still override by specifying a romfile or
 * rombar=1.
 * Please see https://bugs.launchpad.net/qemu/+bug/1284874
 * for an analysis of the 57810 card hang. When adding
 * a new vendor id/device id combination below, please also add
 * your card/environment details and information that could
 * help in debugging to the bug tracking this issue
 */
static const VFIORomBlacklistEntry romblacklist[] = {
    /* Broadcom BCM 57810 */
    { 0x14e4, 0x168e }
};

#define MSIX_CAP_LENGTH 12

static void vfio_disable_interrupts(VFIOPCIDevice *vdev);
static uint32_t vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len);
static void vfio_pci_write_config(PCIDevice *pdev, uint32_t addr,
                                  uint32_t val, int len);
static void vfio_mmap_set_enabled(VFIOPCIDevice *vdev, bool enabled);

/*
 * Disabling BAR mmapping can be slow, but toggling it around INTx can
 * also be a huge overhead. We try to get the best of both worlds by
 * waiting until an interrupt to disable mmaps (subsequent transitions
 * to the same state are effectively no overhead). If the interrupt has
 * been serviced and the time gap is long enough, we re-enable mmaps for
 * performance. This works well for things like graphics cards, which
 * may not use their interrupt at all and are penalized to an unusable
 * level by read/write BAR traps. Other devices, like NICs, have more
 * regular interrupts and see much better latency by staying in non-mmap
 * mode. We therefore set the default mmap_timeout such that a ping
 * is just enough to keep the mmap disabled. Users can experiment with
 * other options with the x-intx-mmap-timeout-ms parameter (a value of
 * zero disables the timer).
 */
static void vfio_intx_mmap_enable(void *opaque)
{
    VFIOPCIDevice *vdev = opaque;

    if (vdev->intx.pending) {
        timer_mod(vdev->intx.mmap_timer,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vdev->intx.mmap_timeout);
        return;
    }

    vfio_mmap_set_enabled(vdev, true);
}

static void vfio_intx_interrupt(void *opaque)
{
    VFIOPCIDevice *vdev = opaque;

    if (!event_notifier_test_and_clear(&vdev->intx.interrupt)) {
        return;
    }

    trace_vfio_intx_interrupt(vdev->vbasedev.name, 'A' + vdev->intx.pin);

    vdev->intx.pending = true;
    pci_irq_assert(&vdev->pdev);
    vfio_mmap_set_enabled(vdev, false);
    if (vdev->intx.mmap_timeout) {
        timer_mod(vdev->intx.mmap_timer,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vdev->intx.mmap_timeout);
    }
}

static void vfio_eoi(VFIODevice *vbasedev)
{
    VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);

    if (!vdev->intx.pending) {
        return;
    }

    trace_vfio_eoi(vbasedev->name);

    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);
    vfio_unmask_single_irqindex(vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
}

static void vfio_enable_intx_kvm(VFIOPCIDevice *vdev)
{
#ifdef CONFIG_KVM
    struct kvm_irqfd irqfd = {
        .fd = event_notifier_get_fd(&vdev->intx.interrupt),
        .gsi = vdev->intx.route.irq,
        .flags = KVM_IRQFD_FLAG_RESAMPLE,
    };
    struct vfio_irq_set *irq_set;
    int ret, argsz;
    int32_t *pfd;

    if (!VFIO_ALLOW_KVM_INTX || !kvm_irqfds_enabled() ||
        vdev->intx.route.mode != PCI_INTX_ENABLED ||
        !kvm_resamplefds_enabled()) {
        return;
    }

    /* Get to a known interrupt state */
    qemu_set_fd_handler(irqfd.fd, NULL, NULL, vdev);
    vfio_mask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);

    /* Get an eventfd for resample/unmask */
    if (event_notifier_init(&vdev->intx.unmask, 0)) {
        error_report("vfio: Error: event_notifier_init failed eoi");
        goto fail;
    }

    /* KVM triggers it, VFIO listens for it */
    irqfd.resamplefd = event_notifier_get_fd(&vdev->intx.unmask);

    if (kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd)) {
        error_report("vfio: Error: Failed to setup resample irqfd: %m");
        goto fail_irqfd;
    }

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_UNMASK;
    irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;

    *pfd = irqfd.resamplefd;

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
    g_free(irq_set);
    if (ret) {
        error_report("vfio: Error: Failed to setup INTx unmask fd: %m");
        goto fail_vfio;
    }

    /* Let'em rip */
    vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);

    vdev->intx.kvm_accel = true;

    trace_vfio_enable_intx_kvm(vdev->vbasedev.name);

    return;

fail_vfio:
    irqfd.flags = KVM_IRQFD_FLAG_DEASSIGN;
    kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd);
fail_irqfd:
    event_notifier_cleanup(&vdev->intx.unmask);
fail:
    qemu_set_fd_handler(irqfd.fd, vfio_intx_interrupt, NULL, vdev);
    vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
#endif
}

static void vfio_disable_intx_kvm(VFIOPCIDevice *vdev)
{
#ifdef CONFIG_KVM
    struct kvm_irqfd irqfd = {
        .fd = event_notifier_get_fd(&vdev->intx.interrupt),
        .gsi = vdev->intx.route.irq,
        .flags = KVM_IRQFD_FLAG_DEASSIGN,
    };

    if (!vdev->intx.kvm_accel) {
        return;
    }

    /*
     * Get to a known state, hardware masked, QEMU ready to accept new
     * interrupts, QEMU IRQ de-asserted.
     */
    vfio_mask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);

    /* Tell KVM to stop listening for an INTx irqfd */
    if (kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd)) {
        error_report("vfio: Error: Failed to disable INTx irqfd: %m");
    }

    /* We only need to close the eventfd for VFIO to cleanup the kernel side */
    event_notifier_cleanup(&vdev->intx.unmask);

    /* QEMU starts listening for interrupt events. */
    qemu_set_fd_handler(irqfd.fd, vfio_intx_interrupt, NULL, vdev);

    vdev->intx.kvm_accel = false;

    /* If we've missed an event, let it re-fire through QEMU */
    vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);

    trace_vfio_disable_intx_kvm(vdev->vbasedev.name);
#endif
}

static void vfio_update_irq(PCIDevice *pdev)
{
    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
    PCIINTxRoute route;

    if (vdev->interrupt != VFIO_INT_INTx) {
        return;
    }

    route = pci_device_route_intx_to_irq(&vdev->pdev, vdev->intx.pin);

    if (!pci_intx_route_changed(&vdev->intx.route, &route)) {
        return; /* Nothing changed */
    }

    trace_vfio_update_irq(vdev->vbasedev.name,
                          vdev->intx.route.irq, route.irq);

    vfio_disable_intx_kvm(vdev);

    vdev->intx.route = route;

    if (route.mode != PCI_INTX_ENABLED) {
        return;
    }

    vfio_enable_intx_kvm(vdev);

    /* Re-enable the interrupt in case we missed an EOI */
    vfio_eoi(&vdev->vbasedev);
}

static int vfio_enable_intx(VFIOPCIDevice *vdev)
{
    uint8_t pin = vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1);
    int ret, argsz;
    struct vfio_irq_set *irq_set;
    int32_t *pfd;

    if (!pin) {
        return 0;
    }

    vfio_disable_interrupts(vdev);

    vdev->intx.pin = pin - 1; /* Pin A (1) -> irq[0] */
    pci_config_set_interrupt_pin(vdev->pdev.config, pin);

#ifdef CONFIG_KVM
    /*
     * Only conditional to avoid generating error messages on platforms
     * where we won't actually use the result anyway.
     */
    if (kvm_irqfds_enabled() && kvm_resamplefds_enabled()) {
        vdev->intx.route = pci_device_route_intx_to_irq(&vdev->pdev,
                                                        vdev->intx.pin);
    }
#endif

    ret = event_notifier_init(&vdev->intx.interrupt, 0);
    if (ret) {
        error_report("vfio: Error: event_notifier_init failed");
        return ret;
    }

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;

    *pfd = event_notifier_get_fd(&vdev->intx.interrupt);
    qemu_set_fd_handler(*pfd, vfio_intx_interrupt, NULL, vdev);

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
    g_free(irq_set);
    if (ret) {
        error_report("vfio: Error: Failed to setup INTx fd: %m");
        qemu_set_fd_handler(event_notifier_get_fd(&vdev->intx.interrupt),
                            NULL, NULL, vdev);
        event_notifier_cleanup(&vdev->intx.interrupt);
        return -errno;
    }

    vfio_enable_intx_kvm(vdev);

    vdev->interrupt = VFIO_INT_INTx;

    trace_vfio_enable_intx(vdev->vbasedev.name);

    return 0;
}

static void vfio_disable_intx(VFIOPCIDevice *vdev)
{
    int fd;

    timer_del(vdev->intx.mmap_timer);
    vfio_disable_intx_kvm(vdev);
    vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);
    vfio_mmap_set_enabled(vdev, true);

    fd = event_notifier_get_fd(&vdev->intx.interrupt);
    qemu_set_fd_handler(fd, NULL, NULL, vdev);
    event_notifier_cleanup(&vdev->intx.interrupt);

    vdev->interrupt = VFIO_INT_NONE;

    trace_vfio_disable_intx(vdev->vbasedev.name);
}

/*
 * MSI/X
 */
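/*
 * Userspace (non-KVM) MSI/MSI-X signaling path: VFIO signals the per-vector
 * eventfd, this handler runs in QEMU and re-injects the interrupt through
 * the msi/msix core.
 */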
static void vfio_msi_interrupt(void *opaque)
{
    VFIOMSIVector *vector = opaque;
    VFIOPCIDevice *vdev = vector->vdev;
    int nr = vector - vdev->msi_vectors;

    if (!event_notifier_test_and_clear(&vector->interrupt)) {
        return;
    }

#ifdef DEBUG_VFIO
    MSIMessage msg;

    if (vdev->interrupt == VFIO_INT_MSIX) {
        msg = msix_get_message(&vdev->pdev, nr);
    } else if (vdev->interrupt == VFIO_INT_MSI) {
        msg = msi_get_message(&vdev->pdev, nr);
    } else {
        abort();
    }

    trace_vfio_msi_interrupt(vdev->vbasedev.name, nr, msg.address, msg.data);
#endif

    if (vdev->interrupt == VFIO_INT_MSIX) {
        msix_notify(&vdev->pdev, nr);
    } else if (vdev->interrupt == VFIO_INT_MSI) {
        msi_notify(&vdev->pdev, nr);
    } else {
        error_report("vfio: MSI interrupt received, but not enabled?");
    }
}

static int vfio_enable_vectors(VFIOPCIDevice *vdev, bool msix)
{
    struct vfio_irq_set *irq_set;
    int ret = 0, i, argsz;
    int32_t *fds;

    argsz = sizeof(*irq_set) + (vdev->nr_vectors * sizeof(*fds));

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = msix ? VFIO_PCI_MSIX_IRQ_INDEX : VFIO_PCI_MSI_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = vdev->nr_vectors;
    fds = (int32_t *)&irq_set->data;

    for (i = 0; i < vdev->nr_vectors; i++) {
        int fd = -1;

        /*
         * MSI vs MSI-X - The guest has direct access to MSI mask and pending
         * bits, therefore we always use the KVM signaling path when setup.
         * MSI-X mask and pending bits are emulated, so we want to use the
         * KVM signaling path only when configured and unmasked.
         */
        if (vdev->msi_vectors[i].use) {
            if (vdev->msi_vectors[i].virq < 0 ||
                (msix && msix_is_masked(&vdev->pdev, i))) {
                fd = event_notifier_get_fd(&vdev->msi_vectors[i].interrupt);
            } else {
                fd = event_notifier_get_fd(&vdev->msi_vectors[i].kvm_interrupt);
            }
        }

        fds[i] = fd;
    }

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);

    g_free(irq_set);

    return ret;
}

static void vfio_add_kvm_msi_virq(VFIOMSIVector *vector, MSIMessage *msg,
                                  bool msix)
{
    int virq;

    if ((msix && !VFIO_ALLOW_KVM_MSIX) ||
        (!msix && !VFIO_ALLOW_KVM_MSI) || !msg) {
        return;
    }

    if (event_notifier_init(&vector->kvm_interrupt, 0)) {
        return;
    }

    virq = kvm_irqchip_add_msi_route(kvm_state, *msg);
    if (virq < 0) {
        event_notifier_cleanup(&vector->kvm_interrupt);
        return;
    }

    if (kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, &vector->kvm_interrupt,
                                           NULL, virq) < 0) {
        kvm_irqchip_release_virq(kvm_state, virq);
        event_notifier_cleanup(&vector->kvm_interrupt);
        return;
    }

    vector->virq = virq;
}

static void vfio_remove_kvm_msi_virq(VFIOMSIVector *vector)
{
    kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, &vector->kvm_interrupt,
                                          vector->virq);
    kvm_irqchip_release_virq(kvm_state, vector->virq);
    vector->virq = -1;
    event_notifier_cleanup(&vector->kvm_interrupt);
}

static void vfio_update_kvm_msi_virq(VFIOMSIVector *vector, MSIMessage msg)
{
    kvm_irqchip_update_msi_route(kvm_state, vector->virq, msg);
}

static int vfio_msix_vector_do_use(PCIDevice *pdev, unsigned int nr,
                                   MSIMessage *msg, IOHandler *handler)
{
    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
    VFIOMSIVector *vector;
    int ret;

    trace_vfio_msix_vector_do_use(vdev->vbasedev.name, nr);

    vector = &vdev->msi_vectors[nr];

    if (!vector->use) {
        vector->vdev = vdev;
        vector->virq = -1;
        if (event_notifier_init(&vector->interrupt, 0)) {
            error_report("vfio: Error: event_notifier_init failed");
        }
        vector->use = true;
        msix_vector_use(pdev, nr);
    }

    qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                        handler, NULL, vector);

    /*
     * Attempt to enable route through KVM irqchip,
     * default to userspace handling if unavailable.
     */
    if (vector->virq >= 0) {
        if (!msg) {
            vfio_remove_kvm_msi_virq(vector);
        } else {
            vfio_update_kvm_msi_virq(vector, *msg);
        }
    } else {
        vfio_add_kvm_msi_virq(vector, msg, true);
    }

    /*
     * We don't want to have the host allocate all possible MSI vectors
     * for a device if they're not in use, so we shut them down and
     * incrementally increase them as needed.
     */
    if (vdev->nr_vectors < nr + 1) {
        vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX);
        vdev->nr_vectors = nr + 1;
        ret = vfio_enable_vectors(vdev, true);
        if (ret) {
            error_report("vfio: failed to enable vectors, %d", ret);
        }
    } else {
        int argsz;
        struct vfio_irq_set *irq_set;
        int32_t *pfd;

        argsz = sizeof(*irq_set) + sizeof(*pfd);

        irq_set = g_malloc0(argsz);
        irq_set->argsz = argsz;
        irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
                         VFIO_IRQ_SET_ACTION_TRIGGER;
        irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
        irq_set->start = nr;
        irq_set->count = 1;
        pfd = (int32_t *)&irq_set->data;

        if (vector->virq >= 0) {
            *pfd = event_notifier_get_fd(&vector->kvm_interrupt);
        } else {
            *pfd = event_notifier_get_fd(&vector->interrupt);
        }

        ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
        g_free(irq_set);
        if (ret) {
            error_report("vfio: failed to modify vector, %d", ret);
        }
    }

    return 0;
}

static int vfio_msix_vector_use(PCIDevice *pdev,
                                unsigned int nr, MSIMessage msg)
{
    return vfio_msix_vector_do_use(pdev, nr, &msg, vfio_msi_interrupt);
}

static void vfio_msix_vector_release(PCIDevice *pdev, unsigned int nr)
{
    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
    VFIOMSIVector *vector = &vdev->msi_vectors[nr];

    trace_vfio_msix_vector_release(vdev->vbasedev.name, nr);

    /*
     * There are still old guests that mask and unmask vectors on every
     * interrupt. If we're using QEMU bypass with a KVM irqfd, leave all of
     * the KVM setup in place, simply switch VFIO to use the non-bypass
     * eventfd. We'll then fire the interrupt through QEMU and the MSI-X
     * core will mask the interrupt and set pending bits, allowing it to
     * be re-asserted on unmask. Nothing to do if already using QEMU mode.
     */
    if (vector->virq >= 0) {
        int argsz;
        struct vfio_irq_set *irq_set;
        int32_t *pfd;

        argsz = sizeof(*irq_set) + sizeof(*pfd);

        irq_set = g_malloc0(argsz);
        irq_set->argsz = argsz;
        irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
                         VFIO_IRQ_SET_ACTION_TRIGGER;
        irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
        irq_set->start = nr;
        irq_set->count = 1;
        pfd = (int32_t *)&irq_set->data;

        *pfd = event_notifier_get_fd(&vector->interrupt);

        ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);

        g_free(irq_set);
    }
}

static void vfio_enable_msix(VFIOPCIDevice *vdev)
{
    vfio_disable_interrupts(vdev);

    vdev->msi_vectors = g_malloc0(vdev->msix->entries * sizeof(VFIOMSIVector));

    vdev->interrupt = VFIO_INT_MSIX;

    /*
     * Some communication channels between VF & PF or PF & fw rely on the
     * physical state of the device and expect that enabling MSI-X from the
     * guest enables the same on the host. When our guest is Linux, the
     * guest driver call to pci_enable_msix() sets the enabling bit in the
     * MSI-X capability, but leaves the vector table masked. We therefore
     * can't rely on a vector_use callback (from request_irq() in the guest)
     * to switch the physical device into MSI-X mode because that may come a
     * long time after pci_enable_msix(). This code enables vector 0 with
     * triggering to userspace, then immediately releases the vector, leaving
     * the physical device with no vectors enabled, but MSI-X enabled, just
     * like the guest view.
     */
    vfio_msix_vector_do_use(&vdev->pdev, 0, NULL, NULL);
    vfio_msix_vector_release(&vdev->pdev, 0);

    if (msix_set_vector_notifiers(&vdev->pdev, vfio_msix_vector_use,
                                  vfio_msix_vector_release, NULL)) {
        error_report("vfio: msix_set_vector_notifiers failed");
    }

    trace_vfio_enable_msix(vdev->vbasedev.name);
}

static void vfio_enable_msi(VFIOPCIDevice *vdev)
{
    int ret, i;

    vfio_disable_interrupts(vdev);

    vdev->nr_vectors = msi_nr_vectors_allocated(&vdev->pdev);
retry:
    vdev->msi_vectors = g_malloc0(vdev->nr_vectors * sizeof(VFIOMSIVector));

    for (i = 0; i < vdev->nr_vectors; i++) {
        VFIOMSIVector *vector = &vdev->msi_vectors[i];
        MSIMessage msg = msi_get_message(&vdev->pdev, i);

        vector->vdev = vdev;
        vector->virq = -1;
        vector->use = true;

        if (event_notifier_init(&vector->interrupt, 0)) {
            error_report("vfio: Error: event_notifier_init failed");
        }

        qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                            vfio_msi_interrupt, NULL, vector);

        /*
         * Attempt to enable route through KVM irqchip,
         * default to userspace handling if unavailable.
         */
        vfio_add_kvm_msi_virq(vector, &msg, false);
    }

    /* Set interrupt type prior to possible interrupts */
    vdev->interrupt = VFIO_INT_MSI;

    ret = vfio_enable_vectors(vdev, false);
    if (ret) {
        if (ret < 0) {
            error_report("vfio: Error: Failed to setup MSI fds: %m");
        } else if (ret != vdev->nr_vectors) {
            error_report("vfio: Error: Failed to enable %d "
                         "MSI vectors, retry with %d", vdev->nr_vectors, ret);
        }

        for (i = 0; i < vdev->nr_vectors; i++) {
            VFIOMSIVector *vector = &vdev->msi_vectors[i];
            if (vector->virq >= 0) {
                vfio_remove_kvm_msi_virq(vector);
            }
            qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                                NULL, NULL, NULL);
            event_notifier_cleanup(&vector->interrupt);
        }

        g_free(vdev->msi_vectors);

        if (ret > 0 && ret != vdev->nr_vectors) {
            vdev->nr_vectors = ret;
            goto retry;
        }
        vdev->nr_vectors = 0;

        /*
         * Failing to setup MSI doesn't really fall within any specification.
         * Let's try leaving interrupts disabled and hope the guest figures
         * out to fall back to INTx for this device.
         */
        error_report("vfio: Error: Failed to enable MSI");
        vdev->interrupt = VFIO_INT_NONE;

        return;
    }

    trace_vfio_enable_msi(vdev->vbasedev.name, vdev->nr_vectors);
}

static void vfio_disable_msi_common(VFIOPCIDevice *vdev)
{
    int i;

    for (i = 0; i < vdev->nr_vectors; i++) {
        VFIOMSIVector *vector = &vdev->msi_vectors[i];
        if (vdev->msi_vectors[i].use) {
            if (vector->virq >= 0) {
                vfio_remove_kvm_msi_virq(vector);
            }
            qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                                NULL, NULL, NULL);
            event_notifier_cleanup(&vector->interrupt);
        }
    }

    g_free(vdev->msi_vectors);
    vdev->msi_vectors = NULL;
    vdev->nr_vectors = 0;
    vdev->interrupt = VFIO_INT_NONE;

    vfio_enable_intx(vdev);
}

static void vfio_disable_msix(VFIOPCIDevice *vdev)
{
    int i;

    msix_unset_vector_notifiers(&vdev->pdev);

    /*
     * MSI-X will only release vectors if MSI-X is still enabled on the
     * device, check through the rest and release it ourselves if necessary.
     */
    for (i = 0; i < vdev->nr_vectors; i++) {
        if (vdev->msi_vectors[i].use) {
            vfio_msix_vector_release(&vdev->pdev, i);
            msix_vector_unuse(&vdev->pdev, i);
        }
    }

    if (vdev->nr_vectors) {
        vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX);
    }

    vfio_disable_msi_common(vdev);

    trace_vfio_disable_msix(vdev->vbasedev.name);
}

static void vfio_disable_msi(VFIOPCIDevice *vdev)
{
    vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSI_IRQ_INDEX);
    vfio_disable_msi_common(vdev);

    trace_vfio_disable_msi(vdev->vbasedev.name);
}

static void vfio_update_msi(VFIOPCIDevice *vdev)
{
    int i;

    for (i = 0; i < vdev->nr_vectors; i++) {
        VFIOMSIVector *vector = &vdev->msi_vectors[i];
        MSIMessage msg;

        if (!vector->use || vector->virq < 0) {
            continue;
        }

        msg = msi_get_message(&vdev->pdev, i);
        vfio_update_kvm_msi_virq(vector, msg);
    }
}

static void vfio_pci_load_rom(VFIOPCIDevice *vdev)
{
    struct vfio_region_info reg_info = {
        .argsz = sizeof(reg_info),
        .index = VFIO_PCI_ROM_REGION_INDEX
    };
    uint64_t size;
    off_t off = 0;
    ssize_t bytes;

    if (ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info)) {
        error_report("vfio: Error getting ROM info: %m");
        return;
    }

    trace_vfio_pci_load_rom(vdev->vbasedev.name, (unsigned long)reg_info.size,
                            (unsigned long)reg_info.offset,
                            (unsigned long)reg_info.flags);

    vdev->rom_size = size = reg_info.size;
    vdev->rom_offset = reg_info.offset;

    if (!vdev->rom_size) {
        vdev->rom_read_failed = true;
        error_report("vfio-pci: Cannot read device rom at "
                     "%s", vdev->vbasedev.name);
        error_printf("Device option ROM contents are probably invalid "
                     "(check dmesg).\nSkip option ROM probe with rombar=0, "
                     "or load from file with romfile=\n");
        return;
    }

    vdev->rom = g_malloc(size);
    memset(vdev->rom, 0xff, size);

    while (size) {
        bytes = pread(vdev->vbasedev.fd, vdev->rom + off,
                      size, vdev->rom_offset + off);
        if (bytes == 0) {
            break;
        } else if (bytes > 0) {
            off += bytes;
            size -= bytes;
        } else {
            if (errno == EINTR || errno == EAGAIN) {
                continue;
            }
            error_report("vfio: Error reading device ROM: %m");
            break;
        }
    }
}

static uint64_t vfio_rom_read(void *opaque, hwaddr addr, unsigned size)
{
    VFIOPCIDevice *vdev = opaque;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } val = { .qword = 0 };
    uint64_t data = 0;

    /* Load the ROM lazily when the guest tries to read it */
    if (unlikely(!vdev->rom && !vdev->rom_read_failed)) {
        vfio_pci_load_rom(vdev);
    }

    memcpy(&val, vdev->rom + addr,
           (addr < vdev->rom_size) ? MIN(size, vdev->rom_size - addr) : 0);

    switch (size) {
    case 1:
        data = val.byte;
        break;
    case 2:
        data = le16_to_cpu(val.word);
        break;
    case 4:
        data = le32_to_cpu(val.dword);
        break;
    default:
        hw_error("vfio: unsupported read size, %d bytes\n", size);
        break;
    }

    trace_vfio_rom_read(vdev->vbasedev.name, addr, size, data);

    return data;
}

static void vfio_rom_write(void *opaque, hwaddr addr,
                           uint64_t data, unsigned size)
{
}

static const MemoryRegionOps vfio_rom_ops = {
    .read = vfio_rom_read,
    .write = vfio_rom_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static bool vfio_blacklist_opt_rom(VFIOPCIDevice *vdev)
{
    PCIDevice *pdev = &vdev->pdev;
    uint16_t vendor_id, device_id;
    int count = 0;

    vendor_id = pci_get_word(pdev->config + PCI_VENDOR_ID);
    device_id = pci_get_word(pdev->config + PCI_DEVICE_ID);

    while (count < ARRAY_SIZE(romblacklist)) {
        if (romblacklist[count].vendor_id == vendor_id &&
            romblacklist[count].device_id == device_id) {
            return true;
        }
        count++;
    }

    return false;
}

static void vfio_pci_size_rom(VFIOPCIDevice *vdev)
{
    uint32_t orig, size = cpu_to_le32((uint32_t)PCI_ROM_ADDRESS_MASK);
    off_t offset = vdev->config_offset + PCI_ROM_ADDRESS;
    DeviceState *dev = DEVICE(vdev);
    char name[32];
    int fd = vdev->vbasedev.fd;

    if (vdev->pdev.romfile || !vdev->pdev.rom_bar) {
        /* Since pci handles romfile, just print a message and return */
        if (vfio_blacklist_opt_rom(vdev) && vdev->pdev.romfile) {
            error_printf("Warning: Device at %04x:%02x:%02x.%x "
                         "is known to cause system instability issues during "
                         "option rom execution. "
                         "Proceeding anyway since user specified romfile\n",
                         vdev->host.domain, vdev->host.bus, vdev->host.slot,
                         vdev->host.function);
        }
        return;
    }

    /*
     * Use the same size ROM BAR as the physical device. The contents
     * will get filled in later when the guest tries to read it.
     */
    if (pread(fd, &orig, 4, offset) != 4 ||
        pwrite(fd, &size, 4, offset) != 4 ||
        pread(fd, &size, 4, offset) != 4 ||
        pwrite(fd, &orig, 4, offset) != 4) {
        error_report("%s(%04x:%02x:%02x.%x) failed: %m",
                     __func__, vdev->host.domain, vdev->host.bus,
                     vdev->host.slot, vdev->host.function);
        return;
    }

    size = ~(le32_to_cpu(size) & PCI_ROM_ADDRESS_MASK) + 1;

    if (!size) {
        return;
    }

    if (vfio_blacklist_opt_rom(vdev)) {
        if (dev->opts && qemu_opt_get(dev->opts, "rombar")) {
            error_printf("Warning: Device at %04x:%02x:%02x.%x "
                         "is known to cause system instability issues during "
                         "option rom execution. "
                         "Proceeding anyway since user specified non-zero value for "
                         "rombar\n",
                         vdev->host.domain, vdev->host.bus, vdev->host.slot,
                         vdev->host.function);
        } else {
            error_printf("Warning: ROM loading for device at "
                         "%04x:%02x:%02x.%x has been disabled due to "
                         "system instability issues. "
                         "Specify rombar=1 or romfile to force\n",
                         vdev->host.domain, vdev->host.bus, vdev->host.slot,
                         vdev->host.function);
            return;
        }
    }

    trace_vfio_pci_size_rom(vdev->vbasedev.name, size);

    snprintf(name, sizeof(name), "vfio[%04x:%02x:%02x.%x].rom",
             vdev->host.domain, vdev->host.bus, vdev->host.slot,
             vdev->host.function);

    memory_region_init_io(&vdev->pdev.rom, OBJECT(vdev),
                          &vfio_rom_ops, vdev, name, size);

    pci_register_bar(&vdev->pdev, PCI_ROM_SLOT,
                     PCI_BASE_ADDRESS_SPACE_MEMORY, &vdev->pdev.rom);

    vdev->pdev.has_rom = true;
    vdev->rom_read_failed = false;
}

static void vfio_vga_write(void *opaque, hwaddr addr,
                           uint64_t data, unsigned size)
{
    VFIOVGARegion *region = opaque;
    VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]);
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    off_t offset = vga->fd_offset + region->offset + addr;

    switch (size) {
    case 1:
        buf.byte = data;
        break;
    case 2:
        buf.word = cpu_to_le16(data);
        break;
    case 4:
        buf.dword = cpu_to_le32(data);
        break;
    default:
        hw_error("vfio: unsupported write size, %d bytes", size);
        break;
    }

    if (pwrite(vga->fd, &buf, size, offset) != size) {
        error_report("%s(,0x%"HWADDR_PRIx", 0x%"PRIx64", %d) failed: %m",
                     __func__, region->offset + addr, data, size);
    }

    trace_vfio_vga_write(region->offset + addr, data, size);
}

static uint64_t vfio_vga_read(void *opaque, hwaddr addr, unsigned size)
{
    VFIOVGARegion *region = opaque;
    VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]);
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    uint64_t data = 0;
    off_t offset = vga->fd_offset + region->offset + addr;

    if (pread(vga->fd, &buf, size, offset) != size) {
        error_report("%s(,0x%"HWADDR_PRIx", %d) failed: %m",
                     __func__, region->offset + addr, size);
        return (uint64_t)-1;
    }

    switch (size) {
    case 1:
        data = buf.byte;
        break;
    case 2:
        data = le16_to_cpu(buf.word);
        break;
    case 4:
        data = le32_to_cpu(buf.dword);
        break;
    default:
        hw_error("vfio: unsupported read size, %d bytes", size);
        break;
    }

    trace_vfio_vga_read(region->offset + addr, size, data);

    return data;
}

static const MemoryRegionOps vfio_vga_ops = {
    .read = vfio_vga_read,
    .write = vfio_vga_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

/*
 * Device specific quirks
 */

/* Is range1 fully contained within range2? */
static bool vfio_range_contained(uint64_t first1, uint64_t len1,
                                 uint64_t first2, uint64_t len2) {
    return (first1 >= first2 && first1 + len1 <= first2 + len2);
}

static bool vfio_flags_enabled(uint8_t flags, uint8_t mask)
{
    return (mask && (flags & mask) == mask);
}

static uint64_t vfio_generic_window_quirk_read(void *opaque,
                                               hwaddr addr, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIOPCIDevice *vdev = quirk->vdev;
    uint64_t data;

    if (vfio_flags_enabled(quirk->data.flags, quirk->data.read_flags) &&
        ranges_overlap(addr, size,
                       quirk->data.data_offset, quirk->data.data_size)) {
        hwaddr offset = addr - quirk->data.data_offset;

        if (!vfio_range_contained(addr, size, quirk->data.data_offset,
                                  quirk->data.data_size)) {
            hw_error("%s: window data read not fully contained: %s",
                     __func__, memory_region_name(&quirk->mem));
        }

        data = vfio_pci_read_config(&vdev->pdev,
                                    quirk->data.address_val + offset, size);

        trace_vfio_generic_window_quirk_read(memory_region_name(&quirk->mem),
                                             vdev->vbasedev.name,
                                             quirk->data.bar,
                                             addr, size, data);
    } else {
        data = vfio_region_read(&vdev->bars[quirk->data.bar].region,
                                addr + quirk->data.base_offset, size);
    }

    return data;
}

static void vfio_generic_window_quirk_write(void *opaque, hwaddr addr,
                                            uint64_t data, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIOPCIDevice *vdev = quirk->vdev;

    if (ranges_overlap(addr, size,
                       quirk->data.address_offset, quirk->data.address_size)) {

        if (addr != quirk->data.address_offset) {
            hw_error("%s: offset write into address window: %s",
                     __func__, memory_region_name(&quirk->mem));
        }

        if ((data & ~quirk->data.address_mask) == quirk->data.address_match) {
            quirk->data.flags |= quirk->data.write_flags |
                                 quirk->data.read_flags;
            quirk->data.address_val = data & quirk->data.address_mask;
        } else {
            quirk->data.flags &= ~(quirk->data.write_flags |
                                   quirk->data.read_flags);
        }
    }

    if (vfio_flags_enabled(quirk->data.flags, quirk->data.write_flags) &&
        ranges_overlap(addr, size,
                       quirk->data.data_offset, quirk->data.data_size)) {
        hwaddr offset = addr - quirk->data.data_offset;

        if (!vfio_range_contained(addr, size, quirk->data.data_offset,
                                  quirk->data.data_size)) {
            hw_error("%s: window data write not fully contained: %s",
                     __func__, memory_region_name(&quirk->mem));
        }

        vfio_pci_write_config(&vdev->pdev,
                              quirk->data.address_val + offset, data, size);
        trace_vfio_generic_window_quirk_write(memory_region_name(&quirk->mem),
                                              vdev->vbasedev.name,
                                              quirk->data.bar,
                                              addr, data, size);
        return;
    }

    vfio_region_write(&vdev->bars[quirk->data.bar].region,
                      addr + quirk->data.base_offset, data, size);
}

static const MemoryRegionOps vfio_generic_window_quirk = {
    .read = vfio_generic_window_quirk_read,
    .write = vfio_generic_window_quirk_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static uint64_t vfio_generic_quirk_read(void *opaque,
                                        hwaddr addr, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIOPCIDevice *vdev = quirk->vdev;
    hwaddr base = quirk->data.address_match & TARGET_PAGE_MASK;
    hwaddr offset = quirk->data.address_match & ~TARGET_PAGE_MASK;
    uint64_t data;

    if (vfio_flags_enabled(quirk->data.flags, quirk->data.read_flags) &&
        ranges_overlap(addr, size, offset, quirk->data.address_mask + 1)) {
        if (!vfio_range_contained(addr, size, offset,
                                  quirk->data.address_mask + 1)) {
            hw_error("%s: read not fully contained: %s",
                     __func__, memory_region_name(&quirk->mem));
        }

        data = vfio_pci_read_config(&vdev->pdev, addr - offset, size);

        trace_vfio_generic_quirk_read(memory_region_name(&quirk->mem),
                                      vdev->vbasedev.name, quirk->data.bar,
                                      addr + base, size, data);
    } else {
        data = vfio_region_read(&vdev->bars[quirk->data.bar].region,
                                addr + base, size);
    }

    return data;
}

static void vfio_generic_quirk_write(void *opaque, hwaddr addr,
                                     uint64_t data, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIOPCIDevice *vdev = quirk->vdev;
    hwaddr base = quirk->data.address_match & TARGET_PAGE_MASK;
    hwaddr offset = quirk->data.address_match & ~TARGET_PAGE_MASK;

    if (vfio_flags_enabled(quirk->data.flags, quirk->data.write_flags) &&
        ranges_overlap(addr, size, offset, quirk->data.address_mask + 1)) {
        if (!vfio_range_contained(addr, size, offset,
                                  quirk->data.address_mask + 1)) {
            hw_error("%s: write not fully contained: %s",
                     __func__, memory_region_name(&quirk->mem));
        }

        vfio_pci_write_config(&vdev->pdev, addr - offset, data, size);

        trace_vfio_generic_quirk_write(memory_region_name(&quirk->mem),
                                       vdev->vbasedev.name, quirk->data.bar,
                                       addr + base, data, size);
    } else {
        vfio_region_write(&vdev->bars[quirk->data.bar].region,
                          addr + base, data, size);
    }
}

static const MemoryRegionOps vfio_generic_quirk = {
    .read = vfio_generic_quirk_read,
    .write = vfio_generic_quirk_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

#define PCI_VENDOR_ID_ATI 0x1002

/*
 * Radeon HD cards (HD5450 & HD7850) report the upper byte of the I/O port BAR
 * through VGA register 0x3c3. On newer cards, the I/O port BAR is always
 * BAR4 (older cards like the X550 used BAR1, but we don't care to support
 * those). Note that on bare metal, a read of 0x3c3 doesn't always return the
 * I/O port BAR address. Originally this was coded to return the virtual BAR
 * address only if the physical register read returns the actual BAR address,
 * but users have reported greater success if we return the virtual address
 * unconditionally.
 */
static uint64_t vfio_ati_3c3_quirk_read(void *opaque,
                                        hwaddr addr, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIOPCIDevice *vdev = quirk->vdev;
    uint64_t data = vfio_pci_read_config(&vdev->pdev,
                                         PCI_BASE_ADDRESS_0 + (4 * 4) + 1,
                                         size);
    trace_vfio_ati_3c3_quirk_read(data);

    return data;
}

static const MemoryRegionOps vfio_ati_3c3_quirk = {
    .read = vfio_ati_3c3_quirk_read,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static void vfio_vga_probe_ati_3c3_quirk(VFIOPCIDevice *vdev)
{
    PCIDevice *pdev = &vdev->pdev;
    VFIOQuirk *quirk;

    if (pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_ATI) {
        return;
    }

    /*
     * As long as the BAR is >= 256 bytes it will be aligned such that the
     * lower byte is always zero. Filter out anything else, if it exists.
     */
    if (!vdev->bars[4].ioport || vdev->bars[4].region.size < 256) {
        return;
    }

    quirk = g_malloc0(sizeof(*quirk));
    quirk->vdev = vdev;

    memory_region_init_io(&quirk->mem, OBJECT(vdev), &vfio_ati_3c3_quirk, quirk,
                          "vfio-ati-3c3-quirk", 1);
    memory_region_add_subregion(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem,
                                3 /* offset 3 bytes from 0x3c0 */, &quirk->mem);

    QLIST_INSERT_HEAD(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].quirks,
                      quirk, next);

    trace_vfio_vga_probe_ati_3c3_quirk(vdev->vbasedev.name);
}

/*
 * Newer ATI/AMD devices, including HD5450 and HD7850, have a window to PCI
 * config space through MMIO BAR2 at offset 0x4000. Nothing seems to access
 * the MMIO space directly, but a window to this space is provided through
 * I/O port BAR4. Offset 0x0 is the address register and offset 0x4 is the
 * data register. When the address is programmed to a range of 0x4000-0x4fff
 * PCI configuration space is available. Experimentation seems to indicate
 * that only read-only access is provided, but we drop writes when the window
 * is enabled to config space nonetheless.
 */
static void vfio_probe_ati_bar4_window_quirk(VFIOPCIDevice *vdev, int nr)
{
    PCIDevice *pdev = &vdev->pdev;
    VFIOQuirk *quirk;

    if (!vdev->has_vga || nr != 4 ||
        pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_ATI) {
        return;
    }

    quirk = g_malloc0(sizeof(*quirk));
    quirk->vdev = vdev;
    quirk->data.address_size = 4;
    quirk->data.data_offset = 4;
    quirk->data.data_size = 4;
    quirk->data.address_match = 0x4000;
    quirk->data.address_mask = PCIE_CONFIG_SPACE_SIZE - 1;
    quirk->data.bar = nr;
    quirk->data.read_flags = quirk->data.write_flags = 1;

    memory_region_init_io(&quirk->mem, OBJECT(vdev),
                          &vfio_generic_window_quirk, quirk,
                          "vfio-ati-bar4-window-quirk", 8);
    memory_region_add_subregion_overlap(&vdev->bars[nr].region.mem,
                                        quirk->data.base_offset, &quirk->mem, 1);

    QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);

    trace_vfio_probe_ati_bar4_window_quirk(vdev->vbasedev.name);
}

#define PCI_VENDOR_ID_REALTEK 0x10ec

/*
 * RTL8168 devices have a backdoor that can access the MSI-X table. At BAR2
 * offset 0x70 there is a dword data register, offset 0x74 is a dword address
 * register. According to the Linux r8169 driver, the MSI-X table is addressed
 * when the "type" portion of the address register is set to 0x1. This appears
 * to be bits 16:30. Bit 31 is both a write indicator and some sort of
 * "address latched" indicator. Bits 12:15 are a mask field, which we can
 * ignore because the MSI-X table should always be accessed as a dword (full
 * mask). Bits 0:11 is offset within the type.
 *
 * Example trace:
 *
 * Read from MSI-X table offset 0
 * vfio: vfio_bar_write(0000:05:00.0:BAR2+0x74, 0x1f000, 4) // store read addr
 * vfio: vfio_bar_read(0000:05:00.0:BAR2+0x74, 4) = 0x8001f000 // latch
 * vfio: vfio_bar_read(0000:05:00.0:BAR2+0x70, 4) = 0xfee00398 // read data
 *
 * Write 0xfee00000 to MSI-X table offset 0
 * vfio: vfio_bar_write(0000:05:00.0:BAR2+0x70, 0xfee00000, 4) // write data
 * vfio: vfio_bar_write(0000:05:00.0:BAR2+0x74, 0x8001f000, 4) // do write
 * vfio: vfio_bar_read(0000:05:00.0:BAR2+0x74, 4) = 0x1f000 // complete
 */

static uint64_t vfio_rtl8168_window_quirk_read(void *opaque,
                                               hwaddr addr, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIOPCIDevice *vdev = quirk->vdev;
    uint64_t val = 0;

    if (!quirk->data.flags) { /* Non-MSI-X table access */
        return vfio_region_read(&vdev->bars[quirk->data.bar].region,
                                addr + 0x70, size);
    }

    switch (addr) {
    case 4: /* address */
        val = quirk->data.address_match ^ 0x80000000U; /* latch/complete */
        break;
    case 0: /* data */
        if ((vdev->pdev.cap_present & QEMU_PCI_CAP_MSIX)) {
            memory_region_dispatch_read(&vdev->pdev.msix_table_mmio,
                                        (hwaddr)(quirk->data.address_match & 0xfff),
                                        &val, size, MEMTXATTRS_UNSPECIFIED);
        }
        break;
    }

    trace_vfio_rtl8168_quirk_read(vdev->vbasedev.name,
                                  addr ? "address" : "data", val);
    return val;
}

static void vfio_rtl8168_window_quirk_write(void *opaque, hwaddr addr,
                                            uint64_t data, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIOPCIDevice *vdev = quirk->vdev;

    switch (addr) {
    case 4: /* address */
        if ((data & 0x7fff0000) == 0x10000) { /* MSI-X table */
            quirk->data.flags = 1; /* Activate reads */
            quirk->data.address_match = data;

            trace_vfio_rtl8168_quirk_write(vdev->vbasedev.name, data);

            if (data & 0x80000000U) { /* Do write */
                if (vdev->pdev.cap_present & QEMU_PCI_CAP_MSIX) {
                    hwaddr offset = data & 0xfff;
                    uint64_t val = quirk->data.address_mask;

                    trace_vfio_rtl8168_quirk_msix(vdev->vbasedev.name,
                                                  (uint16_t)offset, val);

                    /* Write to the proper guest MSI-X table instead */
                    memory_region_dispatch_write(&vdev->pdev.msix_table_mmio,
                                                 offset, val, size,
                                                 MEMTXATTRS_UNSPECIFIED);
                }
                return; /* Do not write guest MSI-X data to hardware */
            }
        } else {
            quirk->data.flags = 0; /* De-activate reads, non-MSI-X */
        }
        break;
    case 0: /* data */
        quirk->data.address_mask = data;
        break;
    }

    vfio_region_write(&vdev->bars[quirk->data.bar].region,
                      addr + 0x70, data, size);
}

static const MemoryRegionOps vfio_rtl8168_window_quirk = {
    .read = vfio_rtl8168_window_quirk_read,
    .write = vfio_rtl8168_window_quirk_write,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
        .unaligned = false,
    },
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static void vfio_probe_rtl8168_bar2_window_quirk(VFIOPCIDevice *vdev, int nr)
{
    PCIDevice *pdev = &vdev->pdev;
    VFIOQuirk *quirk;

    if (pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_REALTEK ||
        pci_get_word(pdev->config + PCI_DEVICE_ID) != 0x8168 || nr != 2) {
        return;
    }

    quirk = g_malloc0(sizeof(*quirk));
    quirk->vdev = vdev;
    quirk->data.bar = nr;

    memory_region_init_io(&quirk->mem, OBJECT(vdev), &vfio_rtl8168_window_quirk,
                          quirk, "vfio-rtl8168-window-quirk", 8);
    memory_region_add_subregion_overlap(&vdev->bars[nr].region.mem,
                                        0x70, &quirk->mem, 1);

    QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);

    trace_vfio_rtl8168_quirk_enable(vdev->vbasedev.name);
}

/*
 * Trap the BAR2 MMIO window to config space as well.
 */
static void vfio_probe_ati_bar2_4000_quirk(VFIOPCIDevice *vdev, int nr)
{
    PCIDevice *pdev = &vdev->pdev;
    VFIOQuirk *quirk;

    /* Only enable on newer devices where BAR2 is 64bit */
    if (!vdev->has_vga || nr != 2 || !vdev->bars[2].mem64 ||
        pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_ATI) {
        return;
    }

    quirk = g_malloc0(sizeof(*quirk));
    quirk->vdev = vdev;
    quirk->data.flags = quirk->data.read_flags = quirk->data.write_flags = 1;
    quirk->data.address_match = 0x4000;
    quirk->data.address_mask = PCIE_CONFIG_SPACE_SIZE - 1;
    quirk->data.bar = nr;

    memory_region_init_io(&quirk->mem, OBJECT(vdev), &vfio_generic_quirk, quirk,
                          "vfio-ati-bar2-4000-quirk",
                          TARGET_PAGE_ALIGN(quirk->data.address_mask + 1));
    memory_region_add_subregion_overlap(&vdev->bars[nr].region.mem,
                                        quirk->data.address_match & TARGET_PAGE_MASK,
                                        &quirk->mem, 1);

    QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);

    trace_vfio_probe_ati_bar2_4000_quirk(vdev->vbasedev.name);
}

/*
 * Older ATI/AMD cards like the X550 have a similar window to that above.
1648 | * I/O port BAR1 provides a window to a mirror of PCI config space located | |
1649 | * in BAR2 at offset 0xf00. We don't care to support such older cards, but | |
1650 | * note it for future reference. | |
1651 | */ | |
1652 | ||
1653 | #define PCI_VENDOR_ID_NVIDIA 0x10de | |
1654 | ||
1655 | /* | |
1656 | * Nvidia has several different methods to get to config space; the | |
1657 | * nouveau project has a number of these documented here: | |
1658 | * https://github.com/pathscale/envytools/tree/master/hwdocs | |
1659 | * | |
1660 | * The first quirk is actually not documented in envytools and is found | |
1661 | * on 10de:01d1 (NVIDIA Corporation G72 [GeForce 7300 LE]). This is an | |
1662 | * NV46 chipset. The backdoor uses the legacy VGA I/O ports to access | |
1663 | * the mirror of PCI config space found at BAR0 offset 0x1800. The access | |
1664 | * sequence first writes 0x338 to I/O port 0x3d4. The target offset is | |
1665 | * then written to 0x3d0. Finally 0x538 is written for a read and 0x738 | |
1666 | * is written for a write to 0x3d4. The BAR0 offset is then accessible | |
1667 | * through 0x3d0. This quirk doesn't seem to be necessary on newer cards | |
1668 | * that use the I/O port BAR5 window, but it doesn't hurt to leave it. | |
1669 | */ | |
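| ||
| /* | |
| * From the guest side, the sequence above looks roughly like this | |
| * (illustrative pseudo-driver code, not part of QEMU): | |
| * | |
| * outw(0x338, 0x3d4); // select the config space window | |
| * outl(0x1800 + offset, 0x3d0); // BAR0 offset to access | |
| * outw(0x538, 0x3d4); // arm a read (0x738 arms a write) | |
| * val = inl(0x3d0); // data comes back through 0x3d0 | |
| */ | |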
1670 | enum { | |
1671 | NV_3D0_NONE = 0, | |
1672 | NV_3D0_SELECT, | |
1673 | NV_3D0_WINDOW, | |
1674 | NV_3D0_READ, | |
1675 | NV_3D0_WRITE, | |
1676 | }; | |
1677 | ||
1678 | static uint64_t vfio_nvidia_3d0_quirk_read(void *opaque, | |
1679 | hwaddr addr, unsigned size) | |
1680 | { | |
1681 | VFIOQuirk *quirk = opaque; | |
1682 | VFIOPCIDevice *vdev = quirk->vdev; | |
1683 | PCIDevice *pdev = &vdev->pdev; | |
1684 | uint64_t data = vfio_vga_read(&vdev->vga.region[QEMU_PCI_VGA_IO_HI], | |
1685 | addr + quirk->data.base_offset, size); | |
1686 | ||
1687 | if (quirk->data.flags == NV_3D0_READ && addr == quirk->data.data_offset) { | |
1688 | data = vfio_pci_read_config(pdev, quirk->data.address_val, size); | |
1689 | trace_vfio_nvidia_3d0_quirk_read(size, data); | |
1690 | } | |
1691 | ||
1692 | quirk->data.flags = NV_3D0_NONE; | |
1693 | ||
1694 | return data; | |
1695 | } | |
1696 | ||
1697 | static void vfio_nvidia_3d0_quirk_write(void *opaque, hwaddr addr, | |
1698 | uint64_t data, unsigned size) | |
1699 | { | |
1700 | VFIOQuirk *quirk = opaque; | |
1701 | VFIOPCIDevice *vdev = quirk->vdev; | |
1702 | PCIDevice *pdev = &vdev->pdev; | |
1703 | ||
1704 | switch (quirk->data.flags) { | |
1705 | case NV_3D0_NONE: | |
1706 | if (addr == quirk->data.address_offset && data == 0x338) { | |
1707 | quirk->data.flags = NV_3D0_SELECT; | |
1708 | } | |
1709 | break; | |
1710 | case NV_3D0_SELECT: | |
1711 | quirk->data.flags = NV_3D0_NONE; | |
1712 | if (addr == quirk->data.data_offset && | |
1713 | (data & ~quirk->data.address_mask) == quirk->data.address_match) { | |
1714 | quirk->data.flags = NV_3D0_WINDOW; | |
1715 | quirk->data.address_val = data & quirk->data.address_mask; | |
1716 | } | |
1717 | break; | |
1718 | case NV_3D0_WINDOW: | |
1719 | quirk->data.flags = NV_3D0_NONE; | |
1720 | if (addr == quirk->data.address_offset) { | |
1721 | if (data == 0x538) { | |
1722 | quirk->data.flags = NV_3D0_READ; | |
1723 | } else if (data == 0x738) { | |
1724 | quirk->data.flags = NV_3D0_WRITE; | |
1725 | } | |
1726 | } | |
1727 | break; | |
1728 | case NV_3D0_WRITE: | |
1729 | quirk->data.flags = NV_3D0_NONE; | |
1730 | if (addr == quirk->data.data_offset) { | |
1731 | vfio_pci_write_config(pdev, quirk->data.address_val, data, size); | |
1732 | trace_vfio_nvidia_3d0_quirk_write(data, size); | |
1733 | return; | |
1734 | } | |
1735 | break; | |
1736 | } | |
1737 | ||
1738 | vfio_vga_write(&vdev->vga.region[QEMU_PCI_VGA_IO_HI], | |
1739 | addr + quirk->data.base_offset, data, size); | |
1740 | } | |
1741 | ||
1742 | static const MemoryRegionOps vfio_nvidia_3d0_quirk = { | |
1743 | .read = vfio_nvidia_3d0_quirk_read, | |
1744 | .write = vfio_nvidia_3d0_quirk_write, | |
1745 | .endianness = DEVICE_LITTLE_ENDIAN, | |
1746 | }; | |
1747 | ||
1748 | static void vfio_vga_probe_nvidia_3d0_quirk(VFIOPCIDevice *vdev) | |
1749 | { | |
1750 | PCIDevice *pdev = &vdev->pdev; | |
1751 | VFIOQuirk *quirk; | |
1752 | ||
1753 | if (pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_NVIDIA || | |
1754 | !vdev->bars[1].region.size) { | |
1755 | return; | |
1756 | } | |
1757 | ||
1758 | quirk = g_malloc0(sizeof(*quirk)); | |
1759 | quirk->vdev = vdev; | |
1760 | quirk->data.base_offset = 0x10; | |
1761 | quirk->data.address_offset = 4; | |
1762 | quirk->data.address_size = 2; | |
1763 | quirk->data.address_match = 0x1800; | |
1764 | quirk->data.address_mask = PCI_CONFIG_SPACE_SIZE - 1; | |
1765 | quirk->data.data_offset = 0; | |
1766 | quirk->data.data_size = 4; | |
1767 | ||
1768 | memory_region_init_io(&quirk->mem, OBJECT(vdev), &vfio_nvidia_3d0_quirk, | |
1769 | quirk, "vfio-nvidia-3d0-quirk", 6); | |
1770 | memory_region_add_subregion(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem, | |
1771 | quirk->data.base_offset, &quirk->mem); | |
1772 | ||
1773 | QLIST_INSERT_HEAD(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].quirks, | |
1774 | quirk, next); | |
1775 | ||
1776 | trace_vfio_vga_probe_nvidia_3d0_quirk(vdev->vbasedev.name); | |
1777 | } | |
1778 | ||
1779 | /* | |
1780 | * The second quirk is documented in envytools. The I/O port BAR5 is just | |
1781 | * a set of address/data ports to the MMIO BARs. The BAR we care about is | |
1782 | * again BAR0. This backdoor is apparently a bit newer than the one above, | |
1783 | * so we need to trap not only the 256 bytes @0x1800, but all of PCI config | |
1784 | * space: the full 4k window @0x88000 includes extended config space. | |
1785 | */ | |
1786 | enum { | |
1787 | NV_BAR5_ADDRESS = 0x1, | |
1788 | NV_BAR5_ENABLE = 0x2, | |
1789 | NV_BAR5_MASTER = 0x4, | |
1790 | NV_BAR5_VALID = 0x7, | |
1791 | }; | |
1792 | ||
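| /* | |
| * Guest-side view of the window, illustrative only (bar5 is the I/O | |
| * port base of BAR5): | |
| * | |
| * outl(0x1, bar5 + 0x0); // master | |
| * outl(0x1, bar5 + 0x4); // enable | |
| * outl(0x88000 + offset, bar5 + 0x8); // select config space offset | |
| * val = inl(bar5 + 0xc); // read through the data port | |
| */ | |
| ||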
1793 | static void vfio_nvidia_bar5_window_quirk_write(void *opaque, hwaddr addr, | |
1794 | uint64_t data, unsigned size) | |
1795 | { | |
1796 | VFIOQuirk *quirk = opaque; | |
1797 | ||
1798 | switch (addr) { | |
1799 | case 0x0: | |
1800 | if (data & 0x1) { | |
1801 | quirk->data.flags |= NV_BAR5_MASTER; | |
1802 | } else { | |
1803 | quirk->data.flags &= ~NV_BAR5_MASTER; | |
1804 | } | |
1805 | break; | |
1806 | case 0x4: | |
1807 | if (data & 0x1) { | |
1808 | quirk->data.flags |= NV_BAR5_ENABLE; | |
1809 | } else { | |
1810 | quirk->data.flags &= ~NV_BAR5_ENABLE; | |
1811 | } | |
1812 | break; | |
1813 | case 0x8: | |
1814 | if (quirk->data.flags & NV_BAR5_MASTER) { | |
1815 | if ((data & ~0xfff) == 0x88000) { | |
1816 | quirk->data.flags |= NV_BAR5_ADDRESS; | |
1817 | quirk->data.address_val = data & 0xfff; | |
1818 | } else if ((data & ~0xff) == 0x1800) { | |
1819 | quirk->data.flags |= NV_BAR5_ADDRESS; | |
1820 | quirk->data.address_val = data & 0xff; | |
1821 | } else { | |
1822 | quirk->data.flags &= ~NV_BAR5_ADDRESS; | |
1823 | } | |
1824 | } | |
1825 | break; | |
1826 | } | |
1827 | ||
1828 | vfio_generic_window_quirk_write(opaque, addr, data, size); | |
1829 | } | |
1830 | ||
1831 | static const MemoryRegionOps vfio_nvidia_bar5_window_quirk = { | |
1832 | .read = vfio_generic_window_quirk_read, | |
1833 | .write = vfio_nvidia_bar5_window_quirk_write, | |
1834 | .valid.min_access_size = 4, | |
1835 | .endianness = DEVICE_LITTLE_ENDIAN, | |
1836 | }; | |
1837 | ||
1838 | static void vfio_probe_nvidia_bar5_window_quirk(VFIOPCIDevice *vdev, int nr) | |
1839 | { | |
1840 | PCIDevice *pdev = &vdev->pdev; | |
1841 | VFIOQuirk *quirk; | |
1842 | ||
1843 | if (!vdev->has_vga || nr != 5 || | |
1844 | pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_NVIDIA) { | |
1845 | return; | |
1846 | } | |
1847 | ||
1848 | quirk = g_malloc0(sizeof(*quirk)); | |
1849 | quirk->vdev = vdev; | |
1850 | quirk->data.read_flags = quirk->data.write_flags = NV_BAR5_VALID; | |
1851 | quirk->data.address_offset = 0x8; | |
1852 | quirk->data.address_size = 0; /* actually 4, but avoids generic code */ | |
1853 | quirk->data.data_offset = 0xc; | |
1854 | quirk->data.data_size = 4; | |
1855 | quirk->data.bar = nr; | |
1856 | ||
1857 | memory_region_init_io(&quirk->mem, OBJECT(vdev), | |
1858 | &vfio_nvidia_bar5_window_quirk, quirk, | |
1859 | "vfio-nvidia-bar5-window-quirk", 16); | |
1860 | memory_region_add_subregion_overlap(&vdev->bars[nr].region.mem, | |
1861 | 0, &quirk->mem, 1); | |
1862 | ||
1863 | QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next); | |
1864 | ||
1865 | trace_vfio_probe_nvidia_bar5_window_quirk(vdev->vbasedev.name); | |
1866 | } | |
1867 | ||
1868 | static void vfio_nvidia_88000_quirk_write(void *opaque, hwaddr addr, | |
1869 | uint64_t data, unsigned size) | |
1870 | { | |
1871 | VFIOQuirk *quirk = opaque; | |
1872 | VFIOPCIDevice *vdev = quirk->vdev; | |
1873 | PCIDevice *pdev = &vdev->pdev; | |
1874 | hwaddr base = quirk->data.address_match & TARGET_PAGE_MASK; | |
1875 | ||
1876 | vfio_generic_quirk_write(opaque, addr, data, size); | |
1877 | ||
1878 | /* | |
1879 | * Nvidia seems to acknowledge MSI interrupts by writing 0xff to the | |
1880 | * MSI capability ID register. Both the ID and next register are | |
1881 | * read-only, so we allow writes covering either of those to real hw. | |
1882 | * NB - only fixed for the 0x88000 MMIO window. | |
1883 | */ | |
1884 | if ((pdev->cap_present & QEMU_PCI_CAP_MSI) && | |
1885 | vfio_range_contained(addr, size, pdev->msi_cap, PCI_MSI_FLAGS)) { | |
1886 | vfio_region_write(&vdev->bars[quirk->data.bar].region, | |
1887 | addr + base, data, size); | |
1888 | } | |
1889 | } | |
1890 | ||
1891 | static const MemoryRegionOps vfio_nvidia_88000_quirk = { | |
1892 | .read = vfio_generic_quirk_read, | |
1893 | .write = vfio_nvidia_88000_quirk_write, | |
1894 | .endianness = DEVICE_LITTLE_ENDIAN, | |
1895 | }; | |
1896 | ||
1897 | /* | |
1898 | * Finally, BAR0 itself. We want to redirect any accesses to either | |
1899 | * 0x1800 or 0x88000 through the PCI config space access functions. | |
1900 | * | |
1901 | * NB - quirks need page granularity or else they don't seem to work when | |
1902 | * BARs are mmap'd. | |
1903 | * | |
1904 | * Here's offset 0x88000... | |
1905 | */ | |
1906 | static void vfio_probe_nvidia_bar0_88000_quirk(VFIOPCIDevice *vdev, int nr) | |
1907 | { | |
1908 | PCIDevice *pdev = &vdev->pdev; | |
1909 | VFIOQuirk *quirk; | |
1910 | uint16_t vendor, class; | |
1911 | ||
1912 | vendor = pci_get_word(pdev->config + PCI_VENDOR_ID); | |
1913 | class = pci_get_word(pdev->config + PCI_CLASS_DEVICE); | |
1914 | ||
1915 | if (nr != 0 || vendor != PCI_VENDOR_ID_NVIDIA || | |
1916 | class != PCI_CLASS_DISPLAY_VGA) { | |
1917 | return; | |
1918 | } | |
1919 | ||
1920 | quirk = g_malloc0(sizeof(*quirk)); | |
1921 | quirk->vdev = vdev; | |
1922 | quirk->data.flags = quirk->data.read_flags = quirk->data.write_flags = 1; | |
1923 | quirk->data.address_match = 0x88000; | |
1924 | quirk->data.address_mask = PCIE_CONFIG_SPACE_SIZE - 1; | |
1925 | quirk->data.bar = nr; | |
1926 | ||
1927 | memory_region_init_io(&quirk->mem, OBJECT(vdev), &vfio_nvidia_88000_quirk, | |
1928 | quirk, "vfio-nvidia-bar0-88000-quirk", | |
1929 | TARGET_PAGE_ALIGN(quirk->data.address_mask + 1)); | |
1930 | memory_region_add_subregion_overlap(&vdev->bars[nr].region.mem, | |
1931 | quirk->data.address_match & TARGET_PAGE_MASK, | |
1932 | &quirk->mem, 1); | |
1933 | ||
1934 | QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next); | |
1935 | ||
1936 | trace_vfio_probe_nvidia_bar0_88000_quirk(vdev->vbasedev.name); | |
1937 | } | |
1938 | ||
1939 | /* | |
1940 | * And here's the same for BAR0 offset 0x1800... | |
1941 | */ | |
1942 | static void vfio_probe_nvidia_bar0_1800_quirk(VFIOPCIDevice *vdev, int nr) | |
1943 | { | |
1944 | PCIDevice *pdev = &vdev->pdev; | |
1945 | VFIOQuirk *quirk; | |
1946 | ||
1947 | if (!vdev->has_vga || nr != 0 || | |
1948 | pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_NVIDIA) { | |
1949 | return; | |
1950 | } | |
1951 | ||
1952 | /* Log the chipset ID */ | |
1953 | trace_vfio_probe_nvidia_bar0_1800_quirk_id( | |
1954 | (unsigned int)(vfio_region_read(&vdev->bars[0].region, 0, 4) >> 20) | |
1955 | & 0xff); | |
1956 | ||
1957 | quirk = g_malloc0(sizeof(*quirk)); | |
1958 | quirk->vdev = vdev; | |
1959 | quirk->data.flags = quirk->data.read_flags = quirk->data.write_flags = 1; | |
1960 | quirk->data.address_match = 0x1800; | |
1961 | quirk->data.address_mask = PCI_CONFIG_SPACE_SIZE - 1; | |
1962 | quirk->data.bar = nr; | |
1963 | ||
1964 | memory_region_init_io(&quirk->mem, OBJECT(vdev), &vfio_generic_quirk, quirk, | |
1965 | "vfio-nvidia-bar0-1800-quirk", | |
1966 | TARGET_PAGE_ALIGN(quirk->data.address_mask + 1)); | |
1967 | memory_region_add_subregion_overlap(&vdev->bars[nr].region.mem, | |
1968 | quirk->data.address_match & TARGET_PAGE_MASK, | |
1969 | &quirk->mem, 1); | |
1970 | ||
1971 | QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next); | |
1972 | ||
1973 | trace_vfio_probe_nvidia_bar0_1800_quirk(vdev->vbasedev.name); | |
1974 | } | |
1975 | ||
1976 | /* | |
1977 | * TODO - Some Nvidia devices provide config access to their companion HDA | |
1978 | * device and even to their parent bridge via these config space mirrors. | |
1979 | * Add quirks for those regions. | |
1980 | */ | |
1981 | ||
1982 | /* | |
1983 | * Common quirk probe entry points. | |
1984 | */ | |
1985 | static void vfio_vga_quirk_setup(VFIOPCIDevice *vdev) | |
1986 | { | |
1987 | vfio_vga_probe_ati_3c3_quirk(vdev); | |
1988 | vfio_vga_probe_nvidia_3d0_quirk(vdev); | |
1989 | } | |
1990 | ||
1991 | static void vfio_vga_quirk_teardown(VFIOPCIDevice *vdev) | |
1992 | { | |
1993 | VFIOQuirk *quirk; | |
1994 | int i; | |
1995 | ||
1996 | for (i = 0; i < ARRAY_SIZE(vdev->vga.region); i++) { | |
1997 | QLIST_FOREACH(quirk, &vdev->vga.region[i].quirks, next) { | |
1998 | memory_region_del_subregion(&vdev->vga.region[i].mem, &quirk->mem); | |
1999 | } | |
2000 | } | |
2001 | } | |
2002 | ||
2003 | static void vfio_vga_quirk_free(VFIOPCIDevice *vdev) | |
2004 | { | |
2005 | int i; | |
2006 | ||
2007 | for (i = 0; i < ARRAY_SIZE(vdev->vga.region); i++) { | |
2008 | while (!QLIST_EMPTY(&vdev->vga.region[i].quirks)) { | |
2009 | VFIOQuirk *quirk = QLIST_FIRST(&vdev->vga.region[i].quirks); | |
2010 | object_unparent(OBJECT(&quirk->mem)); | |
2011 | QLIST_REMOVE(quirk, next); | |
2012 | g_free(quirk); | |
2013 | } | |
2014 | } | |
2015 | } | |
2016 | ||
2017 | static void vfio_bar_quirk_setup(VFIOPCIDevice *vdev, int nr) | |
2018 | { | |
2019 | vfio_probe_ati_bar4_window_quirk(vdev, nr); | |
2020 | vfio_probe_ati_bar2_4000_quirk(vdev, nr); | |
2021 | vfio_probe_nvidia_bar5_window_quirk(vdev, nr); | |
2022 | vfio_probe_nvidia_bar0_88000_quirk(vdev, nr); | |
2023 | vfio_probe_nvidia_bar0_1800_quirk(vdev, nr); | |
2024 | vfio_probe_rtl8168_bar2_window_quirk(vdev, nr); | |
2025 | } | |
2026 | ||
2027 | static void vfio_bar_quirk_teardown(VFIOPCIDevice *vdev, int nr) | |
2028 | { | |
2029 | VFIOBAR *bar = &vdev->bars[nr]; | |
2030 | VFIOQuirk *quirk; | |
2031 | ||
2032 | QLIST_FOREACH(quirk, &bar->quirks, next) { | |
2033 | memory_region_del_subregion(&bar->region.mem, &quirk->mem); | |
2034 | } | |
2035 | } | |
2036 | ||
2037 | static void vfio_bar_quirk_free(VFIOPCIDevice *vdev, int nr) | |
2038 | { | |
2039 | VFIOBAR *bar = &vdev->bars[nr]; | |
2040 | ||
2041 | while (!QLIST_EMPTY(&bar->quirks)) { | |
2042 | VFIOQuirk *quirk = QLIST_FIRST(&bar->quirks); | |
2043 | object_unparent(OBJECT(&quirk->mem)); | |
2044 | QLIST_REMOVE(quirk, next); | |
2045 | g_free(quirk); | |
2046 | } | |
2047 | } | |
2048 | ||
2049 | /* | |
2050 | * PCI config space | |
2051 | */ | |
2052 | static uint32_t vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len) | |
2053 | { | |
2054 | VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev); | |
2055 | uint32_t emu_bits = 0, emu_val = 0, phys_val = 0, val; | |
2056 | ||
2057 | memcpy(&emu_bits, vdev->emulated_config_bits + addr, len); | |
2058 | emu_bits = le32_to_cpu(emu_bits); | |
2059 | ||
2060 | if (emu_bits) { | |
2061 | emu_val = pci_default_read_config(pdev, addr, len); | |
2062 | } | |
2063 | ||
2064 | if (~emu_bits & (0xffffffffU >> (32 - len * 8))) { | |
2065 | ssize_t ret; | |
2066 | ||
2067 | ret = pread(vdev->vbasedev.fd, &phys_val, len, | |
2068 | vdev->config_offset + addr); | |
2069 | if (ret != len) { | |
2070 | error_report("%s(%04x:%02x:%02x.%x, 0x%x, 0x%x) failed: %m", | |
2071 | __func__, vdev->host.domain, vdev->host.bus, | |
2072 | vdev->host.slot, vdev->host.function, addr, len); | |
2073 | return -errno; | |
2074 | } | |
2075 | phys_val = le32_to_cpu(phys_val); | |
2076 | } | |
2077 | ||
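| /* | |
| * Merge, with emulated bits taking precedence. For example, with | |
| * len = 2, emu_bits = 0x00ff, emu_val = 0x1234, phys_val = 0xabcd: | |
| * val = (0x1234 & 0x00ff) | (0xabcd & 0xff00) = 0xab34. | |
| */ | |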
2078 | val = (emu_val & emu_bits) | (phys_val & ~emu_bits); | |
2079 | ||
2080 | trace_vfio_pci_read_config(vdev->vbasedev.name, addr, len, val); | |
2081 | ||
2082 | return val; | |
2083 | } | |
2084 | ||
2085 | static void vfio_pci_write_config(PCIDevice *pdev, uint32_t addr, | |
2086 | uint32_t val, int len) | |
2087 | { | |
2088 | VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev); | |
2089 | uint32_t val_le = cpu_to_le32(val); | |
2090 | ||
2091 | trace_vfio_pci_write_config(vdev->vbasedev.name, addr, val, len); | |
2092 | ||
2093 | /* Write everything to VFIO, let it filter out what we can't write */ | |
2094 | if (pwrite(vdev->vbasedev.fd, &val_le, len, vdev->config_offset + addr) | |
2095 | != len) { | |
2096 | error_report("%s(%04x:%02x:%02x.%x, 0x%x, 0x%x, 0x%x) failed: %m", | |
2097 | __func__, vdev->host.domain, vdev->host.bus, | |
2098 | vdev->host.slot, vdev->host.function, addr, val, len); | |
2099 | } | |
2100 | ||
2101 | /* MSI/MSI-X Enabling/Disabling */ | |
2102 | if (pdev->cap_present & QEMU_PCI_CAP_MSI && | |
2103 | ranges_overlap(addr, len, pdev->msi_cap, vdev->msi_cap_size)) { | |
2104 | int is_enabled, was_enabled = msi_enabled(pdev); | |
2105 | ||
2106 | pci_default_write_config(pdev, addr, val, len); | |
2107 | ||
2108 | is_enabled = msi_enabled(pdev); | |
2109 | ||
2110 | if (!was_enabled) { | |
2111 | if (is_enabled) { | |
2112 | vfio_enable_msi(vdev); | |
2113 | } | |
2114 | } else { | |
2115 | if (!is_enabled) { | |
2116 | vfio_disable_msi(vdev); | |
2117 | } else { | |
2118 | vfio_update_msi(vdev); | |
2119 | } | |
2120 | } | |
2121 | } else if (pdev->cap_present & QEMU_PCI_CAP_MSIX && | |
2122 | ranges_overlap(addr, len, pdev->msix_cap, MSIX_CAP_LENGTH)) { | |
2123 | int is_enabled, was_enabled = msix_enabled(pdev); | |
2124 | ||
2125 | pci_default_write_config(pdev, addr, val, len); | |
2126 | ||
2127 | is_enabled = msix_enabled(pdev); | |
2128 | ||
2129 | if (!was_enabled && is_enabled) { | |
2130 | vfio_enable_msix(vdev); | |
2131 | } else if (was_enabled && !is_enabled) { | |
2132 | vfio_disable_msix(vdev); | |
2133 | } | |
2134 | } else { | |
2135 | /* Write everything to QEMU to keep emulated bits correct */ | |
2136 | pci_default_write_config(pdev, addr, val, len); | |
2137 | } | |
2138 | } | |
2139 | ||
2140 | /* | |
2141 | * Interrupt setup | |
2142 | */ | |
2143 | static void vfio_disable_interrupts(VFIOPCIDevice *vdev) | |
2144 | { | |
2145 | /* | |
2146 | * More complicated than it looks. Disabling MSI/X transitions the | |
2147 | * device to INTx mode (if supported). Therefore we need to first | |
2148 | * disable MSI/X and then cleanup by disabling INTx. | |
2149 | */ | |
2150 | if (vdev->interrupt == VFIO_INT_MSIX) { | |
2151 | vfio_disable_msix(vdev); | |
2152 | } else if (vdev->interrupt == VFIO_INT_MSI) { | |
2153 | vfio_disable_msi(vdev); | |
2154 | } | |
2155 | ||
2156 | if (vdev->interrupt == VFIO_INT_INTx) { | |
2157 | vfio_disable_intx(vdev); | |
2158 | } | |
2159 | } | |
2160 | ||
2161 | static int vfio_setup_msi(VFIOPCIDevice *vdev, int pos) | |
2162 | { | |
2163 | uint16_t ctrl; | |
2164 | bool msi_64bit, msi_maskbit; | |
2165 | int ret, entries; | |
2166 | ||
2167 | if (pread(vdev->vbasedev.fd, &ctrl, sizeof(ctrl), | |
2168 | vdev->config_offset + pos + PCI_CAP_FLAGS) != sizeof(ctrl)) { | |
2169 | return -errno; | |
2170 | } | |
2171 | ctrl = le16_to_cpu(ctrl); | |
2172 | ||
2173 | msi_64bit = !!(ctrl & PCI_MSI_FLAGS_64BIT); | |
2174 | msi_maskbit = !!(ctrl & PCI_MSI_FLAGS_MASKBIT); | |
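| /* Multiple Message Capable is log2 encoded, e.g. a field value of 3 | |
| * advertises 1 << 3 = 8 vectors. */ | |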
2175 | entries = 1 << ((ctrl & PCI_MSI_FLAGS_QMASK) >> 1); | |
2176 | ||
2177 | trace_vfio_setup_msi(vdev->vbasedev.name, pos); | |
2178 | ||
2179 | ret = msi_init(&vdev->pdev, pos, entries, msi_64bit, msi_maskbit); | |
2180 | if (ret < 0) { | |
2181 | if (ret == -ENOTSUP) { | |
2182 | return 0; | |
2183 | } | |
2184 | error_report("vfio: msi_init failed"); | |
2185 | return ret; | |
2186 | } | |
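| /* | |
| * Capability length per the PCI spec: 0xa bytes for the base layout | |
| * (ID, next, control, address, data), +0xa when per-vector masking | |
| * adds the padded data, mask, and pending fields, and +0x4 for the | |
| * upper address dword of a 64-bit capability; e.g. 64-bit with | |
| * masking is 0xa + 0xa + 0x4 = 0x18 bytes. | |
| */ | |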
2187 | vdev->msi_cap_size = 0xa + (msi_maskbit ? 0xa : 0) + (msi_64bit ? 0x4 : 0); | |
2188 | ||
2189 | return 0; | |
2190 | } | |
2191 | ||
2192 | /* | |
2193 | * We don't have any control over how pci_add_capability() inserts | |
2194 | * capabilities into the chain. In order to setup MSI-X we need a | |
2195 | * MemoryRegion for the BAR. In order to setup the BAR and not | |
2196 | * attempt to mmap the MSI-X table area, which VFIO won't allow, we | |
2197 | * need to first look for where the MSI-X table lives. So we | |
2198 | * unfortunately split MSI-X setup across two functions. | |
2199 | */ | |
2200 | static int vfio_early_setup_msix(VFIOPCIDevice *vdev) | |
2201 | { | |
2202 | uint8_t pos; | |
2203 | uint16_t ctrl; | |
2204 | uint32_t table, pba; | |
2205 | int fd = vdev->vbasedev.fd; | |
2206 | ||
2207 | pos = pci_find_capability(&vdev->pdev, PCI_CAP_ID_MSIX); | |
2208 | if (!pos) { | |
2209 | return 0; | |
2210 | } | |
2211 | ||
2212 | if (pread(fd, &ctrl, sizeof(ctrl), | |
2213 | vdev->config_offset + pos + PCI_CAP_FLAGS) != sizeof(ctrl)) { | |
2214 | return -errno; | |
2215 | } | |
2216 | ||
2217 | if (pread(fd, &table, sizeof(table), | |
2218 | vdev->config_offset + pos + PCI_MSIX_TABLE) != sizeof(table)) { | |
2219 | return -errno; | |
2220 | } | |
2221 | ||
2222 | if (pread(fd, &pba, sizeof(pba), | |
2223 | vdev->config_offset + pos + PCI_MSIX_PBA) != sizeof(pba)) { | |
2224 | return -errno; | |
2225 | } | |
2226 | ||
2227 | ctrl = le16_to_cpu(ctrl); | |
2228 | table = le32_to_cpu(table); | |
2229 | pba = le32_to_cpu(pba); | |
2230 | ||
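| /* | |
| * The low bits of the table/PBA registers encode the BAR index (BIR) | |
| * and the remainder is the offset, e.g. table = 0x2004 means BIR 4, | |
| * offset 0x2000. QSIZE is N-1 encoded, so 7 means 8 vectors. | |
| */ | |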
2231 | vdev->msix = g_malloc0(sizeof(*(vdev->msix))); | |
2232 | vdev->msix->table_bar = table & PCI_MSIX_FLAGS_BIRMASK; | |
2233 | vdev->msix->table_offset = table & ~PCI_MSIX_FLAGS_BIRMASK; | |
2234 | vdev->msix->pba_bar = pba & PCI_MSIX_FLAGS_BIRMASK; | |
2235 | vdev->msix->pba_offset = pba & ~PCI_MSIX_FLAGS_BIRMASK; | |
2236 | vdev->msix->entries = (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1; | |
2237 | ||
2238 | /* | |
2239 | * Test the pba_offset and catch the case where it extends outside of the | |
2240 | * specified BAR. If so, we need to apply a hardware-specific quirk for a | |
2241 | * known device, otherwise the configuration is broken. | |
2242 | */ | |
2243 | if (vdev->msix->pba_offset >= | |
2244 | vdev->bars[vdev->msix->pba_bar].region.size) { | |
2245 | ||
2246 | PCIDevice *pdev = &vdev->pdev; | |
2247 | uint16_t vendor = pci_get_word(pdev->config + PCI_VENDOR_ID); | |
2248 | uint16_t device = pci_get_word(pdev->config + PCI_DEVICE_ID); | |
2249 | ||
2250 | /* | |
2251 | * Chelsio T5 Virtual Function devices are encoded as 0x58xx for T5 | |
2252 | * adapters. The T5 hardware returns an incorrect value of 0x8000 for | |
2253 | * the VF PBA offset while the BAR itself is only 8k. The correct value | |
2254 | * is 0x1000, so we hard code that here. | |
2255 | */ | |
2256 | if (vendor == PCI_VENDOR_ID_CHELSIO && (device & 0xff00) == 0x5800) { | |
2257 | vdev->msix->pba_offset = 0x1000; | |
2258 | } else { | |
2259 | error_report("vfio: Hardware reports invalid configuration, " | |
2260 | "MSIX PBA outside of specified BAR"); | |
2261 | return -EINVAL; | |
2262 | } | |
2263 | } | |
2264 | ||
2265 | trace_vfio_early_setup_msix(vdev->vbasedev.name, pos, | |
2266 | vdev->msix->table_bar, | |
2267 | vdev->msix->table_offset, | |
2268 | vdev->msix->entries); | |
2269 | ||
2270 | return 0; | |
2271 | } | |
2272 | ||
2273 | static int vfio_setup_msix(VFIOPCIDevice *vdev, int pos) | |
2274 | { | |
2275 | int ret; | |
2276 | ||
2277 | ret = msix_init(&vdev->pdev, vdev->msix->entries, | |
2278 | &vdev->bars[vdev->msix->table_bar].region.mem, | |
2279 | vdev->msix->table_bar, vdev->msix->table_offset, | |
2280 | &vdev->bars[vdev->msix->pba_bar].region.mem, | |
2281 | vdev->msix->pba_bar, vdev->msix->pba_offset, pos); | |
2282 | if (ret < 0) { | |
2283 | if (ret == -ENOTSUP) { | |
2284 | return 0; | |
2285 | } | |
2286 | error_report("vfio: msix_init failed"); | |
2287 | return ret; | |
2288 | } | |
2289 | ||
2290 | return 0; | |
2291 | } | |
2292 | ||
2293 | static void vfio_teardown_msi(VFIOPCIDevice *vdev) | |
2294 | { | |
2295 | msi_uninit(&vdev->pdev); | |
2296 | ||
2297 | if (vdev->msix) { | |
2298 | msix_uninit(&vdev->pdev, | |
2299 | &vdev->bars[vdev->msix->table_bar].region.mem, | |
2300 | &vdev->bars[vdev->msix->pba_bar].region.mem); | |
2301 | } | |
2302 | } | |
2303 | ||
2304 | /* | |
2305 | * Resource setup | |
2306 | */ | |
2307 | static void vfio_mmap_set_enabled(VFIOPCIDevice *vdev, bool enabled) | |
2308 | { | |
2309 | int i; | |
2310 | ||
2311 | for (i = 0; i < PCI_ROM_SLOT; i++) { | |
2312 | VFIOBAR *bar = &vdev->bars[i]; | |
2313 | ||
2314 | if (!bar->region.size) { | |
2315 | continue; | |
2316 | } | |
2317 | ||
2318 | memory_region_set_enabled(&bar->region.mmap_mem, enabled); | |
2319 | if (vdev->msix && vdev->msix->table_bar == i) { | |
2320 | memory_region_set_enabled(&vdev->msix->mmap_mem, enabled); | |
2321 | } | |
2322 | } | |
2323 | } | |
2324 | ||
2325 | static void vfio_unregister_bar(VFIOPCIDevice *vdev, int nr) | |
2326 | { | |
2327 | VFIOBAR *bar = &vdev->bars[nr]; | |
2328 | ||
2329 | if (!bar->region.size) { | |
2330 | return; | |
2331 | } | |
2332 | ||
2333 | vfio_bar_quirk_teardown(vdev, nr); | |
2334 | ||
2335 | memory_region_del_subregion(&bar->region.mem, &bar->region.mmap_mem); | |
2336 | ||
2337 | if (vdev->msix && vdev->msix->table_bar == nr) { | |
2338 | memory_region_del_subregion(&bar->region.mem, &vdev->msix->mmap_mem); | |
2339 | } | |
2340 | } | |
2341 | ||
2342 | static void vfio_unmap_bar(VFIOPCIDevice *vdev, int nr) | |
2343 | { | |
2344 | VFIOBAR *bar = &vdev->bars[nr]; | |
2345 | ||
2346 | if (!bar->region.size) { | |
2347 | return; | |
2348 | } | |
2349 | ||
2350 | vfio_bar_quirk_free(vdev, nr); | |
2351 | ||
2352 | munmap(bar->region.mmap, memory_region_size(&bar->region.mmap_mem)); | |
2353 | ||
2354 | if (vdev->msix && vdev->msix->table_bar == nr) { | |
2355 | munmap(vdev->msix->mmap, memory_region_size(&vdev->msix->mmap_mem)); | |
2356 | } | |
2357 | } | |
2358 | ||
2359 | static void vfio_map_bar(VFIOPCIDevice *vdev, int nr) | |
2360 | { | |
2361 | VFIOBAR *bar = &vdev->bars[nr]; | |
2362 | uint64_t size = bar->region.size; | |
2363 | char name[64]; | |
2364 | uint32_t pci_bar; | |
2365 | uint8_t type; | |
2366 | int ret; | |
2367 | ||
2368 | /* Skip both unimplemented BARs and the upper half of 64bit BARS. */ | |
2369 | if (!size) { | |
2370 | return; | |
2371 | } | |
2372 | ||
2373 | snprintf(name, sizeof(name), "VFIO %04x:%02x:%02x.%x BAR %d", | |
2374 | vdev->host.domain, vdev->host.bus, vdev->host.slot, | |
2375 | vdev->host.function, nr); | |
2376 | ||
2377 | /* Determine what type of BAR this is for registration */ | |
2378 | ret = pread(vdev->vbasedev.fd, &pci_bar, sizeof(pci_bar), | |
2379 | vdev->config_offset + PCI_BASE_ADDRESS_0 + (4 * nr)); | |
2380 | if (ret != sizeof(pci_bar)) { | |
2381 | error_report("vfio: Failed to read BAR %d (%m)", nr); | |
2382 | return; | |
2383 | } | |
2384 | ||
2385 | pci_bar = le32_to_cpu(pci_bar); | |
2386 | bar->ioport = (pci_bar & PCI_BASE_ADDRESS_SPACE_IO); | |
2387 | bar->mem64 = bar->ioport ? 0 : (pci_bar & PCI_BASE_ADDRESS_MEM_TYPE_64); | |
2388 | type = pci_bar & (bar->ioport ? ~PCI_BASE_ADDRESS_IO_MASK : | |
2389 | ~PCI_BASE_ADDRESS_MEM_MASK); | |
2390 | ||
2391 | /* A "slow" read/write mapping underlies all BARs */ | |
2392 | memory_region_init_io(&bar->region.mem, OBJECT(vdev), &vfio_region_ops, | |
2393 | bar, name, size); | |
2394 | pci_register_bar(&vdev->pdev, nr, type, &bar->region.mem); | |
2395 | ||
2396 | /* | |
2397 | * We can't mmap areas overlapping the MSIX vector table, so we | |
2398 | * potentially insert direct-mapped subregions before and after it. | |
2399 | */ | |
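| /* | |
| * E.g. a 64KB BAR with a 16-entry table at 0x8000 on a 4KB-page host | |
| * splits into: mmap [0x0, 0x8000), trapped [0x8000, 0x9000), and | |
| * mmap [0x9000, 0x10000). | |
| */ | |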
2400 | if (vdev->msix && vdev->msix->table_bar == nr) { | |
2401 | size = vdev->msix->table_offset & qemu_real_host_page_mask; | |
2402 | } | |
2403 | ||
2404 | strncat(name, " mmap", sizeof(name) - strlen(name) - 1); | |
2405 | if (vfio_mmap_region(OBJECT(vdev), &bar->region, &bar->region.mem, | |
2406 | &bar->region.mmap_mem, &bar->region.mmap, | |
2407 | size, 0, name)) { | |
2408 | error_report("%s unsupported. Performance may be slow", name); | |
2409 | } | |
2410 | ||
2411 | if (vdev->msix && vdev->msix->table_bar == nr) { | |
2412 | uint64_t start; | |
2413 | ||
2414 | start = REAL_HOST_PAGE_ALIGN((uint64_t)vdev->msix->table_offset + | |
2415 | (vdev->msix->entries * | |
2416 | PCI_MSIX_ENTRY_SIZE)); | |
2417 | ||
2418 | size = start < bar->region.size ? bar->region.size - start : 0; | |
2419 | strncat(name, " msix-hi", sizeof(name) - strlen(name) - 1); | |
2420 | /* VFIOMSIXInfo contains another MemoryRegion for this mapping */ | |
2421 | if (vfio_mmap_region(OBJECT(vdev), &bar->region, &bar->region.mem, | |
2422 | &vdev->msix->mmap_mem, | |
2423 | &vdev->msix->mmap, size, start, name)) { | |
2424 | error_report("%s unsupported. Performance may be slow", name); | |
2425 | } | |
2426 | } | |
2427 | ||
2428 | vfio_bar_quirk_setup(vdev, nr); | |
2429 | } | |
2430 | ||
2431 | static void vfio_map_bars(VFIOPCIDevice *vdev) | |
2432 | { | |
2433 | int i; | |
2434 | ||
2435 | for (i = 0; i < PCI_ROM_SLOT; i++) { | |
2436 | vfio_map_bar(vdev, i); | |
2437 | } | |
2438 | ||
2439 | if (vdev->has_vga) { | |
2440 | memory_region_init_io(&vdev->vga.region[QEMU_PCI_VGA_MEM].mem, | |
2441 | OBJECT(vdev), &vfio_vga_ops, | |
2442 | &vdev->vga.region[QEMU_PCI_VGA_MEM], | |
2443 | "vfio-vga-mmio@0xa0000", | |
2444 | QEMU_PCI_VGA_MEM_SIZE); | |
2445 | memory_region_init_io(&vdev->vga.region[QEMU_PCI_VGA_IO_LO].mem, | |
2446 | OBJECT(vdev), &vfio_vga_ops, | |
2447 | &vdev->vga.region[QEMU_PCI_VGA_IO_LO], | |
2448 | "vfio-vga-io@0x3b0", | |
2449 | QEMU_PCI_VGA_IO_LO_SIZE); | |
2450 | memory_region_init_io(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem, | |
2451 | OBJECT(vdev), &vfio_vga_ops, | |
2452 | &vdev->vga.region[QEMU_PCI_VGA_IO_HI], | |
2453 | "vfio-vga-io@0x3c0", | |
2454 | QEMU_PCI_VGA_IO_HI_SIZE); | |
2455 | ||
2456 | pci_register_vga(&vdev->pdev, &vdev->vga.region[QEMU_PCI_VGA_MEM].mem, | |
2457 | &vdev->vga.region[QEMU_PCI_VGA_IO_LO].mem, | |
2458 | &vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem); | |
2459 | vfio_vga_quirk_setup(vdev); | |
2460 | } | |
2461 | } | |
2462 | ||
2463 | static void vfio_unregister_bars(VFIOPCIDevice *vdev) | |
2464 | { | |
2465 | int i; | |
2466 | ||
2467 | for (i = 0; i < PCI_ROM_SLOT; i++) { | |
2468 | vfio_unregister_bar(vdev, i); | |
2469 | } | |
2470 | ||
2471 | if (vdev->has_vga) { | |
2472 | vfio_vga_quirk_teardown(vdev); | |
2473 | pci_unregister_vga(&vdev->pdev); | |
2474 | } | |
2475 | } | |
2476 | ||
2477 | static void vfio_unmap_bars(VFIOPCIDevice *vdev) | |
2478 | { | |
2479 | int i; | |
2480 | ||
2481 | for (i = 0; i < PCI_ROM_SLOT; i++) { | |
2482 | vfio_unmap_bar(vdev, i); | |
2483 | } | |
2484 | ||
2485 | if (vdev->has_vga) { | |
2486 | vfio_vga_quirk_free(vdev); | |
2487 | } | |
2488 | } | |
2489 | ||
2490 | /* | |
2491 | * General setup | |
2492 | */ | |
2493 | static uint8_t vfio_std_cap_max_size(PCIDevice *pdev, uint8_t pos) | |
2494 | { | |
2495 | uint8_t tmp, next = 0xff; | |
2496 | ||
2497 | for (tmp = pdev->config[PCI_CAPABILITY_LIST]; tmp; | |
2498 | tmp = pdev->config[tmp + 1]) { | |
2499 | if (tmp > pos && tmp < next) { | |
2500 | next = tmp; | |
2501 | } | |
2502 | } | |
2503 | ||
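| /* E.g. caps at 0x40, 0x50, 0x60: the cap at 0x40 can be at most | |
| * 0x50 - 0x40 = 0x10 bytes. For the last cap, next stays at 0xff | |
| * and the size runs to the end of standard config space. */ | |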
2504 | return next - pos; | |
2505 | } | |
2506 | ||
2507 | static void vfio_set_word_bits(uint8_t *buf, uint16_t val, uint16_t mask) | |
2508 | { | |
2509 | pci_set_word(buf, (pci_get_word(buf) & ~mask) | val); | |
2510 | } | |
2511 | ||
2512 | static void vfio_add_emulated_word(VFIOPCIDevice *vdev, int pos, | |
2513 | uint16_t val, uint16_t mask) | |
2514 | { | |
2515 | vfio_set_word_bits(vdev->pdev.config + pos, val, mask); | |
2516 | vfio_set_word_bits(vdev->pdev.wmask + pos, ~mask, mask); | |
2517 | vfio_set_word_bits(vdev->emulated_config_bits + pos, mask, mask); | |
2518 | } | |
2519 | ||
2520 | static void vfio_set_long_bits(uint8_t *buf, uint32_t val, uint32_t mask) | |
2521 | { | |
2522 | pci_set_long(buf, (pci_get_long(buf) & ~mask) | val); | |
2523 | } | |
2524 | ||
2525 | static void vfio_add_emulated_long(VFIOPCIDevice *vdev, int pos, | |
2526 | uint32_t val, uint32_t mask) | |
2527 | { | |
2528 | vfio_set_long_bits(vdev->pdev.config + pos, val, mask); | |
2529 | vfio_set_long_bits(vdev->pdev.wmask + pos, ~mask, mask); | |
2530 | vfio_set_long_bits(vdev->emulated_config_bits + pos, mask, mask); | |
2531 | } | |
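| ||
| /* | |
| * E.g. vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0) zeroes | |
| * the Link Control word in the emulated config, clears its wmask bits so | |
| * it reads as read-only, and marks the bits emulated so that | |
| * vfio_pci_read_config() serves them from QEMU rather than hardware. | |
| */ | |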
2532 | ||
2533 | static int vfio_setup_pcie_cap(VFIOPCIDevice *vdev, int pos, uint8_t size) | |
2534 | { | |
2535 | uint16_t flags; | |
2536 | uint8_t type; | |
2537 | ||
2538 | flags = pci_get_word(vdev->pdev.config + pos + PCI_CAP_FLAGS); | |
2539 | type = (flags & PCI_EXP_FLAGS_TYPE) >> 4; | |
2540 | ||
2541 | if (type != PCI_EXP_TYPE_ENDPOINT && | |
2542 | type != PCI_EXP_TYPE_LEG_END && | |
2543 | type != PCI_EXP_TYPE_RC_END) { | |
2544 | ||
2545 | error_report("vfio: Assignment of PCIe type 0x%x " | |
2546 | "devices is not currently supported", type); | |
2547 | return -EINVAL; | |
2548 | } | |
2549 | ||
2550 | if (!pci_bus_is_express(vdev->pdev.bus)) { | |
2551 | /* | |
2552 | * Use express capability as-is on PCI bus. It doesn't make much | |
2553 | * sense to even expose it, but some drivers (e.g. tg3) depend on it | |
2554 | * and guests don't seem to be particular about it. We'll need | |
2555 | * to revisit this or force express devices onto express buses if we | |
2556 | * ever expose an IOMMU to the guest. | |
2557 | */ | |
2558 | } else if (pci_bus_is_root(vdev->pdev.bus)) { | |
2559 | /* | |
2560 | * On a Root Complex bus Endpoints become Root Complex Integrated | |
2561 | * Endpoints, which changes the type and clears the LNK & LNK2 fields. | |
2562 | */ | |
2563 | if (type == PCI_EXP_TYPE_ENDPOINT) { | |
2564 | vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS, | |
2565 | PCI_EXP_TYPE_RC_END << 4, | |
2566 | PCI_EXP_FLAGS_TYPE); | |
2567 | ||
2568 | /* Link Capabilities, Status, and Control go away */ | |
2569 | if (size > PCI_EXP_LNKCTL) { | |
2570 | vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP, 0, ~0); | |
2571 | vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0); | |
2572 | vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA, 0, ~0); | |
2573 | ||
2574 | #ifndef PCI_EXP_LNKCAP2 | |
2575 | #define PCI_EXP_LNKCAP2 44 | |
2576 | #endif | |
2577 | #ifndef PCI_EXP_LNKSTA2 | |
2578 | #define PCI_EXP_LNKSTA2 50 | |
2579 | #endif | |
2580 | /* Link 2 Capabilities, Status, and Control go away */ | |
2581 | if (size > PCI_EXP_LNKCAP2) { | |
2582 | vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP2, 0, ~0); | |
2583 | vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL2, 0, ~0); | |
2584 | vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA2, 0, ~0); | |
2585 | } | |
2586 | } | |
2587 | ||
2588 | } else if (type == PCI_EXP_TYPE_LEG_END) { | |
2589 | /* | |
2590 | * Legacy endpoints don't belong on the root complex. Windows | |
2591 | * seems to be happier with devices if we skip the capability. | |
2592 | */ | |
2593 | return 0; | |
2594 | } | |
2595 | ||
2596 | } else { | |
2597 | /* | |
2598 | * Convert Root Complex Integrated Endpoints to regular endpoints. | |
2599 | * These devices don't support LNK/LNK2 capabilities, so make them up. | |
2600 | */ | |
2601 | if (type == PCI_EXP_TYPE_RC_END) { | |
2602 | vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS, | |
2603 | PCI_EXP_TYPE_ENDPOINT << 4, | |
2604 | PCI_EXP_FLAGS_TYPE); | |
2605 | vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP, | |
2606 | PCI_EXP_LNK_MLW_1 | PCI_EXP_LNK_LS_25, ~0); | |
2607 | vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0); | |
2608 | } | |
2609 | ||
2610 | /* Mark the Link Status bits as emulated to allow virtual negotiation */ | |
2611 | vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA, | |
2612 | pci_get_word(vdev->pdev.config + pos + | |
2613 | PCI_EXP_LNKSTA), | |
2614 | PCI_EXP_LNKCAP_MLW | PCI_EXP_LNKCAP_SLS); | |
2615 | } | |
2616 | ||
2617 | pos = pci_add_capability(&vdev->pdev, PCI_CAP_ID_EXP, pos, size); | |
2618 | if (pos >= 0) { | |
2619 | vdev->pdev.exp.exp_cap = pos; | |
2620 | } | |
2621 | ||
2622 | return pos; | |
2623 | } | |
2624 | ||
2625 | static void vfio_check_pcie_flr(VFIOPCIDevice *vdev, uint8_t pos) | |
2626 | { | |
2627 | uint32_t cap = pci_get_long(vdev->pdev.config + pos + PCI_EXP_DEVCAP); | |
2628 | ||
2629 | if (cap & PCI_EXP_DEVCAP_FLR) { | |
2630 | trace_vfio_check_pcie_flr(vdev->vbasedev.name); | |
2631 | vdev->has_flr = true; | |
2632 | } | |
2633 | } | |
2634 | ||
2635 | static void vfio_check_pm_reset(VFIOPCIDevice *vdev, uint8_t pos) | |
2636 | { | |
2637 | uint16_t csr = pci_get_word(vdev->pdev.config + pos + PCI_PM_CTRL); | |
2638 | ||
2639 | if (!(csr & PCI_PM_CTRL_NO_SOFT_RESET)) { | |
2640 | trace_vfio_check_pm_reset(vdev->vbasedev.name); | |
2641 | vdev->has_pm_reset = true; | |
2642 | } | |
2643 | } | |
2644 | ||
2645 | static void vfio_check_af_flr(VFIOPCIDevice *vdev, uint8_t pos) | |
2646 | { | |
2647 | uint8_t cap = pci_get_byte(vdev->pdev.config + pos + PCI_AF_CAP); | |
2648 | ||
2649 | if ((cap & PCI_AF_CAP_TP) && (cap & PCI_AF_CAP_FLR)) { | |
2650 | trace_vfio_check_af_flr(vdev->vbasedev.name); | |
2651 | vdev->has_flr = true; | |
2652 | } | |
2653 | } | |
2654 | ||
2655 | static int vfio_add_std_cap(VFIOPCIDevice *vdev, uint8_t pos) | |
2656 | { | |
2657 | PCIDevice *pdev = &vdev->pdev; | |
2658 | uint8_t cap_id, next, size; | |
2659 | int ret; | |
2660 | ||
2661 | cap_id = pdev->config[pos]; | |
2662 | next = pdev->config[pos + 1]; | |
2663 | ||
2664 | /* | |
2665 | * If it becomes important to configure capabilities to their actual | |
2666 | * size, use this as the default when it's something we don't recognize. | |
2667 | * Since QEMU doesn't actually handle many of the config accesses, | |
2668 | * matching the exact size doesn't seem worthwhile. | |
2669 | */ | |
2670 | size = vfio_std_cap_max_size(pdev, pos); | |
2671 | ||
2672 | /* | |
2673 | * pci_add_capability always inserts the new capability at the head | |
2674 | * of the chain. Therefore to end up with a chain that matches the | |
2675 | * physical device, we insert from the end by making this recursive. | |
2676 | * This is also why we pre-calculate size above, as the cached config | |
2677 | * space will be changed as we unwind the stack. | |
2678 | */ | |
2679 | if (next) { | |
2680 | ret = vfio_add_std_cap(vdev, next); | |
2681 | if (ret) { | |
2682 | return ret; | |
2683 | } | |
2684 | } else { | |
2685 | /* Begin the rebuild, use QEMU emulated list bits */ | |
2686 | pdev->config[PCI_CAPABILITY_LIST] = 0; | |
2687 | vdev->emulated_config_bits[PCI_CAPABILITY_LIST] = 0xff; | |
2688 | vdev->emulated_config_bits[PCI_STATUS] |= PCI_STATUS_CAP_LIST; | |
2689 | } | |
2690 | ||
2691 | /* Use emulated next pointer to allow dropping caps */ | |
2692 | pci_set_byte(vdev->emulated_config_bits + pos + 1, 0xff); | |
2693 | ||
2694 | switch (cap_id) { | |
2695 | case PCI_CAP_ID_MSI: | |
2696 | ret = vfio_setup_msi(vdev, pos); | |
2697 | break; | |
2698 | case PCI_CAP_ID_EXP: | |
2699 | vfio_check_pcie_flr(vdev, pos); | |
2700 | ret = vfio_setup_pcie_cap(vdev, pos, size); | |
2701 | break; | |
2702 | case PCI_CAP_ID_MSIX: | |
2703 | ret = vfio_setup_msix(vdev, pos); | |
2704 | break; | |
2705 | case PCI_CAP_ID_PM: | |
2706 | vfio_check_pm_reset(vdev, pos); | |
2707 | vdev->pm_cap = pos; | |
2708 | ret = pci_add_capability(pdev, cap_id, pos, size); | |
2709 | break; | |
2710 | case PCI_CAP_ID_AF: | |
2711 | vfio_check_af_flr(vdev, pos); | |
2712 | ret = pci_add_capability(pdev, cap_id, pos, size); | |
2713 | break; | |
2714 | default: | |
2715 | ret = pci_add_capability(pdev, cap_id, pos, size); | |
2716 | break; | |
2717 | } | |
2718 | ||
2719 | if (ret < 0) { | |
2720 | error_report("vfio: %04x:%02x:%02x.%x Error adding PCI capability " | |
2721 | "0x%x[0x%x]@0x%x: %d", vdev->host.domain, | |
2722 | vdev->host.bus, vdev->host.slot, vdev->host.function, | |
2723 | cap_id, size, pos, ret); | |
2724 | return ret; | |
2725 | } | |
2726 | ||
2727 | return 0; | |
2728 | } | |
2729 | ||
2730 | static int vfio_add_capabilities(VFIOPCIDevice *vdev) | |
2731 | { | |
2732 | PCIDevice *pdev = &vdev->pdev; | |
2733 | ||
2734 | if (!(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST) || | |
2735 | !pdev->config[PCI_CAPABILITY_LIST]) { | |
2736 | return 0; /* Nothing to add */ | |
2737 | } | |
2738 | ||
2739 | return vfio_add_std_cap(vdev, pdev->config[PCI_CAPABILITY_LIST]); | |
2740 | } | |
2741 | ||
2742 | static void vfio_pci_pre_reset(VFIOPCIDevice *vdev) | |
2743 | { | |
2744 | PCIDevice *pdev = &vdev->pdev; | |
2745 | uint16_t cmd; | |
2746 | ||
2747 | vfio_disable_interrupts(vdev); | |
2748 | ||
2749 | /* Make sure the device is in D0 */ | |
2750 | if (vdev->pm_cap) { | |
2751 | uint16_t pmcsr; | |
2752 | uint8_t state; | |
2753 | ||
2754 | pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2); | |
2755 | state = pmcsr & PCI_PM_CTRL_STATE_MASK; | |
2756 | if (state) { | |
2757 | pmcsr &= ~PCI_PM_CTRL_STATE_MASK; | |
2758 | vfio_pci_write_config(pdev, vdev->pm_cap + PCI_PM_CTRL, pmcsr, 2); | |
2759 | /* vfio handles the necessary delay here */ | |
2760 | pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2); | |
2761 | state = pmcsr & PCI_PM_CTRL_STATE_MASK; | |
2762 | if (state) { | |
2763 | error_report("vfio: Unable to power on device, stuck in D%d", | |
2764 | state); | |
2765 | } | |
2766 | } | |
2767 | } | |
2768 | ||
2769 | /* | |
2770 | * Stop any ongoing DMA by disconnecting I/O, MMIO, and bus master. | |
2771 | * Also put INTx Disable in known state. | |
2772 | */ | |
2773 | cmd = vfio_pci_read_config(pdev, PCI_COMMAND, 2); | |
2774 | cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | | |
2775 | PCI_COMMAND_INTX_DISABLE); | |
2776 | vfio_pci_write_config(pdev, PCI_COMMAND, cmd, 2); | |
2777 | } | |
2778 | ||
2779 | static void vfio_pci_post_reset(VFIOPCIDevice *vdev) | |
2780 | { | |
2781 | vfio_enable_intx(vdev); | |
2782 | } | |
2783 | ||
2784 | static bool vfio_pci_host_match(PCIHostDeviceAddress *host1, | |
2785 | PCIHostDeviceAddress *host2) | |
2786 | { | |
2787 | return (host1->domain == host2->domain && host1->bus == host2->bus && | |
2788 | host1->slot == host2->slot && host1->function == host2->function); | |
2789 | } | |
2790 | ||
2791 | static int vfio_pci_hot_reset(VFIOPCIDevice *vdev, bool single) | |
2792 | { | |
2793 | VFIOGroup *group; | |
2794 | struct vfio_pci_hot_reset_info *info; | |
2795 | struct vfio_pci_dependent_device *devices; | |
2796 | struct vfio_pci_hot_reset *reset; | |
2797 | int32_t *fds; | |
2798 | int ret, i, count; | |
2799 | bool multi = false; | |
2800 | ||
2801 | trace_vfio_pci_hot_reset(vdev->vbasedev.name, single ? "one" : "multi"); | |
2802 | ||
2803 | vfio_pci_pre_reset(vdev); | |
2804 | vdev->vbasedev.needs_reset = false; | |
2805 | ||
2806 | info = g_malloc0(sizeof(*info)); | |
2807 | info->argsz = sizeof(*info); | |
2808 | ||
2809 | ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info); | |
2810 | if (ret && errno != ENOSPC) { | |
2811 | ret = -errno; | |
2812 | if (!vdev->has_pm_reset) { | |
2813 | error_report("vfio: Cannot reset device %04x:%02x:%02x.%x, " | |
2814 | "no available reset mechanism.", vdev->host.domain, | |
2815 | vdev->host.bus, vdev->host.slot, vdev->host.function); | |
2816 | } | |
2817 | goto out_single; | |
2818 | } | |
2819 | ||
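| /* | |
| * Standard vfio argsz dance: the first ioctl ran with a bare header and | |
| * the kernel filled in info->count; retry with room for one | |
| * vfio_pci_dependent_device entry per affected device. | |
| */ | |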
2820 | count = info->count; | |
2821 | info = g_realloc(info, sizeof(*info) + (count * sizeof(*devices))); | |
2822 | info->argsz = sizeof(*info) + (count * sizeof(*devices)); | |
2823 | devices = &info->devices[0]; | |
2824 | ||
2825 | ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info); | |
2826 | if (ret) { | |
2827 | ret = -errno; | |
2828 | error_report("vfio: hot reset info failed: %m"); | |
2829 | goto out_single; | |
2830 | } | |
2831 | ||
2832 | trace_vfio_pci_hot_reset_has_dep_devices(vdev->vbasedev.name); | |
2833 | ||
2834 | /* Verify that we have all the groups required */ | |
2835 | for (i = 0; i < info->count; i++) { | |
2836 | PCIHostDeviceAddress host; | |
2837 | VFIOPCIDevice *tmp; | |
2838 | VFIODevice *vbasedev_iter; | |
2839 | ||
2840 | host.domain = devices[i].segment; | |
2841 | host.bus = devices[i].bus; | |
2842 | host.slot = PCI_SLOT(devices[i].devfn); | |
2843 | host.function = PCI_FUNC(devices[i].devfn); | |
2844 | ||
2845 | trace_vfio_pci_hot_reset_dep_devices(host.domain, | |
2846 | host.bus, host.slot, host.function, devices[i].group_id); | |
2847 | ||
2848 | if (vfio_pci_host_match(&host, &vdev->host)) { | |
2849 | continue; | |
2850 | } | |
2851 | ||
2852 | QLIST_FOREACH(group, &vfio_group_list, next) { | |
2853 | if (group->groupid == devices[i].group_id) { | |
2854 | break; | |
2855 | } | |
2856 | } | |
2857 | ||
2858 | if (!group) { | |
2859 | if (!vdev->has_pm_reset) { | |
2860 | error_report("vfio: Cannot reset device %s, " | |
2861 | "depends on group %d which is not owned.", | |
2862 | vdev->vbasedev.name, devices[i].group_id); | |
2863 | } | |
2864 | ret = -EPERM; | |
2865 | goto out; | |
2866 | } | |
2867 | ||
2868 | /* Prep dependent devices for reset and clear our marker. */ | |
2869 | QLIST_FOREACH(vbasedev_iter, &group->device_list, next) { | |
2870 | if (vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) { | |
2871 | continue; | |
2872 | } | |
2873 | tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev); | |
2874 | if (vfio_pci_host_match(&host, &tmp->host)) { | |
2875 | if (single) { | |
2876 | ret = -EINVAL; | |
2877 | goto out_single; | |
2878 | } | |
2879 | vfio_pci_pre_reset(tmp); | |
2880 | tmp->vbasedev.needs_reset = false; | |
2881 | multi = true; | |
2882 | break; | |
2883 | } | |
2884 | } | |
2885 | } | |
2886 | ||
2887 | if (!single && !multi) { | |
2888 | ret = -EINVAL; | |
2889 | goto out_single; | |
2890 | } | |
2891 | ||
2892 | /* Determine how many group fds need to be passed */ | |
2893 | count = 0; | |
2894 | QLIST_FOREACH(group, &vfio_group_list, next) { | |
2895 | for (i = 0; i < info->count; i++) { | |
2896 | if (group->groupid == devices[i].group_id) { | |
2897 | count++; | |
2898 | break; | |
2899 | } | |
2900 | } | |
2901 | } | |
2902 | ||
2903 | reset = g_malloc0(sizeof(*reset) + (count * sizeof(*fds))); | |
2904 | reset->argsz = sizeof(*reset) + (count * sizeof(*fds)); | |
2905 | fds = &reset->group_fds[0]; | |
2906 | ||
2907 | /* Fill in group fds */ | |
2908 | QLIST_FOREACH(group, &vfio_group_list, next) { | |
2909 | for (i = 0; i < info->count; i++) { | |
2910 | if (group->groupid == devices[i].group_id) { | |
2911 | fds[reset->count++] = group->fd; | |
2912 | break; | |
2913 | } | |
2914 | } | |
2915 | } | |
2916 | ||
2917 | /* Bus reset! */ | |
2918 | ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_PCI_HOT_RESET, reset); | |
2919 | g_free(reset); | |
2920 | ||
2921 | trace_vfio_pci_hot_reset_result(vdev->vbasedev.name, | |
2922 | ret ? "%m" : "Success"); | |
2923 | ||
2924 | out: | |
2925 | /* Re-enable INTx on affected devices */ | |
2926 | for (i = 0; i < info->count; i++) { | |
2927 | PCIHostDeviceAddress host; | |
2928 | VFIOPCIDevice *tmp; | |
2929 | VFIODevice *vbasedev_iter; | |
2930 | ||
2931 | host.domain = devices[i].segment; | |
2932 | host.bus = devices[i].bus; | |
2933 | host.slot = PCI_SLOT(devices[i].devfn); | |
2934 | host.function = PCI_FUNC(devices[i].devfn); | |
2935 | ||
2936 | if (vfio_pci_host_match(&host, &vdev->host)) { | |
2937 | continue; | |
2938 | } | |
2939 | ||
2940 | QLIST_FOREACH(group, &vfio_group_list, next) { | |
2941 | if (group->groupid == devices[i].group_id) { | |
2942 | break; | |
2943 | } | |
2944 | } | |
2945 | ||
2946 | if (!group) { | |
2947 | break; | |
2948 | } | |
2949 | ||
2950 | QLIST_FOREACH(vbasedev_iter, &group->device_list, next) { | |
2951 | if (vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) { | |
2952 | continue; | |
2953 | } | |
2954 | tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev); | |
2955 | if (vfio_pci_host_match(&host, &tmp->host)) { | |
2956 | vfio_pci_post_reset(tmp); | |
2957 | break; | |
2958 | } | |
2959 | } | |
2960 | } | |
2961 | out_single: | |
2962 | vfio_pci_post_reset(vdev); | |
2963 | g_free(info); | |
2964 | ||
2965 | return ret; | |
2966 | } | |
2967 | ||
2968 | /* | |
2969 | * We want to differentiate hot reset of multiple in-use devices vs. hot reset | |
2970 | * of a single in-use device. VFIO_DEVICE_RESET will already handle the case | |
2971 | * of doing hot resets when there is only a single device per bus. The in-use | |
2972 | * here refers to how many VFIODevices are affected. A hot reset that affects | |
2973 | * multiple devices, but only a single in-use device, means that we can call | |
2974 | * it from our bus ->reset() callback since the extent is effectively a single | |
2975 | * device. This allows us to make use of it in the hotplug path. When there | |
2976 | * are multiple in-use devices, we can only trigger the hot reset during a | |
2977 | * system reset and thus from our reset handler. We separate _one vs _multi | |
2978 | * here so that we don't overlap and do a double reset on the system reset | |
2979 | * path where both our reset handler and ->reset() callback are used. Calling | |
2980 | * _one() will only do a hot reset for the one in-use devices case, calling | |
2981 | * _multi() will do nothing if a _one() would have been sufficient. | |
2982 | */ | |
2983 | static int vfio_pci_hot_reset_one(VFIOPCIDevice *vdev) | |
2984 | { | |
2985 | return vfio_pci_hot_reset(vdev, true); | |
2986 | } | |
2987 | ||
2988 | static int vfio_pci_hot_reset_multi(VFIODevice *vbasedev) | |
2989 | { | |
2990 | VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev); | |
2991 | return vfio_pci_hot_reset(vdev, false); | |
2992 | } | |
2993 | ||
2994 | static void vfio_pci_compute_needs_reset(VFIODevice *vbasedev) | |
2995 | { | |
2996 | VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev); | |
2997 | if (!vbasedev->reset_works || (!vdev->has_flr && vdev->has_pm_reset)) { | |
2998 | vbasedev->needs_reset = true; | |
2999 | } | |
3000 | } | |
3001 | ||
3002 | static VFIODeviceOps vfio_pci_ops = { | |
3003 | .vfio_compute_needs_reset = vfio_pci_compute_needs_reset, | |
3004 | .vfio_hot_reset_multi = vfio_pci_hot_reset_multi, | |
3005 | .vfio_eoi = vfio_eoi, | |
3006 | }; | |
3007 | ||
3008 | static int vfio_populate_device(VFIOPCIDevice *vdev) | |
3009 | { | |
3010 | VFIODevice *vbasedev = &vdev->vbasedev; | |
3011 | struct vfio_region_info reg_info = { .argsz = sizeof(reg_info) }; | |
3012 | struct vfio_irq_info irq_info = { .argsz = sizeof(irq_info) }; | |
3013 | int i, ret = -1; | |
3014 | ||
3015 | /* Sanity check device */ | |
3016 | if (!(vbasedev->flags & VFIO_DEVICE_FLAGS_PCI)) { | |
3017 | error_report("vfio: Um, this isn't a PCI device"); | |
3018 | goto error; | |
3019 | } | |
3020 | ||
3021 | if (vbasedev->num_regions < VFIO_PCI_CONFIG_REGION_INDEX + 1) { | |
3022 | error_report("vfio: unexpected number of io regions %u", | |
3023 | vbasedev->num_regions); | |
3024 | goto error; | |
3025 | } | |
3026 | ||
3027 | if (vbasedev->num_irqs < VFIO_PCI_MSIX_IRQ_INDEX + 1) { | |
3028 | error_report("vfio: unexpected number of irqs %u", vbasedev->num_irqs); | |
3029 | goto error; | |
3030 | } | |
3031 | ||
3032 | for (i = VFIO_PCI_BAR0_REGION_INDEX; i < VFIO_PCI_ROM_REGION_INDEX; i++) { | |
3033 | reg_info.index = i; | |
3034 | ||
3035 | ret = ioctl(vbasedev->fd, VFIO_DEVICE_GET_REGION_INFO, ®_info); | |
3036 | if (ret) { | |
3037 | error_report("vfio: Error getting region %d info: %m", i); | |
3038 | goto error; | |
3039 | } | |
3040 | ||
3041 | trace_vfio_populate_device_region(vbasedev->name, i, | |
3042 | (unsigned long)reg_info.size, | |
3043 | (unsigned long)reg_info.offset, | |
3044 | (unsigned long)reg_info.flags); | |
3045 | ||
3046 | vdev->bars[i].region.vbasedev = vbasedev; | |
3047 | vdev->bars[i].region.flags = reg_info.flags; | |
3048 | vdev->bars[i].region.size = reg_info.size; | |
3049 | vdev->bars[i].region.fd_offset = reg_info.offset; | |
3050 | vdev->bars[i].region.nr = i; | |
3051 | QLIST_INIT(&vdev->bars[i].quirks); | |
3052 | } | |
3053 | ||
3054 | reg_info.index = VFIO_PCI_CONFIG_REGION_INDEX; | |
3055 | ||
3056 | ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_REGION_INFO, ®_info); | |
3057 | if (ret) { | |
3058 | error_report("vfio: Error getting config info: %m"); | |
3059 | goto error; | |
3060 | } | |
3061 | ||
3062 | trace_vfio_populate_device_config(vdev->vbasedev.name, | |
3063 | (unsigned long)reg_info.size, | |
3064 | (unsigned long)reg_info.offset, | |
3065 | (unsigned long)reg_info.flags); | |
3066 | ||
3067 | vdev->config_size = reg_info.size; | |
3068 | if (vdev->config_size == PCI_CONFIG_SPACE_SIZE) { | |
3069 | vdev->pdev.cap_present &= ~QEMU_PCI_CAP_EXPRESS; | |
3070 | } | |
3071 | vdev->config_offset = reg_info.offset; | |
3072 | ||
3073 | if ((vdev->features & VFIO_FEATURE_ENABLE_VGA) && | |
3074 | vbasedev->num_regions > VFIO_PCI_VGA_REGION_INDEX) { | |
3075 | struct vfio_region_info vga_info = { | |
3076 | .argsz = sizeof(vga_info), | |
3077 | .index = VFIO_PCI_VGA_REGION_INDEX, | |
3078 | }; | |
3079 | ||
3080 | ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_REGION_INFO, &vga_info); | |
3081 | if (ret) { | |
3082 | error_report( | |
3083 | "vfio: Device does not support requested feature x-vga"); | |
3084 | goto error; | |
3085 | } | |
3086 | ||
3087 | if (!(vga_info.flags & VFIO_REGION_INFO_FLAG_READ) || | |
3088 | !(vga_info.flags & VFIO_REGION_INFO_FLAG_WRITE) || | |
3089 | vga_info.size < 0xbffff + 1) { | |
3090 | error_report("vfio: Unexpected VGA info, flags 0x%lx, size 0x%lx", | |
3091 | (unsigned long)vga_info.flags, | |
3092 | (unsigned long)vga_info.size); | |
3093 | goto error; | |
3094 | } | |
3095 | ||
3096 | vdev->vga.fd_offset = vga_info.offset; | |
3097 | vdev->vga.fd = vdev->vbasedev.fd; | |
3098 | ||
3099 | vdev->vga.region[QEMU_PCI_VGA_MEM].offset = QEMU_PCI_VGA_MEM_BASE; | |
3100 | vdev->vga.region[QEMU_PCI_VGA_MEM].nr = QEMU_PCI_VGA_MEM; | |
3101 | QLIST_INIT(&vdev->vga.region[QEMU_PCI_VGA_MEM].quirks); | |
3102 | ||
3103 | vdev->vga.region[QEMU_PCI_VGA_IO_LO].offset = QEMU_PCI_VGA_IO_LO_BASE; | |
3104 | vdev->vga.region[QEMU_PCI_VGA_IO_LO].nr = QEMU_PCI_VGA_IO_LO; | |
3105 | QLIST_INIT(&vdev->vga.region[QEMU_PCI_VGA_IO_LO].quirks); | |
3106 | ||
3107 | vdev->vga.region[QEMU_PCI_VGA_IO_HI].offset = QEMU_PCI_VGA_IO_HI_BASE; | |
3108 | vdev->vga.region[QEMU_PCI_VGA_IO_HI].nr = QEMU_PCI_VGA_IO_HI; | |
3109 | QLIST_INIT(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].quirks); | |
3110 | ||
3111 | vdev->has_vga = true; | |
3112 | } | |
3113 | ||
3114 | irq_info.index = VFIO_PCI_ERR_IRQ_INDEX; | |
3115 | ||
3116 | ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_IRQ_INFO, &irq_info); | |
3117 | if (ret) { | |
3118 | /* This can fail for an old kernel or legacy PCI dev */ | |
3119 | trace_vfio_populate_device_get_irq_info_failure(); | |
3120 | ret = 0; | |
3121 | } else if (irq_info.count == 1) { | |
3122 | vdev->pci_aer = true; | |
3123 | } else { | |
3124 | error_report("vfio: %s " | |
3125 | "Could not enable error recovery for the device", | |
3126 | vbasedev->name); | |
3127 | } | |
3128 | ||
3129 | error: | |
3130 | return ret; | |
3131 | } | |
3132 | ||
3133 | static void vfio_put_device(VFIOPCIDevice *vdev) | |
3134 | { | |
3135 | g_free(vdev->vbasedev.name); | |
3136 | if (vdev->msix) { | |
3137 | object_unparent(OBJECT(&vdev->msix->mmap_mem)); | |
3138 | g_free(vdev->msix); | |
3139 | vdev->msix = NULL; | |
3140 | } | |
3141 | vfio_put_base_device(&vdev->vbasedev); | |
3142 | } | |
3143 | ||
3144 | static void vfio_err_notifier_handler(void *opaque) | |
3145 | { | |
3146 | VFIOPCIDevice *vdev = opaque; | |
3147 | ||
3148 | if (!event_notifier_test_and_clear(&vdev->err_notifier)) { | |
3149 | return; | |
3150 | } | |
3151 | ||
3152 | /* | |
3153 | * TBD. Retrieve the error details and decide what action | |
3154 | * needs to be taken. One of the actions could be to pass | |
3155 | * the error to the guest and have the guest driver recover | |
3156 | * from the error. This requires that PCIe capabilities be | |
3157 | * exposed to the guest. For now, we just terminate the | |
3158 | * guest to contain the error. | |
3159 | */ | |
3160 | ||
3161 | error_report("%s(%04x:%02x:%02x.%x) Unrecoverable error detected. " | |
3162 | "Please collect any data possible and then kill the guest", | |
3163 | __func__, vdev->host.domain, vdev->host.bus, | |
3164 | vdev->host.slot, vdev->host.function); | |
3165 | ||
3166 | vm_stop(RUN_STATE_INTERNAL_ERROR); | |
3167 | } | |
3168 | ||
3169 | /* | |
3170 | * Registers error notifier for devices supporting error recovery. | |
3171 | * If we encounter a failure in this function, we report an error | |
3172 | * and continue after disabling error recovery support for the | |
3173 | * device. | |
3174 | */ | |
3175 | static void vfio_register_err_notifier(VFIOPCIDevice *vdev) | |
3176 | { | |
3177 | int ret; | |
3178 | int argsz; | |
3179 | struct vfio_irq_set *irq_set; | |
3180 | int32_t *pfd; | |
3181 | ||
3182 | if (!vdev->pci_aer) { | |
3183 | return; | |
3184 | } | |
3185 | ||
3186 | if (event_notifier_init(&vdev->err_notifier, 0)) { | |
3187 | error_report("vfio: Unable to init event notifier for error detection"); | |
3188 | vdev->pci_aer = false; | |
3189 | return; | |
3190 | } | |
3191 | ||
3192 | argsz = sizeof(*irq_set) + sizeof(*pfd); | |
3193 | ||
3194 | irq_set = g_malloc0(argsz); | |
3195 | irq_set->argsz = argsz; | |
3196 | irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | | |
3197 | VFIO_IRQ_SET_ACTION_TRIGGER; | |
3198 | irq_set->index = VFIO_PCI_ERR_IRQ_INDEX; | |
3199 | irq_set->start = 0; | |
3200 | irq_set->count = 1; | |
3201 | pfd = (int32_t *)&irq_set->data; | |
3202 | ||
3203 | *pfd = event_notifier_get_fd(&vdev->err_notifier); | |
3204 | qemu_set_fd_handler(*pfd, vfio_err_notifier_handler, NULL, vdev); | |
3205 | ||
3206 | ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set); | |
3207 | if (ret) { | |
3208 | error_report("vfio: Failed to set up error notification"); | |
3209 | qemu_set_fd_handler(*pfd, NULL, NULL, vdev); | |
3210 | event_notifier_cleanup(&vdev->err_notifier); | |
3211 | vdev->pci_aer = false; | |
3212 | } | |
3213 | g_free(irq_set); | |
3214 | } | |
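| /* | 
|  * For reference, a minimal sketch of the signaling setup used above and in | 
|  * the teardown paths below.  The eventfd is carried in the flexible data[] | 
|  * member of struct vfio_irq_set; writing -1 instead of a valid eventfd | 
|  * releases the trigger again.  "fd" and "index" here are placeholders, not | 
|  * names from this file: | 
|  * | 
|  *   int argsz = sizeof(struct vfio_irq_set) + sizeof(int32_t); | 
|  *   struct vfio_irq_set *set = g_malloc0(argsz); | 
|  * | 
|  *   set->argsz = argsz; | 
|  *   set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER; | 
|  *   set->index = index;                  (e.g. VFIO_PCI_ERR_IRQ_INDEX) | 
|  *   set->start = 0; | 
|  *   set->count = 1; | 
|  *   *(int32_t *)&set->data = fd;         (or -1 to de-assign the trigger) | 
|  * | 
|  *   ioctl(device_fd, VFIO_DEVICE_SET_IRQS, set); | 
|  *   g_free(set); | 
|  */ | 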
3215 | ||
3216 | static void vfio_unregister_err_notifier(VFIOPCIDevice *vdev) | |
3217 | { | |
3218 | int argsz; | |
3219 | struct vfio_irq_set *irq_set; | |
3220 | int32_t *pfd; | |
3221 | int ret; | |
3222 | ||
3223 | if (!vdev->pci_aer) { | |
3224 | return; | |
3225 | } | |
3226 | ||
3227 | argsz = sizeof(*irq_set) + sizeof(*pfd); | |
3228 | ||
3229 | irq_set = g_malloc0(argsz); | |
3230 | irq_set->argsz = argsz; | |
3231 | irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | | |
3232 | VFIO_IRQ_SET_ACTION_TRIGGER; | |
3233 | irq_set->index = VFIO_PCI_ERR_IRQ_INDEX; | |
3234 | irq_set->start = 0; | |
3235 | irq_set->count = 1; | |
3236 | pfd = (int32_t *)&irq_set->data; | |
3237 | *pfd = -1; | |
3238 | ||
3239 | ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set); | |
3240 | if (ret) { | |
3241 | error_report("vfio: Failed to de-assign error fd: %m"); | |
3242 | } | |
3243 | g_free(irq_set); | |
3244 | qemu_set_fd_handler(event_notifier_get_fd(&vdev->err_notifier), | |
3245 | NULL, NULL, vdev); | |
3246 | event_notifier_cleanup(&vdev->err_notifier); | |
3247 | } | |
3248 | ||
3249 | static void vfio_req_notifier_handler(void *opaque) | |
3250 | { | |
3251 | VFIOPCIDevice *vdev = opaque; | |
3252 | ||
3253 | if (!event_notifier_test_and_clear(&vdev->req_notifier)) { | |
3254 | return; | |
3255 | } | |
3256 | ||
3257 | qdev_unplug(&vdev->pdev.qdev, NULL); | |
3258 | } | |
3259 | ||
3260 | static void vfio_register_req_notifier(VFIOPCIDevice *vdev) | |
3261 | { | |
3262 | struct vfio_irq_info irq_info = { .argsz = sizeof(irq_info), | |
3263 | .index = VFIO_PCI_REQ_IRQ_INDEX }; | |
3264 | int argsz; | |
3265 | struct vfio_irq_set *irq_set; | |
3266 | int32_t *pfd; | |
3267 | ||
3268 | if (!(vdev->features & VFIO_FEATURE_ENABLE_REQ)) { | |
3269 | return; | |
3270 | } | |
3271 | ||
3272 | if (ioctl(vdev->vbasedev.fd, | |
3273 | VFIO_DEVICE_GET_IRQ_INFO, &irq_info) < 0 || irq_info.count < 1) { | |
3274 | return; | |
3275 | } | |
3276 | ||
3277 | if (event_notifier_init(&vdev->req_notifier, 0)) { | |
3278 | error_report("vfio: Unable to init event notifier for device request"); | |
3279 | return; | |
3280 | } | |
3281 | ||
3282 | argsz = sizeof(*irq_set) + sizeof(*pfd); | |
3283 | ||
3284 | irq_set = g_malloc0(argsz); | |
3285 | irq_set->argsz = argsz; | |
3286 | irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | | |
3287 | VFIO_IRQ_SET_ACTION_TRIGGER; | |
3288 | irq_set->index = VFIO_PCI_REQ_IRQ_INDEX; | |
3289 | irq_set->start = 0; | |
3290 | irq_set->count = 1; | |
3291 | pfd = (int32_t *)&irq_set->data; | |
3292 | ||
3293 | *pfd = event_notifier_get_fd(&vdev->req_notifier); | |
3294 | qemu_set_fd_handler(*pfd, vfio_req_notifier_handler, NULL, vdev); | |
3295 | ||
3296 | if (ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set)) { | |
3297 | error_report("vfio: Failed to set up device request notification"); | |
3298 | qemu_set_fd_handler(*pfd, NULL, NULL, vdev); | |
3299 | event_notifier_cleanup(&vdev->req_notifier); | |
3300 | } else { | |
3301 | vdev->req_enabled = true; | |
3302 | } | |
3303 | ||
3304 | g_free(irq_set); | |
3305 | } | |
3306 | ||
3307 | static void vfio_unregister_req_notifier(VFIOPCIDevice *vdev) | |
3308 | { | |
3309 | int argsz; | |
3310 | struct vfio_irq_set *irq_set; | |
3311 | int32_t *pfd; | |
3312 | ||
3313 | if (!vdev->req_enabled) { | |
3314 | return; | |
3315 | } | |
3316 | ||
3317 | argsz = sizeof(*irq_set) + sizeof(*pfd); | |
3318 | ||
3319 | irq_set = g_malloc0(argsz); | |
3320 | irq_set->argsz = argsz; | |
3321 | irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | | |
3322 | VFIO_IRQ_SET_ACTION_TRIGGER; | |
3323 | irq_set->index = VFIO_PCI_REQ_IRQ_INDEX; | |
3324 | irq_set->start = 0; | |
3325 | irq_set->count = 1; | |
3326 | pfd = (int32_t *)&irq_set->data; | |
3327 | *pfd = -1; | |
3328 | ||
3329 | if (ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set)) { | |
3330 | error_report("vfio: Failed to de-assign device request fd: %m"); | |
3331 | } | |
3332 | g_free(irq_set); | |
3333 | qemu_set_fd_handler(event_notifier_get_fd(&vdev->req_notifier), | |
3334 | NULL, NULL, vdev); | |
3335 | event_notifier_cleanup(&vdev->req_notifier); | |
3336 | ||
3337 | vdev->req_enabled = false; | |
3338 | } | |
3339 | ||
3340 | /* | |
3341 | * AMD Radeon PCI config reset, based on Linux: | |
3342 | * drivers/gpu/drm/radeon/ci_smc.c:ci_is_smc_running() | |
3343 | * drivers/gpu/drm/radeon/radeon_device.c:radeon_pci_config_reset | |
3344 | * drivers/gpu/drm/radeon/ci_smc.c:ci_reset_smc() | |
3345 | * drivers/gpu/drm/radeon/ci_smc.c:ci_stop_smc_clock() | |
3346 | * IDs: include/drm/drm_pciids.h | |
3347 | * Registers: http://cgit.freedesktop.org/~agd5f/linux/commit/?id=4e2aa447f6f0 | |
3348 | * | |
3349 | * Bonaire and Hawaii GPUs do not respond to a bus reset. This is a bug in the | |
3350 | * hardware that should be fixed on future ASICs. The symptom of this is that | |
3351 | * once the accelerated driver loads, Windows guests will BSOD on subsequent | 
3352 | * attempts to load the driver, such as after VM reset or shutdown/restart. To | 
3353 | * work around this, we do an AMD specific PCI config reset, followed by an SMC | |
3354 | * reset. The PCI config reset only works if SMC firmware is running, so we | |
3355 | * have a dependency on the state of the device as to whether this reset will | |
3356 | * be effective. There are still cases where we won't be able to kick the | |
3357 | * device into working, but this greatly improves the usability overall. The | |
3358 | * config reset magic is relatively common on AMD GPUs, but the setup and SMC | |
3359 | * poking are largely ASIC specific. | 
3360 | */ | |
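| /* | 
|  * In outline, vfio_radeon_reset() below: restricts the command register to | 
|  * memory accesses, checks that the SMC is running, forces the reset scope to | 
|  * GFX only, writes the config reset magic at config offset 0x7c, polls a | 
|  * BAR5 register until it reads back something other than -1, and finally | 
|  * resets the SMC and gates its clock. | 
|  */ | 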
3361 | static bool vfio_radeon_smc_is_running(VFIOPCIDevice *vdev) | |
3362 | { | |
3363 | uint32_t clk, pc_c; | |
3364 | ||
3365 | /* | |
3366 | * Registers 200h and 204h are index and data registers for accessing | |
3367 | * indirect configuration registers within the device. | |
3368 | */ | |
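| /* | 
|  * For example, to read an indirect register: write its index to 0x200, then | 
|  * read the value back from 0x204.  Writes to the selected register likewise | 
|  * go through 0x204, as in the helpers below. | 
|  */ | 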
3369 | vfio_region_write(&vdev->bars[5].region, 0x200, 0x80000004, 4); | |
3370 | clk = vfio_region_read(&vdev->bars[5].region, 0x204, 4); | |
3371 | vfio_region_write(&vdev->bars[5].region, 0x200, 0x80000370, 4); | |
3372 | pc_c = vfio_region_read(&vdev->bars[5].region, 0x204, 4); | |
3373 | ||
3374 | return (!(clk & 1) && (0x20100 <= pc_c)); | |
3375 | } | |
3376 | ||
3377 | /* | |
3378 | * The scope of a config reset is controlled by a mode bit in the misc register | |
3379 | * and a fuse, exposed as a bit in another register. The fuse is the default | |
3380 | * (0 = GFX, 1 = whole GPU), the misc bit is a toggle, with the formula | 
3381 | * scope = !(misc ^ fuse), where the resulting scope is defined the same as | |
3382 | * the fuse. A truth table therefore tells us that if misc == fuse, we need | |
3383 | * to flip the value of the bit in the misc register. | |
3384 | */ | |
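| /* | 
|  * Spelled out, with scope 0 = GFX only and 1 = whole GPU: | 
|  * | 
|  *   misc  fuse  ->  scope = !(misc ^ fuse) | 
|  *    0     0             1   (whole GPU) | 
|  *    0     1             0   (GFX only) | 
|  *    1     0             0   (GFX only) | 
|  *    1     1             1   (whole GPU) | 
|  * | 
|  * We only want to reset the GFX function, so the misc bit is flipped | 
|  * whenever it matches the fuse bit. | 
|  */ | 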
3385 | static void vfio_radeon_set_gfx_only_reset(VFIOPCIDevice *vdev) | |
3386 | { | |
3387 | uint32_t misc, fuse; | |
3388 | bool a, b; | |
3389 | ||
3390 | vfio_region_write(&vdev->bars[5].region, 0x200, 0xc00c0000, 4); | |
3391 | fuse = vfio_region_read(&vdev->bars[5].region, 0x204, 4); | |
3392 | b = fuse & 64; | |
3393 | ||
3394 | vfio_region_write(&vdev->bars[5].region, 0x200, 0xc0000010, 4); | |
3395 | misc = vfio_region_read(&vdev->bars[5].region, 0x204, 4); | |
3396 | a = misc & 2; | |
3397 | ||
3398 | if (a == b) { | |
3399 | vfio_region_write(&vdev->bars[5].region, 0x204, misc ^ 2, 4); | |
3400 | vfio_region_read(&vdev->bars[5].region, 0x204, 4); /* flush */ | |
3401 | } | |
3402 | } | |
3403 | ||
3404 | static int vfio_radeon_reset(VFIOPCIDevice *vdev) | |
3405 | { | |
3406 | PCIDevice *pdev = &vdev->pdev; | |
3407 | int i, ret = 0; | |
3408 | uint32_t data; | |
3409 | ||
3410 | /* Defer to a kernel implemented reset */ | |
3411 | if (vdev->vbasedev.reset_works) { | |
3412 | return -ENODEV; | |
3413 | } | |
3414 | ||
3415 | /* Enable only memory BAR access */ | |
3416 | vfio_pci_write_config(pdev, PCI_COMMAND, PCI_COMMAND_MEMORY, 2); | |
3417 | ||
3418 | /* Reset only works if SMC firmware is loaded and running */ | |
3419 | if (!vfio_radeon_smc_is_running(vdev)) { | |
3420 | ret = -EINVAL; | |
3421 | goto out; | |
3422 | } | |
3423 | ||
3424 | /* Make sure only the GFX function is reset */ | |
3425 | vfio_radeon_set_gfx_only_reset(vdev); | |
3426 | ||
3427 | /* AMD PCI config reset */ | |
3428 | vfio_pci_write_config(pdev, 0x7c, 0x39d5e86b, 4); | |
3429 | usleep(100); | |
3430 | ||
3431 | /* Read back the memory size to make sure we're out of reset */ | |
3432 | for (i = 0; i < 100000; i++) { | |
3433 | if (vfio_region_read(&vdev->bars[5].region, 0x5428, 4) != 0xffffffff) { | |
3434 | break; | |
3435 | } | |
3436 | usleep(1); | |
3437 | } | |
3438 | ||
3439 | /* Reset SMC */ | |
3440 | vfio_region_write(&vdev->bars[5].region, 0x200, 0x80000000, 4); | |
3441 | data = vfio_region_read(&vdev->bars[5].region, 0x204, 4); | |
3442 | data |= 1; | |
3443 | vfio_region_write(&vdev->bars[5].region, 0x204, data, 4); | |
3444 | ||
3445 | /* Disable SMC clock */ | |
3446 | vfio_region_write(&vdev->bars[5].region, 0x200, 0x80000004, 4); | |
3447 | data = vfio_region_read(&vdev->bars[5].region, 0x204, 4); | |
3448 | data |= 1; | |
3449 | vfio_region_write(&vdev->bars[5].region, 0x204, data, 4); | |
3450 | ||
3451 | out: | |
3452 | /* Restore PCI command register */ | |
3453 | vfio_pci_write_config(pdev, PCI_COMMAND, 0, 2); | |
3454 | ||
3455 | return ret; | |
3456 | } | |
3457 | ||
3458 | static void vfio_setup_resetfn(VFIOPCIDevice *vdev) | |
3459 | { | |
3460 | PCIDevice *pdev = &vdev->pdev; | |
3461 | uint16_t vendor, device; | |
3462 | ||
3463 | vendor = pci_get_word(pdev->config + PCI_VENDOR_ID); | |
3464 | device = pci_get_word(pdev->config + PCI_DEVICE_ID); | |
3465 | ||
3466 | switch (vendor) { | |
3467 | case 0x1002: | |
3468 | switch (device) { | |
3469 | /* Bonaire */ | |
3470 | case 0x6649: /* Bonaire [FirePro W5100] */ | |
3471 | case 0x6650: | |
3472 | case 0x6651: | |
3473 | case 0x6658: /* Bonaire XTX [Radeon R7 260X] */ | |
3474 | case 0x665c: /* Bonaire XT [Radeon HD 7790/8770 / R9 260 OEM] */ | |
3475 | case 0x665d: /* Bonaire [Radeon R7 200 Series] */ | |
3476 | /* Hawaii */ | |
3477 | case 0x67A0: /* Hawaii XT GL [FirePro W9100] */ | |
3478 | case 0x67A1: /* Hawaii PRO GL [FirePro W8100] */ | |
3479 | case 0x67A2: | |
3480 | case 0x67A8: | |
3481 | case 0x67A9: | |
3482 | case 0x67AA: | |
3483 | case 0x67B0: /* Hawaii XT [Radeon R9 290X] */ | |
3484 | case 0x67B1: /* Hawaii PRO [Radeon R9 290] */ | |
3485 | case 0x67B8: | |
3486 | case 0x67B9: | |
3487 | case 0x67BA: | |
3488 | case 0x67BE: | |
3489 | vdev->resetfn = vfio_radeon_reset; | |
3490 | break; | |
3491 | } | |
3492 | break; | |
3493 | } | |
3494 | } | |
3495 | ||
3496 | static int vfio_initfn(PCIDevice *pdev) | |
3497 | { | |
3498 | VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev); | |
3499 | VFIODevice *vbasedev_iter; | |
3500 | VFIOGroup *group; | |
3501 | char path[PATH_MAX], iommu_group_path[PATH_MAX], *group_name; | |
3502 | ssize_t len; | |
3503 | struct stat st; | |
3504 | int groupid; | |
3505 | int ret; | |
3506 | ||
3507 | /* Check that the host device exists */ | |
3508 | snprintf(path, sizeof(path), | |
3509 | "/sys/bus/pci/devices/%04x:%02x:%02x.%01x/", | |
3510 | vdev->host.domain, vdev->host.bus, vdev->host.slot, | |
3511 | vdev->host.function); | |
3512 | if (stat(path, &st) < 0) { | |
3513 | error_report("vfio: error: no such host device: %s", path); | |
3514 | return -errno; | |
3515 | } | |
3516 | ||
3517 | vdev->vbasedev.ops = &vfio_pci_ops; | |
3518 | ||
3519 | vdev->vbasedev.type = VFIO_DEVICE_TYPE_PCI; | |
3520 | vdev->vbasedev.name = g_strdup_printf("%04x:%02x:%02x.%01x", | |
3521 | vdev->host.domain, vdev->host.bus, | |
3522 | vdev->host.slot, vdev->host.function); | |
3523 | ||
3524 | strncat(path, "iommu_group", sizeof(path) - strlen(path) - 1); | |
3525 | ||
3526 | len = readlink(path, iommu_group_path, sizeof(iommu_group_path)); | 
3527 | if (len <= 0 || len >= sizeof(iommu_group_path)) { | 
3528 | error_report("vfio: error no iommu_group for device"); | |
3529 | return len < 0 ? -errno : -ENAMETOOLONG; | |
3530 | } | |
3531 | ||
3532 | iommu_group_path[len] = 0; | |
3533 | group_name = basename(iommu_group_path); | |
3534 | ||
3535 | if (sscanf(group_name, "%d", &groupid) != 1) { | |
3536 | error_report("vfio: error reading %s: %m", path); | |
3537 | return -errno; | |
3538 | } | |
3539 | ||
3540 | trace_vfio_initfn(vdev->vbasedev.name, groupid); | |
3541 | ||
3542 | group = vfio_get_group(groupid, pci_device_iommu_address_space(pdev)); | |
3543 | if (!group) { | |
3544 | error_report("vfio: failed to get group %d", groupid); | |
3545 | return -ENOENT; | |
3546 | } | |
3547 | ||
3548 | snprintf(path, sizeof(path), "%04x:%02x:%02x.%01x", | |
3549 | vdev->host.domain, vdev->host.bus, vdev->host.slot, | |
3550 | vdev->host.function); | |
3551 | ||
3552 | QLIST_FOREACH(vbasedev_iter, &group->device_list, next) { | |
3553 | if (strcmp(vbasedev_iter->name, vdev->vbasedev.name) == 0) { | |
3554 | error_report("vfio: error: device %s is already attached", path); | |
3555 | vfio_put_group(group); | |
3556 | return -EBUSY; | |
3557 | } | |
3558 | } | |
3559 | ||
3560 | ret = vfio_get_device(group, path, &vdev->vbasedev); | |
3561 | if (ret) { | |
3562 | error_report("vfio: failed to get device %s", path); | |
3563 | vfio_put_group(group); | |
3564 | return ret; | |
3565 | } | |
3566 | ||
3567 | ret = vfio_populate_device(vdev); | |
3568 | if (ret) { | |
3569 | return ret; | |
3570 | } | |
3571 | ||
3572 | /* Get a copy of config space */ | |
3573 | ret = pread(vdev->vbasedev.fd, vdev->pdev.config, | |
3574 | MIN(pci_config_size(&vdev->pdev), vdev->config_size), | |
3575 | vdev->config_offset); | |
3576 | if (ret < (int)MIN(pci_config_size(&vdev->pdev), vdev->config_size)) { | |
3577 | ret = ret < 0 ? -errno : -EFAULT; | |
3578 | error_report("vfio: Failed to read device config space"); | |
3579 | return ret; | |
3580 | } | |
3581 | ||
3582 | /* vfio emulates a lot for us, but some bits need extra love */ | |
3583 | vdev->emulated_config_bits = g_malloc0(vdev->config_size); | |
3584 | ||
3585 | /* QEMU can choose to expose the ROM or not */ | |
3586 | memset(vdev->emulated_config_bits + PCI_ROM_ADDRESS, 0xff, 4); | |
3587 | ||
3588 | /* QEMU can change multi-function devices to single function, or reverse */ | |
3589 | vdev->emulated_config_bits[PCI_HEADER_TYPE] = | |
3590 | PCI_HEADER_TYPE_MULTI_FUNCTION; | |
3591 | ||
3592 | /* Restore or clear multifunction, this is always controlled by QEMU */ | |
3593 | if (vdev->pdev.cap_present & QEMU_PCI_CAP_MULTIFUNCTION) { | |
3594 | vdev->pdev.config[PCI_HEADER_TYPE] |= PCI_HEADER_TYPE_MULTI_FUNCTION; | |
3595 | } else { | |
3596 | vdev->pdev.config[PCI_HEADER_TYPE] &= ~PCI_HEADER_TYPE_MULTI_FUNCTION; | |
3597 | } | |
3598 | ||
3599 | /* | |
3600 | * Clear host resource mapping info. If we choose not to register a | |
3601 | * BAR, such as might be the case with the option ROM, we can get | |
3602 | * confusing, unwritable, residual addresses from the host here. | |
3603 | */ | |
3604 | memset(&vdev->pdev.config[PCI_BASE_ADDRESS_0], 0, 24); | |
3605 | memset(&vdev->pdev.config[PCI_ROM_ADDRESS], 0, 4); | |
3606 | ||
3607 | vfio_pci_size_rom(vdev); | |
3608 | ||
3609 | ret = vfio_early_setup_msix(vdev); | |
3610 | if (ret) { | |
3611 | return ret; | |
3612 | } | |
3613 | ||
3614 | vfio_map_bars(vdev); | |
3615 | ||
3616 | ret = vfio_add_capabilities(vdev); | |
3617 | if (ret) { | |
3618 | goto out_teardown; | |
3619 | } | |
3620 | ||
3621 | /* QEMU emulates all of MSI & MSIX */ | |
3622 | if (pdev->cap_present & QEMU_PCI_CAP_MSIX) { | |
3623 | memset(vdev->emulated_config_bits + pdev->msix_cap, 0xff, | |
3624 | MSIX_CAP_LENGTH); | |
3625 | } | |
3626 | ||
3627 | if (pdev->cap_present & QEMU_PCI_CAP_MSI) { | |
3628 | memset(vdev->emulated_config_bits + pdev->msi_cap, 0xff, | |
3629 | vdev->msi_cap_size); | |
3630 | } | |
3631 | ||
3632 | if (vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1)) { | |
3633 | vdev->intx.mmap_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL, | |
3634 | vfio_intx_mmap_enable, vdev); | |
3635 | pci_device_set_intx_routing_notifier(&vdev->pdev, vfio_update_irq); | |
3636 | ret = vfio_enable_intx(vdev); | |
3637 | if (ret) { | |
3638 | goto out_teardown; | |
3639 | } | |
3640 | } | |
3641 | ||
3642 | vfio_register_err_notifier(vdev); | |
3643 | vfio_register_req_notifier(vdev); | |
3644 | vfio_setup_resetfn(vdev); | |
3645 | ||
3646 | return 0; | |
3647 | ||
3648 | out_teardown: | |
3649 | pci_device_set_intx_routing_notifier(&vdev->pdev, NULL); | |
3650 | vfio_teardown_msi(vdev); | |
3651 | vfio_unregister_bars(vdev); | |
3652 | return ret; | |
3653 | } | |
3654 | ||
3655 | static void vfio_instance_finalize(Object *obj) | |
3656 | { | |
3657 | PCIDevice *pci_dev = PCI_DEVICE(obj); | |
3658 | VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pci_dev); | |
3659 | VFIOGroup *group = vdev->vbasedev.group; | |
3660 | ||
3661 | vfio_unmap_bars(vdev); | |
3662 | g_free(vdev->emulated_config_bits); | |
3663 | g_free(vdev->rom); | |
3664 | vfio_put_device(vdev); | |
3665 | vfio_put_group(group); | |
3666 | } | |
3667 | ||
3668 | static void vfio_exitfn(PCIDevice *pdev) | |
3669 | { | |
3670 | VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev); | |
3671 | ||
3672 | vfio_unregister_req_notifier(vdev); | |
3673 | vfio_unregister_err_notifier(vdev); | |
3674 | pci_device_set_intx_routing_notifier(&vdev->pdev, NULL); | |
3675 | vfio_disable_interrupts(vdev); | |
3676 | if (vdev->intx.mmap_timer) { | |
3677 | timer_free(vdev->intx.mmap_timer); | |
3678 | } | |
3679 | vfio_teardown_msi(vdev); | |
3680 | vfio_unregister_bars(vdev); | |
3681 | } | |
3682 | ||
3683 | static void vfio_pci_reset(DeviceState *dev) | |
3684 | { | |
3685 | PCIDevice *pdev = DO_UPCAST(PCIDevice, qdev, dev); | |
3686 | VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev); | |
3687 | ||
3688 | trace_vfio_pci_reset(vdev->vbasedev.name); | |
3689 | ||
3690 | vfio_pci_pre_reset(vdev); | |
3691 | ||
3692 | if (vdev->resetfn && !vdev->resetfn(vdev)) { | |
3693 | goto post_reset; | |
3694 | } | |
3695 | ||
3696 | if (vdev->vbasedev.reset_works && | |
3697 | (vdev->has_flr || !vdev->has_pm_reset) && | |
3698 | !ioctl(vdev->vbasedev.fd, VFIO_DEVICE_RESET)) { | |
3699 | trace_vfio_pci_reset_flr(vdev->vbasedev.name); | |
3700 | goto post_reset; | |
3701 | } | |
3702 | ||
3703 | /* See if we can do our own bus reset */ | |
3704 | if (!vfio_pci_hot_reset_one(vdev)) { | |
3705 | goto post_reset; | |
3706 | } | |
3707 | ||
3708 | /* If nothing else works and the device supports PM reset, use it */ | |
3709 | if (vdev->vbasedev.reset_works && vdev->has_pm_reset && | |
3710 | !ioctl(vdev->vbasedev.fd, VFIO_DEVICE_RESET)) { | |
3711 | trace_vfio_pci_reset_pm(vdev->vbasedev.name); | |
3712 | goto post_reset; | |
3713 | } | |
3714 | ||
3715 | post_reset: | |
3716 | vfio_pci_post_reset(vdev); | |
3717 | } | |
3718 | ||
3719 | static void vfio_instance_init(Object *obj) | |
3720 | { | |
3721 | PCIDevice *pci_dev = PCI_DEVICE(obj); | |
3722 | VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, PCI_DEVICE(obj)); | |
3723 | ||
3724 | device_add_bootindex_property(obj, &vdev->bootindex, | |
3725 | "bootindex", NULL, | |
3726 | &pci_dev->qdev, NULL); | |
3727 | } | |
3728 | ||
3729 | static Property vfio_pci_dev_properties[] = { | |
3730 | DEFINE_PROP_PCI_HOST_DEVADDR("host", VFIOPCIDevice, host), | |
3731 | DEFINE_PROP_UINT32("x-intx-mmap-timeout-ms", VFIOPCIDevice, | |
3732 | intx.mmap_timeout, 1100), | |
3733 | DEFINE_PROP_BIT("x-vga", VFIOPCIDevice, features, | |
3734 | VFIO_FEATURE_ENABLE_VGA_BIT, false), | |
3735 | DEFINE_PROP_BIT("x-req", VFIOPCIDevice, features, | |
3736 | VFIO_FEATURE_ENABLE_REQ_BIT, true), | |
3737 | DEFINE_PROP_BOOL("x-mmap", VFIOPCIDevice, vbasedev.allow_mmap, true), | |
3738 | /* | |
3739 | * TODO - support passed fds... is this necessary? | |
3740 | * DEFINE_PROP_STRING("vfiofd", VFIOPCIDevice, vfiofd_name), | |
3741 | * DEFINE_PROP_STRING("vfiogroupfd", VFIOPCIDevice, vfiogroupfd_name), | 
3742 | */ | |
3743 | DEFINE_PROP_END_OF_LIST(), | |
3744 | }; | |
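| /* | 
|  * A typical invocation (host address chosen purely for illustration): | 
|  * | 
|  *   -device vfio-pci,host=0000:01:00.0 | 
|  * | 
|  * with the experimental x-* properties above available for tuning, e.g. | 
|  * | 
|  *   -device vfio-pci,host=0000:01:00.0,x-vga=on | 
|  */ | 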
3745 | ||
3746 | static const VMStateDescription vfio_pci_vmstate = { | |
3747 | .name = "vfio-pci", | |
3748 | .unmigratable = 1, | |
3749 | }; | |
3750 | ||
3751 | static void vfio_pci_dev_class_init(ObjectClass *klass, void *data) | |
3752 | { | |
3753 | DeviceClass *dc = DEVICE_CLASS(klass); | |
3754 | PCIDeviceClass *pdc = PCI_DEVICE_CLASS(klass); | |
3755 | ||
3756 | dc->reset = vfio_pci_reset; | |
3757 | dc->props = vfio_pci_dev_properties; | |
3758 | dc->vmsd = &vfio_pci_vmstate; | |
3759 | dc->desc = "VFIO-based PCI device assignment"; | |
3760 | set_bit(DEVICE_CATEGORY_MISC, dc->categories); | |
3761 | pdc->init = vfio_initfn; | |
3762 | pdc->exit = vfio_exitfn; | |
3763 | pdc->config_read = vfio_pci_read_config; | |
3764 | pdc->config_write = vfio_pci_write_config; | |
3765 | pdc->is_express = 1; /* We might be */ | |
3766 | } | |
3767 | ||
3768 | static const TypeInfo vfio_pci_dev_info = { | |
3769 | .name = "vfio-pci", | |
3770 | .parent = TYPE_PCI_DEVICE, | |
3771 | .instance_size = sizeof(VFIOPCIDevice), | |
3772 | .class_init = vfio_pci_dev_class_init, | |
3773 | .instance_init = vfio_instance_init, | |
3774 | .instance_finalize = vfio_instance_finalize, | |
3775 | }; | |
3776 | ||
3777 | static void register_vfio_pci_dev_type(void) | |
3778 | { | |
3779 | type_register_static(&vfio_pci_dev_info); | |
3780 | } | |
3781 | ||
3782 | type_init(register_vfio_pci_dev_type) |