/*
 * vfio based device assignment support
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Alex Williamson <alex.williamson@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Based on qemu-kvm device-assignment:
 *  Adapted for KVM by Qumranet.
 *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
 *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
 *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
 *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
 *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
 */

#include <dirent.h>
#include <linux/vfio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

#include "config.h"
#include "exec/address-spaces.h"
#include "exec/memory.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/pci/pci.h"
#include "qemu-common.h"
#include "qemu/error-report.h"
#include "qemu/event_notifier.h"
#include "qemu/queue.h"
#include "qemu/range.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"

/* #define DEBUG_VFIO */
#ifdef DEBUG_VFIO
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, "vfio: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

/* Extra debugging, trap acceleration paths for more logging */
#define VFIO_ALLOW_MMAP 1
#define VFIO_ALLOW_KVM_INTX 1

struct VFIODevice;

typedef struct VFIOQuirk {
    MemoryRegion mem;
    struct VFIODevice *vdev;
    QLIST_ENTRY(VFIOQuirk) next;
    struct {
        uint32_t base_offset:TARGET_PAGE_BITS;
        uint32_t address_offset:TARGET_PAGE_BITS;
        uint32_t address_size:3;
        uint32_t bar:3;

        uint32_t address_match;
        uint32_t address_mask;

        uint32_t address_val:TARGET_PAGE_BITS;
        uint32_t data_offset:TARGET_PAGE_BITS;
        uint32_t data_size:3;

        uint8_t flags;
        uint8_t read_flags;
        uint8_t write_flags;
    } data;
} VFIOQuirk;

typedef struct VFIOBAR {
    off_t fd_offset; /* offset of BAR within device fd */
    int fd; /* device fd, allows us to pass VFIOBAR as opaque data */
    MemoryRegion mem; /* slow, read/write access */
    MemoryRegion mmap_mem; /* direct mapped access */
    void *mmap;
    size_t size;
    uint32_t flags; /* VFIO region flags (rd/wr/mmap) */
    uint8_t nr; /* cache the BAR number for debug */
    bool ioport;
    bool mem64;
    QLIST_HEAD(, VFIOQuirk) quirks;
} VFIOBAR;

typedef struct VFIOVGARegion {
    MemoryRegion mem;
    off_t offset;
    int nr;
    QLIST_HEAD(, VFIOQuirk) quirks;
} VFIOVGARegion;

typedef struct VFIOVGA {
    off_t fd_offset;
    int fd;
    VFIOVGARegion region[QEMU_PCI_VGA_NUM_REGIONS];
} VFIOVGA;

typedef struct VFIOINTx {
    bool pending; /* interrupt pending */
    bool kvm_accel; /* set when QEMU bypass through KVM enabled */
    uint8_t pin; /* which pin to pull for qemu_set_irq */
    EventNotifier interrupt; /* eventfd triggered on interrupt */
    EventNotifier unmask; /* eventfd for unmask on QEMU bypass */
    PCIINTxRoute route; /* routing info for QEMU bypass */
    uint32_t mmap_timeout; /* delay to re-enable mmaps after interrupt */
    QEMUTimer *mmap_timer; /* enable mmaps after periods w/o interrupts */
} VFIOINTx;

typedef struct VFIOMSIVector {
    EventNotifier interrupt; /* eventfd triggered on interrupt */
    struct VFIODevice *vdev; /* back pointer to device */
    MSIMessage msg; /* cache the MSI message so we know when it changes */
    int virq; /* KVM irqchip route for QEMU bypass */
    bool use;
} VFIOMSIVector;

enum {
    VFIO_INT_NONE = 0,
    VFIO_INT_INTx = 1,
    VFIO_INT_MSI  = 2,
    VFIO_INT_MSIX = 3,
};

struct VFIOGroup;

typedef struct VFIOContainer {
    int fd; /* /dev/vfio/vfio, empowered by the attached groups */
    struct {
        /* enable abstraction to support various iommu backends */
        union {
            MemoryListener listener; /* Used by type1 iommu */
        };
        void (*release)(struct VFIOContainer *);
    } iommu_data;
    QLIST_HEAD(, VFIOGroup) group_list;
    QLIST_ENTRY(VFIOContainer) next;
} VFIOContainer;

/* Cache of MSI-X setup plus extra mmap and memory region for split BAR map */
typedef struct VFIOMSIXInfo {
    uint8_t table_bar;
    uint8_t pba_bar;
    uint16_t entries;
    uint32_t table_offset;
    uint32_t pba_offset;
    MemoryRegion mmap_mem;
    void *mmap;
} VFIOMSIXInfo;

typedef struct VFIODevice {
    PCIDevice pdev;
    int fd;
    VFIOINTx intx;
    unsigned int config_size;
    uint8_t *emulated_config_bits; /* QEMU emulated bits, little-endian */
    off_t config_offset; /* Offset of config space region within device fd */
    unsigned int rom_size;
    off_t rom_offset; /* Offset of ROM region within device fd */
    void *rom;
    int msi_cap_size;
    VFIOMSIVector *msi_vectors;
    VFIOMSIXInfo *msix;
    int nr_vectors; /* Number of MSI/MSIX vectors currently in use */
    int interrupt; /* Current interrupt type */
    VFIOBAR bars[PCI_NUM_REGIONS - 1]; /* No ROM */
    VFIOVGA vga; /* 0xa0000, 0x3b0, 0x3c0 */
    PCIHostDeviceAddress host;
    QLIST_ENTRY(VFIODevice) next;
    struct VFIOGroup *group;
    EventNotifier err_notifier;
    uint32_t features;
#define VFIO_FEATURE_ENABLE_VGA_BIT 0
#define VFIO_FEATURE_ENABLE_VGA (1 << VFIO_FEATURE_ENABLE_VGA_BIT)
    int32_t bootindex;
    uint8_t pm_cap;
    bool reset_works;
    bool has_vga;
    bool pci_aer;
    bool has_flr;
    bool has_pm_reset;
    bool needs_reset;
} VFIODevice;

typedef struct VFIOGroup {
    int fd;
    int groupid;
    VFIOContainer *container;
    QLIST_HEAD(, VFIODevice) device_list;
    QLIST_ENTRY(VFIOGroup) next;
    QLIST_ENTRY(VFIOGroup) container_next;
} VFIOGroup;

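/*
 * For context, a rough sketch of the standard VFIO bootstrap sequence these
 * structures track, per the kernel's VFIO documentation (group number and
 * device address are illustrative):
 *
 *   container = open("/dev/vfio/vfio", O_RDWR);
 *   group = open("/dev/vfio/26", O_RDWR);
 *   ioctl(group, VFIO_GROUP_SET_CONTAINER, &container);
 *   ioctl(container, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU);
 *   device = ioctl(group, VFIO_GROUP_GET_DEVICE_FD, "0000:06:0d.0");
 */
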
#define MSIX_CAP_LENGTH 12

static QLIST_HEAD(, VFIOContainer)
    container_list = QLIST_HEAD_INITIALIZER(container_list);

static QLIST_HEAD(, VFIOGroup)
    group_list = QLIST_HEAD_INITIALIZER(group_list);

static void vfio_disable_interrupts(VFIODevice *vdev);
static uint32_t vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len);
static void vfio_pci_write_config(PCIDevice *pdev, uint32_t addr,
                                  uint32_t val, int len);
static void vfio_mmap_set_enabled(VFIODevice *vdev, bool enabled);

/*
 * Common VFIO interrupt disable
 */
static void vfio_disable_irqindex(VFIODevice *vdev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER,
        .index = index,
        .start = 0,
        .count = 0,
    };

    ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

/*
 * INTx
 */
static void vfio_unmask_intx(VFIODevice *vdev)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK,
        .index = VFIO_PCI_INTX_IRQ_INDEX,
        .start = 0,
        .count = 1,
    };

    ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

#ifdef CONFIG_KVM /* Unused outside of CONFIG_KVM code */
static void vfio_mask_intx(VFIODevice *vdev)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK,
        .index = VFIO_PCI_INTX_IRQ_INDEX,
        .start = 0,
        .count = 1,
    };

    ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}
#endif

/*
 * Disabling BAR mmapping can be slow, but toggling it around INTx can
 * also be a huge overhead.  We try to get the best of both worlds by
 * waiting until an interrupt to disable mmaps (subsequent transitions
 * to the same state are effectively no overhead).  If the interrupt has
 * been serviced and the time gap is long enough, we re-enable mmaps for
 * performance.  This works well for things like graphics cards, which
 * may not use their interrupt at all and are penalized to an unusable
 * level by read/write BAR traps.  Other devices, like NICs, have more
 * regular interrupts and see much better latency by staying in non-mmap
 * mode.  We therefore set the default mmap_timeout such that a ping
 * is just enough to keep the mmap disabled.  Users can experiment with
 * other options with the x-intx-mmap-timeout-ms parameter (a value of
 * zero disables the timer).
 */
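/*
 * For example, the timer can be disabled entirely from the command line
 * (host address is illustrative):
 *
 *   -device vfio-pci,host=01:00.0,x-intx-mmap-timeout-ms=0
 */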
static void vfio_intx_mmap_enable(void *opaque)
{
    VFIODevice *vdev = opaque;

    if (vdev->intx.pending) {
        timer_mod(vdev->intx.mmap_timer,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vdev->intx.mmap_timeout);
        return;
    }

    vfio_mmap_set_enabled(vdev, true);
}

static void vfio_intx_interrupt(void *opaque)
{
    VFIODevice *vdev = opaque;

    if (!event_notifier_test_and_clear(&vdev->intx.interrupt)) {
        return;
    }

    DPRINTF("%s(%04x:%02x:%02x.%x) Pin %c\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function,
            'A' + vdev->intx.pin);

    vdev->intx.pending = true;
    pci_irq_assert(&vdev->pdev);
    vfio_mmap_set_enabled(vdev, false);
    if (vdev->intx.mmap_timeout) {
        timer_mod(vdev->intx.mmap_timer,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vdev->intx.mmap_timeout);
    }
}

static void vfio_eoi(VFIODevice *vdev)
{
    if (!vdev->intx.pending) {
        return;
    }

    DPRINTF("%s(%04x:%02x:%02x.%x) EOI\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);

    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);
    vfio_unmask_intx(vdev);
}

static void vfio_enable_intx_kvm(VFIODevice *vdev)
{
#ifdef CONFIG_KVM
    struct kvm_irqfd irqfd = {
        .fd = event_notifier_get_fd(&vdev->intx.interrupt),
        .gsi = vdev->intx.route.irq,
        .flags = KVM_IRQFD_FLAG_RESAMPLE,
    };
    struct vfio_irq_set *irq_set;
    int ret, argsz;
    int32_t *pfd;

    if (!VFIO_ALLOW_KVM_INTX || !kvm_irqfds_enabled() ||
        vdev->intx.route.mode != PCI_INTX_ENABLED ||
        !kvm_check_extension(kvm_state, KVM_CAP_IRQFD_RESAMPLE)) {
        return;
    }

    /* Get to a known interrupt state */
    qemu_set_fd_handler(irqfd.fd, NULL, NULL, vdev);
    vfio_mask_intx(vdev);
    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);

    /* Get an eventfd for resample/unmask */
    if (event_notifier_init(&vdev->intx.unmask, 0)) {
        error_report("vfio: Error: event_notifier_init failed eoi");
        goto fail;
    }

    /* KVM triggers it, VFIO listens for it */
    irqfd.resamplefd = event_notifier_get_fd(&vdev->intx.unmask);

    if (kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd)) {
        error_report("vfio: Error: Failed to setup resample irqfd: %m");
        goto fail_irqfd;
    }

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_UNMASK;
    irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;

    *pfd = irqfd.resamplefd;

    ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set);
    g_free(irq_set);
    if (ret) {
        error_report("vfio: Error: Failed to setup INTx unmask fd: %m");
        goto fail_vfio;
    }

    /* Let'em rip */
    vfio_unmask_intx(vdev);

    vdev->intx.kvm_accel = true;

    DPRINTF("%s(%04x:%02x:%02x.%x) KVM INTx accel enabled\n",
            __func__, vdev->host.domain, vdev->host.bus,
            vdev->host.slot, vdev->host.function);

    return;

fail_vfio:
    irqfd.flags = KVM_IRQFD_FLAG_DEASSIGN;
    kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd);
fail_irqfd:
    event_notifier_cleanup(&vdev->intx.unmask);
fail:
    qemu_set_fd_handler(irqfd.fd, vfio_intx_interrupt, NULL, vdev);
    vfio_unmask_intx(vdev);
#endif
}

static void vfio_disable_intx_kvm(VFIODevice *vdev)
{
#ifdef CONFIG_KVM
    struct kvm_irqfd irqfd = {
        .fd = event_notifier_get_fd(&vdev->intx.interrupt),
        .gsi = vdev->intx.route.irq,
        .flags = KVM_IRQFD_FLAG_DEASSIGN,
    };

    if (!vdev->intx.kvm_accel) {
        return;
    }

    /*
     * Get to a known state, hardware masked, QEMU ready to accept new
     * interrupts, QEMU IRQ de-asserted.
     */
    vfio_mask_intx(vdev);
    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);

    /* Tell KVM to stop listening for an INTx irqfd */
    if (kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd)) {
        error_report("vfio: Error: Failed to disable INTx irqfd: %m");
    }

    /* We only need to close the eventfd for VFIO to cleanup the kernel side */
    event_notifier_cleanup(&vdev->intx.unmask);

    /* QEMU starts listening for interrupt events. */
    qemu_set_fd_handler(irqfd.fd, vfio_intx_interrupt, NULL, vdev);

    vdev->intx.kvm_accel = false;

    /* If we've missed an event, let it re-fire through QEMU */
    vfio_unmask_intx(vdev);

    DPRINTF("%s(%04x:%02x:%02x.%x) KVM INTx accel disabled\n",
            __func__, vdev->host.domain, vdev->host.bus,
            vdev->host.slot, vdev->host.function);
#endif
}

static void vfio_update_irq(PCIDevice *pdev)
{
    VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
    PCIINTxRoute route;

    if (vdev->interrupt != VFIO_INT_INTx) {
        return;
    }

    route = pci_device_route_intx_to_irq(&vdev->pdev, vdev->intx.pin);

    if (!pci_intx_route_changed(&vdev->intx.route, &route)) {
        return; /* Nothing changed */
    }

    DPRINTF("%s(%04x:%02x:%02x.%x) IRQ moved %d -> %d\n", __func__,
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, vdev->intx.route.irq, route.irq);

    vfio_disable_intx_kvm(vdev);

    vdev->intx.route = route;

    if (route.mode != PCI_INTX_ENABLED) {
        return;
    }

    vfio_enable_intx_kvm(vdev);

    /* Re-enable the interrupt in case we missed an EOI */
    vfio_eoi(vdev);
}

static int vfio_enable_intx(VFIODevice *vdev)
{
    uint8_t pin = vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1);
    int ret, argsz;
    struct vfio_irq_set *irq_set;
    int32_t *pfd;

    if (!pin) {
        return 0;
    }

    vfio_disable_interrupts(vdev);

    vdev->intx.pin = pin - 1; /* Pin A (1) -> irq[0] */
    pci_config_set_interrupt_pin(vdev->pdev.config, pin);

#ifdef CONFIG_KVM
    /*
     * Only conditional to avoid generating error messages on platforms
     * where we won't actually use the result anyway.
     */
    if (kvm_irqfds_enabled() &&
        kvm_check_extension(kvm_state, KVM_CAP_IRQFD_RESAMPLE)) {
        vdev->intx.route = pci_device_route_intx_to_irq(&vdev->pdev,
                                                        vdev->intx.pin);
    }
#endif

    ret = event_notifier_init(&vdev->intx.interrupt, 0);
    if (ret) {
        error_report("vfio: Error: event_notifier_init failed");
        return ret;
    }

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;

    *pfd = event_notifier_get_fd(&vdev->intx.interrupt);
    qemu_set_fd_handler(*pfd, vfio_intx_interrupt, NULL, vdev);

    ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set);
    g_free(irq_set);
    if (ret) {
        error_report("vfio: Error: Failed to setup INTx fd: %m");
        /* Re-fetch the fd; pfd points into irq_set, which was just freed */
        qemu_set_fd_handler(event_notifier_get_fd(&vdev->intx.interrupt),
                            NULL, NULL, vdev);
        event_notifier_cleanup(&vdev->intx.interrupt);
        return -errno;
    }

    vfio_enable_intx_kvm(vdev);

    vdev->interrupt = VFIO_INT_INTx;

    DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);

    return 0;
}

static void vfio_disable_intx(VFIODevice *vdev)
{
    int fd;

    timer_del(vdev->intx.mmap_timer);
    vfio_disable_intx_kvm(vdev);
    vfio_disable_irqindex(vdev, VFIO_PCI_INTX_IRQ_INDEX);
    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);
    vfio_mmap_set_enabled(vdev, true);

    fd = event_notifier_get_fd(&vdev->intx.interrupt);
    qemu_set_fd_handler(fd, NULL, NULL, vdev);
    event_notifier_cleanup(&vdev->intx.interrupt);

    vdev->interrupt = VFIO_INT_NONE;

    DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);
}

/*
 * MSI/X
 */
static void vfio_msi_interrupt(void *opaque)
{
    VFIOMSIVector *vector = opaque;
    VFIODevice *vdev = vector->vdev;
    int nr = vector - vdev->msi_vectors;

    if (!event_notifier_test_and_clear(&vector->interrupt)) {
        return;
    }

    DPRINTF("%s(%04x:%02x:%02x.%x) vector %d\n", __func__,
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, nr);

    if (vdev->interrupt == VFIO_INT_MSIX) {
        msix_notify(&vdev->pdev, nr);
    } else if (vdev->interrupt == VFIO_INT_MSI) {
        msi_notify(&vdev->pdev, nr);
    } else {
        error_report("vfio: MSI interrupt received, but not enabled?");
    }
}

static int vfio_enable_vectors(VFIODevice *vdev, bool msix)
{
    struct vfio_irq_set *irq_set;
    int ret = 0, i, argsz;
    int32_t *fds;

    argsz = sizeof(*irq_set) + (vdev->nr_vectors * sizeof(*fds));

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = msix ? VFIO_PCI_MSIX_IRQ_INDEX : VFIO_PCI_MSI_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = vdev->nr_vectors;
    fds = (int32_t *)&irq_set->data;

    for (i = 0; i < vdev->nr_vectors; i++) {
        if (!vdev->msi_vectors[i].use) {
            fds[i] = -1;
            continue;
        }

        fds[i] = event_notifier_get_fd(&vdev->msi_vectors[i].interrupt);
    }

    ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set);

    g_free(irq_set);

    return ret;
}

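/*
 * Illustrative layout of the variable-length buffer built above, assuming
 * nr_vectors == 3 with vector 1 unused:
 *
 *   header: argsz = sizeof(*irq_set) + 3 * sizeof(int32_t),
 *           flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER,
 *           index = MSI or MSI-X, start = 0, count = 3
 *   data:   { fd0, -1, fd2 }   -- an fd of -1 leaves that vector untriggered
 */
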
static int vfio_msix_vector_do_use(PCIDevice *pdev, unsigned int nr,
                                   MSIMessage *msg, IOHandler *handler)
{
    VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
    VFIOMSIVector *vector;
    int ret;

    DPRINTF("%s(%04x:%02x:%02x.%x) vector %d used\n", __func__,
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, nr);

    vector = &vdev->msi_vectors[nr];
    vector->vdev = vdev;
    vector->use = true;

    msix_vector_use(pdev, nr);

    if (event_notifier_init(&vector->interrupt, 0)) {
        error_report("vfio: Error: event_notifier_init failed");
    }

    /*
     * Attempt to enable route through KVM irqchip,
     * default to userspace handling if unavailable.
     */
    vector->virq = msg ? kvm_irqchip_add_msi_route(kvm_state, *msg) : -1;
    if (vector->virq < 0 ||
        kvm_irqchip_add_irqfd_notifier(kvm_state, &vector->interrupt,
                                       NULL, vector->virq) < 0) {
        if (vector->virq >= 0) {
            kvm_irqchip_release_virq(kvm_state, vector->virq);
            vector->virq = -1;
        }
        qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                            handler, NULL, vector);
    }

    /*
     * We don't want to have the host allocate all possible MSI vectors
     * for a device if they're not in use, so we shut them down and
     * incrementally increase them as needed.
     */
    if (vdev->nr_vectors < nr + 1) {
        vfio_disable_irqindex(vdev, VFIO_PCI_MSIX_IRQ_INDEX);
        vdev->nr_vectors = nr + 1;
        ret = vfio_enable_vectors(vdev, true);
        if (ret) {
            error_report("vfio: failed to enable vectors, %d", ret);
        }
    } else {
        int argsz;
        struct vfio_irq_set *irq_set;
        int32_t *pfd;

        argsz = sizeof(*irq_set) + sizeof(*pfd);

        irq_set = g_malloc0(argsz);
        irq_set->argsz = argsz;
        irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
                         VFIO_IRQ_SET_ACTION_TRIGGER;
        irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
        irq_set->start = nr;
        irq_set->count = 1;
        pfd = (int32_t *)&irq_set->data;

        *pfd = event_notifier_get_fd(&vector->interrupt);

        ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set);
        g_free(irq_set);
        if (ret) {
            error_report("vfio: failed to modify vector, %d", ret);
        }
    }

    return 0;
}

static int vfio_msix_vector_use(PCIDevice *pdev,
                                unsigned int nr, MSIMessage msg)
{
    return vfio_msix_vector_do_use(pdev, nr, &msg, vfio_msi_interrupt);
}

static void vfio_msix_vector_release(PCIDevice *pdev, unsigned int nr)
{
    VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
    VFIOMSIVector *vector = &vdev->msi_vectors[nr];
    int argsz;
    struct vfio_irq_set *irq_set;
    int32_t *pfd;

    DPRINTF("%s(%04x:%02x:%02x.%x) vector %d released\n", __func__,
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, nr);

    /*
     * XXX What's the right thing to do here?  This turns off the interrupt
     * completely, but do we really just want to switch the interrupt to
     * bouncing through userspace and let msix.c drop it?  Not sure.
     */
    msix_vector_unuse(pdev, nr);

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
                     VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
    irq_set->start = nr;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;

    *pfd = -1;

    ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set);

    g_free(irq_set);

    if (vector->virq < 0) {
        qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                            NULL, NULL, NULL);
    } else {
        kvm_irqchip_remove_irqfd_notifier(kvm_state, &vector->interrupt,
                                          vector->virq);
        kvm_irqchip_release_virq(kvm_state, vector->virq);
        vector->virq = -1;
    }

    event_notifier_cleanup(&vector->interrupt);
    vector->use = false;
}

static void vfio_enable_msix(VFIODevice *vdev)
{
    vfio_disable_interrupts(vdev);

    vdev->msi_vectors = g_malloc0(vdev->msix->entries * sizeof(VFIOMSIVector));

    vdev->interrupt = VFIO_INT_MSIX;

    /*
     * Some communication channels between VF & PF or PF & fw rely on the
     * physical state of the device and expect that enabling MSI-X from the
     * guest enables the same on the host.  When our guest is Linux, the
     * guest driver call to pci_enable_msix() sets the enabling bit in the
     * MSI-X capability, but leaves the vector table masked.  We therefore
     * can't rely on a vector_use callback (from request_irq() in the guest)
     * to switch the physical device into MSI-X mode because that may come a
     * long time after pci_enable_msix().  This code enables vector 0 with
     * triggering to userspace, then immediately releases the vector, leaving
     * the physical device with no vectors enabled, but MSI-X enabled, just
     * like the guest view.
     */
    vfio_msix_vector_do_use(&vdev->pdev, 0, NULL, NULL);
    vfio_msix_vector_release(&vdev->pdev, 0);

    if (msix_set_vector_notifiers(&vdev->pdev, vfio_msix_vector_use,
                                  vfio_msix_vector_release, NULL)) {
        error_report("vfio: msix_set_vector_notifiers failed");
    }

    DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);
}

static void vfio_enable_msi(VFIODevice *vdev)
{
    int ret, i;

    vfio_disable_interrupts(vdev);

    vdev->nr_vectors = msi_nr_vectors_allocated(&vdev->pdev);
retry:
    vdev->msi_vectors = g_malloc0(vdev->nr_vectors * sizeof(VFIOMSIVector));

    for (i = 0; i < vdev->nr_vectors; i++) {
        VFIOMSIVector *vector = &vdev->msi_vectors[i];

        vector->vdev = vdev;
        vector->use = true;

        if (event_notifier_init(&vector->interrupt, 0)) {
            error_report("vfio: Error: event_notifier_init failed");
        }

        vector->msg = msi_get_message(&vdev->pdev, i);

        /*
         * Attempt to enable route through KVM irqchip,
         * default to userspace handling if unavailable.
         */
        vector->virq = kvm_irqchip_add_msi_route(kvm_state, vector->msg);
        if (vector->virq < 0 ||
            kvm_irqchip_add_irqfd_notifier(kvm_state, &vector->interrupt,
                                           NULL, vector->virq) < 0) {
            qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                                vfio_msi_interrupt, NULL, vector);
        }
    }

    ret = vfio_enable_vectors(vdev, false);
    if (ret) {
        if (ret < 0) {
            error_report("vfio: Error: Failed to setup MSI fds: %m");
        } else if (ret != vdev->nr_vectors) {
            error_report("vfio: Error: Failed to enable %d "
                         "MSI vectors, retry with %d", vdev->nr_vectors, ret);
        }

        for (i = 0; i < vdev->nr_vectors; i++) {
            VFIOMSIVector *vector = &vdev->msi_vectors[i];
            if (vector->virq >= 0) {
                kvm_irqchip_remove_irqfd_notifier(kvm_state, &vector->interrupt,
                                                  vector->virq);
                kvm_irqchip_release_virq(kvm_state, vector->virq);
                vector->virq = -1;
            } else {
                qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                                    NULL, NULL, NULL);
            }
            event_notifier_cleanup(&vector->interrupt);
        }

        g_free(vdev->msi_vectors);

        if (ret > 0 && ret != vdev->nr_vectors) {
            vdev->nr_vectors = ret;
            goto retry;
        }
        vdev->nr_vectors = 0;

        return;
    }

    vdev->interrupt = VFIO_INT_MSI;

    DPRINTF("%s(%04x:%02x:%02x.%x) Enabled %d MSI vectors\n", __func__,
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, vdev->nr_vectors);
}

static void vfio_disable_msi_common(VFIODevice *vdev)
{
    g_free(vdev->msi_vectors);
    vdev->msi_vectors = NULL;
    vdev->nr_vectors = 0;
    vdev->interrupt = VFIO_INT_NONE;

    vfio_enable_intx(vdev);
}

static void vfio_disable_msix(VFIODevice *vdev)
{
    msix_unset_vector_notifiers(&vdev->pdev);

    if (vdev->nr_vectors) {
        vfio_disable_irqindex(vdev, VFIO_PCI_MSIX_IRQ_INDEX);
    }

    vfio_disable_msi_common(vdev);

    DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);
}

static void vfio_disable_msi(VFIODevice *vdev)
{
    int i;

    vfio_disable_irqindex(vdev, VFIO_PCI_MSI_IRQ_INDEX);

    for (i = 0; i < vdev->nr_vectors; i++) {
        VFIOMSIVector *vector = &vdev->msi_vectors[i];

        if (!vector->use) {
            continue;
        }

        if (vector->virq >= 0) {
            kvm_irqchip_remove_irqfd_notifier(kvm_state,
                                              &vector->interrupt, vector->virq);
            kvm_irqchip_release_virq(kvm_state, vector->virq);
            vector->virq = -1;
        } else {
            qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                                NULL, NULL, NULL);
        }

        event_notifier_cleanup(&vector->interrupt);
    }

    vfio_disable_msi_common(vdev);

    DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);
}

static void vfio_update_msi(VFIODevice *vdev)
{
    int i;

    for (i = 0; i < vdev->nr_vectors; i++) {
        VFIOMSIVector *vector = &vdev->msi_vectors[i];
        MSIMessage msg;

        if (!vector->use || vector->virq < 0) {
            continue;
        }

        msg = msi_get_message(&vdev->pdev, i);

        if (msg.address != vector->msg.address ||
            msg.data != vector->msg.data) {

            DPRINTF("%s(%04x:%02x:%02x.%x) MSI vector %d changed\n",
                    __func__, vdev->host.domain, vdev->host.bus,
                    vdev->host.slot, vdev->host.function, i);

            kvm_irqchip_update_msi_route(kvm_state, vector->virq, msg);
            vector->msg = msg;
        }
    }
}

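/*
 * Example scenario for the update above: a guest re-targeting a vector
 * (e.g. changing IRQ affinity) rewrites the MSI address/data pair in
 * config space, and the cached KVM irqchip route must be rewritten to
 * match so irqfd injection keeps hitting the right vCPU.
 */
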
/*
 * IO Port/MMIO - Beware of the endians, VFIO is always little endian
 */
static void vfio_bar_write(void *opaque, hwaddr addr,
                           uint64_t data, unsigned size)
{
    VFIOBAR *bar = opaque;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;

    switch (size) {
    case 1:
        buf.byte = data;
        break;
    case 2:
        buf.word = cpu_to_le16(data);
        break;
    case 4:
        buf.dword = cpu_to_le32(data);
        break;
    default:
        hw_error("vfio: unsupported write size, %d bytes\n", size);
        break;
    }

    if (pwrite(bar->fd, &buf, size, bar->fd_offset + addr) != size) {
        error_report("%s(,0x%"HWADDR_PRIx", 0x%"PRIx64", %d) failed: %m",
                     __func__, addr, data, size);
    }

#ifdef DEBUG_VFIO
    {
        VFIODevice *vdev = container_of(bar, VFIODevice, bars[bar->nr]);

        DPRINTF("%s(%04x:%02x:%02x.%x:BAR%d+0x%"HWADDR_PRIx", 0x%"PRIx64
                ", %d)\n", __func__, vdev->host.domain, vdev->host.bus,
                vdev->host.slot, vdev->host.function, bar->nr, addr,
                data, size);
    }
#endif

    /*
     * A read or write to a BAR always signals an INTx EOI.  This will
     * do nothing if not pending (including not in INTx mode).  We assume
     * that a BAR access is in response to an interrupt and that BAR
     * accesses will service the interrupt.  Unfortunately, we don't know
     * which access will service the interrupt, so we're potentially
     * getting quite a few host interrupts per guest interrupt.
     */
    vfio_eoi(container_of(bar, VFIODevice, bars[bar->nr]));
}

static uint64_t vfio_bar_read(void *opaque,
                              hwaddr addr, unsigned size)
{
    VFIOBAR *bar = opaque;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    uint64_t data = 0;

    if (pread(bar->fd, &buf, size, bar->fd_offset + addr) != size) {
        error_report("%s(,0x%"HWADDR_PRIx", %d) failed: %m",
                     __func__, addr, size);
        return (uint64_t)-1;
    }

    switch (size) {
    case 1:
        data = buf.byte;
        break;
    case 2:
        data = le16_to_cpu(buf.word);
        break;
    case 4:
        data = le32_to_cpu(buf.dword);
        break;
    default:
        hw_error("vfio: unsupported read size, %d bytes\n", size);
        break;
    }

#ifdef DEBUG_VFIO
    {
        VFIODevice *vdev = container_of(bar, VFIODevice, bars[bar->nr]);

        DPRINTF("%s(%04x:%02x:%02x.%x:BAR%d+0x%"HWADDR_PRIx
                ", %d) = 0x%"PRIx64"\n", __func__, vdev->host.domain,
                vdev->host.bus, vdev->host.slot, vdev->host.function,
                bar->nr, addr, size, data);
    }
#endif

    /* Same as write above */
    vfio_eoi(container_of(bar, VFIODevice, bars[bar->nr]));

    return data;
}

static const MemoryRegionOps vfio_bar_ops = {
    .read = vfio_bar_read,
    .write = vfio_bar_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

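/*
 * For orientation, a minimal sketch of how ops like these get attached to
 * a region; the actual BAR mapping lives elsewhere in this file and also
 * handles the mmap fast path, so names here are illustrative only:
 *
 *   memory_region_init_io(&bar->mem, OBJECT(vdev), &vfio_bar_ops, bar,
 *                         "vfio-bar", bar->size);
 */
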
static void vfio_pci_load_rom(VFIODevice *vdev)
{
    struct vfio_region_info reg_info = {
        .argsz = sizeof(reg_info),
        .index = VFIO_PCI_ROM_REGION_INDEX
    };
    uint64_t size;
    off_t off = 0;
    ssize_t bytes; /* signed, so a failed pread() is detectable below */

    if (ioctl(vdev->fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info)) {
        error_report("vfio: Error getting ROM info: %m");
        return;
    }

    DPRINTF("Device %04x:%02x:%02x.%x ROM:\n", vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);
    DPRINTF("  size: 0x%lx, offset: 0x%lx, flags: 0x%lx\n",
            (unsigned long)reg_info.size, (unsigned long)reg_info.offset,
            (unsigned long)reg_info.flags);

    vdev->rom_size = size = reg_info.size;
    vdev->rom_offset = reg_info.offset;

    if (!vdev->rom_size) {
        return;
    }

    vdev->rom = g_malloc(size);
    memset(vdev->rom, 0xff, size);

    while (size) {
        bytes = pread(vdev->fd, vdev->rom + off, size, vdev->rom_offset + off);
        if (bytes == 0) {
            break;
        } else if (bytes > 0) {
            off += bytes;
            size -= bytes;
        } else {
            if (errno == EINTR || errno == EAGAIN) {
                continue;
            }
            error_report("vfio: Error reading device ROM: %m");
            break;
        }
    }
}

static uint64_t vfio_rom_read(void *opaque, hwaddr addr, unsigned size)
{
    VFIODevice *vdev = opaque;
    uint64_t val = ((uint64_t)1 << (size * 8)) - 1;

    /* Load the ROM lazily when the guest tries to read it */
    if (unlikely(!vdev->rom)) {
        vfio_pci_load_rom(vdev);
    }

    memcpy(&val, vdev->rom + addr,
           (addr < vdev->rom_size) ? MIN(size, vdev->rom_size - addr) : 0);

    DPRINTF("%s(%04x:%02x:%02x.%x, 0x%"HWADDR_PRIx", 0x%x) = 0x%"PRIx64"\n",
            __func__, vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, addr, size, val);

    return val;
}

static void vfio_rom_write(void *opaque, hwaddr addr,
                           uint64_t data, unsigned size)
{
}

static const MemoryRegionOps vfio_rom_ops = {
    .read = vfio_rom_read,
    .write = vfio_rom_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static void vfio_pci_size_rom(VFIODevice *vdev)
{
    uint32_t orig, size = cpu_to_le32((uint32_t)PCI_ROM_ADDRESS_MASK);
    off_t offset = vdev->config_offset + PCI_ROM_ADDRESS;
    char name[32];

    if (vdev->pdev.romfile || !vdev->pdev.rom_bar) {
        return;
    }

    /*
     * Use the same size ROM BAR as the physical device.  The contents
     * will get filled in later when the guest tries to read it.
     */
    if (pread(vdev->fd, &orig, 4, offset) != 4 ||
        pwrite(vdev->fd, &size, 4, offset) != 4 ||
        pread(vdev->fd, &size, 4, offset) != 4 ||
        pwrite(vdev->fd, &orig, 4, offset) != 4) {
        error_report("%s(%04x:%02x:%02x.%x) failed: %m",
                     __func__, vdev->host.domain, vdev->host.bus,
                     vdev->host.slot, vdev->host.function);
        return;
    }

    size = ~(le32_to_cpu(size) & PCI_ROM_ADDRESS_MASK) + 1;
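    /*
     * Worked example of the standard BAR-sizing arithmetic above
     * (illustrative values): writing all 1s to the ROM BAR and reading
     * back, say, 0xffff8001 gives 0xffff8000 after masking with
     * PCI_ROM_ADDRESS_MASK; ~0xffff8000 + 1 = 0x8000, i.e. a 32KB ROM.
     */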

    if (!size) {
        return;
    }

    DPRINTF("%04x:%02x:%02x.%x ROM size 0x%x\n", vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function, size);

    snprintf(name, sizeof(name), "vfio[%04x:%02x:%02x.%x].rom",
             vdev->host.domain, vdev->host.bus, vdev->host.slot,
             vdev->host.function);

    memory_region_init_io(&vdev->pdev.rom, OBJECT(vdev),
                          &vfio_rom_ops, vdev, name, size);

    pci_register_bar(&vdev->pdev, PCI_ROM_SLOT,
                     PCI_BASE_ADDRESS_SPACE_MEMORY, &vdev->pdev.rom);

    vdev->pdev.has_rom = true;
}

static void vfio_vga_write(void *opaque, hwaddr addr,
                           uint64_t data, unsigned size)
{
    VFIOVGARegion *region = opaque;
    VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]);
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    off_t offset = vga->fd_offset + region->offset + addr;

    switch (size) {
    case 1:
        buf.byte = data;
        break;
    case 2:
        buf.word = cpu_to_le16(data);
        break;
    case 4:
        buf.dword = cpu_to_le32(data);
        break;
    default:
        hw_error("vfio: unsupported write size, %d bytes\n", size);
        break;
    }

    if (pwrite(vga->fd, &buf, size, offset) != size) {
        error_report("%s(,0x%"HWADDR_PRIx", 0x%"PRIx64", %d) failed: %m",
                     __func__, region->offset + addr, data, size);
    }

    DPRINTF("%s(0x%"HWADDR_PRIx", 0x%"PRIx64", %d)\n",
            __func__, region->offset + addr, data, size);
}

static uint64_t vfio_vga_read(void *opaque, hwaddr addr, unsigned size)
{
    VFIOVGARegion *region = opaque;
    VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]);
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    uint64_t data = 0;
    off_t offset = vga->fd_offset + region->offset + addr;

    if (pread(vga->fd, &buf, size, offset) != size) {
        error_report("%s(,0x%"HWADDR_PRIx", %d) failed: %m",
                     __func__, region->offset + addr, size);
        return (uint64_t)-1;
    }

    switch (size) {
    case 1:
        data = buf.byte;
        break;
    case 2:
        data = le16_to_cpu(buf.word);
        break;
    case 4:
        data = le32_to_cpu(buf.dword);
        break;
    default:
        hw_error("vfio: unsupported read size, %d bytes\n", size);
        break;
    }

    DPRINTF("%s(0x%"HWADDR_PRIx", %d) = 0x%"PRIx64"\n",
            __func__, region->offset + addr, size, data);

    return data;
}

static const MemoryRegionOps vfio_vga_ops = {
    .read = vfio_vga_read,
    .write = vfio_vga_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

/*
 * Device specific quirks
 */

/* Is range1 fully contained within range2? */
static bool vfio_range_contained(uint64_t first1, uint64_t len1,
                                 uint64_t first2, uint64_t len2) {
    return (first1 >= first2 && first1 + len1 <= first2 + len2);
}

static bool vfio_flags_enabled(uint8_t flags, uint8_t mask)
{
    return (mask && (flags & mask) == mask);
}

static uint64_t vfio_generic_window_quirk_read(void *opaque,
                                               hwaddr addr, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIODevice *vdev = quirk->vdev;
    uint64_t data;

    if (vfio_flags_enabled(quirk->data.flags, quirk->data.read_flags) &&
        ranges_overlap(addr, size,
                       quirk->data.data_offset, quirk->data.data_size)) {
        hwaddr offset = addr - quirk->data.data_offset;

        if (!vfio_range_contained(addr, size, quirk->data.data_offset,
                                  quirk->data.data_size)) {
            hw_error("%s: window data read not fully contained: %s\n",
                     __func__, memory_region_name(&quirk->mem));
        }

        data = vfio_pci_read_config(&vdev->pdev,
                                    quirk->data.address_val + offset, size);

        DPRINTF("%s read(%04x:%02x:%02x.%x:BAR%d+0x%"HWADDR_PRIx", %d) = 0x%"
                PRIx64"\n", memory_region_name(&quirk->mem), vdev->host.domain,
                vdev->host.bus, vdev->host.slot, vdev->host.function,
                quirk->data.bar, addr, size, data);
    } else {
        data = vfio_bar_read(&vdev->bars[quirk->data.bar],
                             addr + quirk->data.base_offset, size);
    }

    return data;
}

static void vfio_generic_window_quirk_write(void *opaque, hwaddr addr,
                                            uint64_t data, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIODevice *vdev = quirk->vdev;

    if (ranges_overlap(addr, size,
                       quirk->data.address_offset, quirk->data.address_size)) {

        if (addr != quirk->data.address_offset) {
            hw_error("%s: offset write into address window: %s\n",
                     __func__, memory_region_name(&quirk->mem));
        }

        if ((data & ~quirk->data.address_mask) == quirk->data.address_match) {
            quirk->data.flags |= quirk->data.write_flags |
                                 quirk->data.read_flags;
            quirk->data.address_val = data & quirk->data.address_mask;
        } else {
            quirk->data.flags &= ~(quirk->data.write_flags |
                                   quirk->data.read_flags);
        }
    }

    if (vfio_flags_enabled(quirk->data.flags, quirk->data.write_flags) &&
        ranges_overlap(addr, size,
                       quirk->data.data_offset, quirk->data.data_size)) {
        hwaddr offset = addr - quirk->data.data_offset;

        if (!vfio_range_contained(addr, size, quirk->data.data_offset,
                                  quirk->data.data_size)) {
            hw_error("%s: window data write not fully contained: %s\n",
                     __func__, memory_region_name(&quirk->mem));
        }

        vfio_pci_write_config(&vdev->pdev,
                              quirk->data.address_val + offset, data, size);
        DPRINTF("%s write(%04x:%02x:%02x.%x:BAR%d+0x%"HWADDR_PRIx", 0x%"
                PRIx64", %d)\n", memory_region_name(&quirk->mem),
                vdev->host.domain, vdev->host.bus, vdev->host.slot,
                vdev->host.function, quirk->data.bar, addr, data, size);
        return;
    }

    vfio_bar_write(&vdev->bars[quirk->data.bar],
                   addr + quirk->data.base_offset, data, size);
}

static const MemoryRegionOps vfio_generic_window_quirk = {
    .read = vfio_generic_window_quirk_read,
    .write = vfio_generic_window_quirk_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static uint64_t vfio_generic_quirk_read(void *opaque,
                                        hwaddr addr, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIODevice *vdev = quirk->vdev;
    hwaddr base = quirk->data.address_match & TARGET_PAGE_MASK;
    hwaddr offset = quirk->data.address_match & ~TARGET_PAGE_MASK;
    uint64_t data;

    if (vfio_flags_enabled(quirk->data.flags, quirk->data.read_flags) &&
        ranges_overlap(addr, size, offset, quirk->data.address_mask + 1)) {
        if (!vfio_range_contained(addr, size, offset,
                                  quirk->data.address_mask + 1)) {
            hw_error("%s: read not fully contained: %s\n",
                     __func__, memory_region_name(&quirk->mem));
        }

        data = vfio_pci_read_config(&vdev->pdev, addr - offset, size);

        DPRINTF("%s read(%04x:%02x:%02x.%x:BAR%d+0x%"HWADDR_PRIx", %d) = 0x%"
                PRIx64"\n", memory_region_name(&quirk->mem), vdev->host.domain,
                vdev->host.bus, vdev->host.slot, vdev->host.function,
                quirk->data.bar, addr + base, size, data);
    } else {
        data = vfio_bar_read(&vdev->bars[quirk->data.bar], addr + base, size);
    }

    return data;
}

static void vfio_generic_quirk_write(void *opaque, hwaddr addr,
                                     uint64_t data, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIODevice *vdev = quirk->vdev;
    hwaddr base = quirk->data.address_match & TARGET_PAGE_MASK;
    hwaddr offset = quirk->data.address_match & ~TARGET_PAGE_MASK;

    if (vfio_flags_enabled(quirk->data.flags, quirk->data.write_flags) &&
        ranges_overlap(addr, size, offset, quirk->data.address_mask + 1)) {
        if (!vfio_range_contained(addr, size, offset,
                                  quirk->data.address_mask + 1)) {
            hw_error("%s: write not fully contained: %s\n",
                     __func__, memory_region_name(&quirk->mem));
        }

        vfio_pci_write_config(&vdev->pdev, addr - offset, data, size);

        DPRINTF("%s write(%04x:%02x:%02x.%x:BAR%d+0x%"HWADDR_PRIx", 0x%"
                PRIx64", %d)\n", memory_region_name(&quirk->mem),
                vdev->host.domain, vdev->host.bus, vdev->host.slot,
                vdev->host.function, quirk->data.bar, addr + base, data, size);
    } else {
        vfio_bar_write(&vdev->bars[quirk->data.bar], addr + base, data, size);
    }
}

static const MemoryRegionOps vfio_generic_quirk = {
    .read = vfio_generic_quirk_read,
    .write = vfio_generic_quirk_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

#define PCI_VENDOR_ID_ATI 0x1002

/*
 * Radeon HD cards (HD5450 & HD7850) report the upper byte of the I/O port BAR
 * through VGA register 0x3c3.  On newer cards, the I/O port BAR is always
 * BAR4 (older cards like the X550 used BAR1, but we don't care to support
 * those).  Note that on bare metal, a read of 0x3c3 doesn't always return the
 * I/O port BAR address.  Originally this was coded to return the virtual BAR
 * address only if the physical register read returns the actual BAR address,
 * but users have reported greater success if we return the virtual address
 * unconditionally.
 */
static uint64_t vfio_ati_3c3_quirk_read(void *opaque,
                                        hwaddr addr, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIODevice *vdev = quirk->vdev;
    uint64_t data = vfio_pci_read_config(&vdev->pdev,
                                         PCI_BASE_ADDRESS_0 + (4 * 4) + 1,
                                         size);
    DPRINTF("%s(0x3c3, 1) = 0x%"PRIx64"\n", __func__, data);

    return data;
}
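
/*
 * On the config offset above: PCI_BASE_ADDRESS_0 is config offset 0x10 and
 * each BAR register is 4 bytes, so PCI_BASE_ADDRESS_0 + (4 * 4) addresses
 * BAR4; the +1 selects its second byte (address bits 8-15), the upper byte
 * of the 16-bit I/O port address that a read of 0x3c3 is expected to return.
 */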

static const MemoryRegionOps vfio_ati_3c3_quirk = {
    .read = vfio_ati_3c3_quirk_read,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static void vfio_vga_probe_ati_3c3_quirk(VFIODevice *vdev)
{
    PCIDevice *pdev = &vdev->pdev;
    VFIOQuirk *quirk;

    if (pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_ATI) {
        return;
    }

    /*
     * As long as the BAR is >= 256 bytes it will be aligned such that the
     * lower byte is always zero.  Filter out anything else, if it exists.
     */
    if (!vdev->bars[4].ioport || vdev->bars[4].size < 256) {
        return;
    }

    quirk = g_malloc0(sizeof(*quirk));
    quirk->vdev = vdev;

    memory_region_init_io(&quirk->mem, OBJECT(vdev), &vfio_ati_3c3_quirk, quirk,
                          "vfio-ati-3c3-quirk", 1);
    memory_region_add_subregion(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem,
                                3 /* offset 3 bytes from 0x3c0 */, &quirk->mem);

    QLIST_INSERT_HEAD(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].quirks,
                      quirk, next);

    DPRINTF("Enabled ATI/AMD quirk 0x3c3 BAR4 for device %04x:%02x:%02x.%x\n",
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function);
}

1500 | /* | |
39360f0b AW |
1501 | * Newer ATI/AMD devices, including HD5450 and HD7850, have a window to PCI |
1502 | * config space through MMIO BAR2 at offset 0x4000. Nothing seems to access | |
1503 | * the MMIO space directly, but a window to this space is provided through | |
1504 | * I/O port BAR4. Offset 0x0 is the address register and offset 0x4 is the | |
1505 | * data register. When the address is programmed to the range | |
1506 | * 0x4000-0x4fff, PCI configuration space is available. Experimentation | |
1507 | * seems to indicate that only read access is provided, but we drop | |
1508 | * writes nonetheless while the window is enabled for config space. | |
7076eabc | 1509 | */ |
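/*
 * Illustrative guest sequence for the window described above (hypothetical,
 * offsets relative to the I/O port BAR4 base):
 *
 *   outl(bar4 + 0x0, 0x4000 + offset);   // program the address register
 *   val = inl(bar4 + 0x4);               // read config dword at offset
 */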
39360f0b | 1510 | static void vfio_probe_ati_bar4_window_quirk(VFIODevice *vdev, int nr) |
7076eabc | 1511 | { |
7076eabc | 1512 | PCIDevice *pdev = &vdev->pdev; |
39360f0b | 1513 | VFIOQuirk *quirk; |
7076eabc | 1514 | |
1515 | if (!vdev->has_vga || nr != 4 || |
1516 | pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_ATI) { | |
1517 | return; | |
1518 | } |
1519 | ||
1520 | quirk = g_malloc0(sizeof(*quirk)); |
1521 | quirk->vdev = vdev; | |
1522 | quirk->data.address_size = 4; | |
1523 | quirk->data.data_offset = 4; | |
1524 | quirk->data.data_size = 4; | |
1525 | quirk->data.address_match = 0x4000; | |
1526 | quirk->data.address_mask = PCIE_CONFIG_SPACE_SIZE - 1; | |
1527 | quirk->data.bar = nr; | |
1528 | quirk->data.read_flags = quirk->data.write_flags = 1; | |
1529 | ||
1530 | memory_region_init_io(&quirk->mem, OBJECT(vdev), | |
1531 | &vfio_generic_window_quirk, quirk, | |
1532 | "vfio-ati-bar4-window-quirk", 8); | |
1533 | memory_region_add_subregion_overlap(&vdev->bars[nr].mem, | |
1534 | quirk->data.base_offset, &quirk->mem, 1); | |
7076eabc | 1535 | |
39360f0b | 1536 | QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next); |
7076eabc | 1537 | |
1538 | DPRINTF("Enabled ATI/AMD BAR4 window quirk for device %04x:%02x:%02x.%x\n", |
1539 | vdev->host.domain, vdev->host.bus, vdev->host.slot, | |
1540 | vdev->host.function); | |
1541 | } |
1542 | ||
1543 | /* |
1544 | * Trap the BAR2 MMIO window to config space as well. | |
1545 | */ | |
1546 | static void vfio_probe_ati_bar2_4000_quirk(VFIODevice *vdev, int nr) | |
1547 | { |
1548 | PCIDevice *pdev = &vdev->pdev; | |
1549 | VFIOQuirk *quirk; |
1550 | ||
1551 | /* Only enable on newer devices where BAR2 is 64bit */ |
1552 | if (!vdev->has_vga || nr != 2 || !vdev->bars[2].mem64 || | |
1553 | pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_ATI) { |
1554 | return; | |
1555 | } | |
1556 | ||
1557 | quirk = g_malloc0(sizeof(*quirk)); |
1558 | quirk->vdev = vdev; | |
1559 | quirk->data.flags = quirk->data.read_flags = quirk->data.write_flags = 1; |
1560 | quirk->data.address_match = 0x4000; | |
1561 | quirk->data.address_mask = PCIE_CONFIG_SPACE_SIZE - 1; | |
1562 | quirk->data.bar = nr; | |
1563 | ||
1564 | memory_region_init_io(&quirk->mem, OBJECT(vdev), &vfio_generic_quirk, quirk, | |
1565 | "vfio-ati-bar2-4000-quirk", | |
1566 | TARGET_PAGE_ALIGN(quirk->data.address_mask + 1)); | |
1567 | memory_region_add_subregion_overlap(&vdev->bars[nr].mem, | |
1568 | quirk->data.address_match & TARGET_PAGE_MASK, | |
1569 | &quirk->mem, 1); | |
1570 | |
1571 | QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next); | |
1572 | ||
39360f0b | 1573 | DPRINTF("Enabled ATI/AMD BAR2 0x4000 quirk for device %04x:%02x:%02x.%x\n", |
1574 | vdev->host.domain, vdev->host.bus, vdev->host.slot, |
1575 | vdev->host.function); | |
1576 | } | |
1577 | ||
1578 | /* |
1579 | * Older ATI/AMD cards like the X550 have a similar window to that above. | |
1580 | * I/O port BAR1 provides a window to a mirror of PCI config space located | |
1581 | * in BAR2 at offset 0xf00. We don't care to support such older cards, but | |
1582 | * note it for future reference. | |
1583 | */ | |
1584 | ||
1585 | #define PCI_VENDOR_ID_NVIDIA 0x10de |
1586 | ||
1587 | /* | |
1588 | * Nvidia has several different methods to get to config space; the | |
1589 | * nouveau project has several of these documented here: | |
1590 | * https://github.com/pathscale/envytools/tree/master/hwdocs | |
1591 | * | |
1592 | * The first quirk is actually not documented in envytools and is found | |
1593 | * on 10de:01d1 (NVIDIA Corporation G72 [GeForce 7300 LE]). This is an | |
1594 | * NV46 chipset. The backdoor uses the legacy VGA I/O ports to access | |
1595 | * the mirror of PCI config space found at BAR0 offset 0x1800. The access | |
1596 | * sequence first writes 0x338 to I/O port 0x3d4. The target offset is | |
1597 | * then written to 0x3d0. Finally 0x538 is written for a read and 0x738 | |
1598 | * is written for a write to 0x3d4. The BAR0 offset is then accessible | |
1599 | * through 0x3d0. This quirk doesn't seem to be necessary on newer cards | |
1600 | * that use the I/O port BAR5 window but it doesn't hurt to leave it. | |
1601 | */ | |
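/*
 * Sketch of the access sequence described above (hypothetical guest code):
 *
 *   outw(0x3d4, 0x338);     // arm the backdoor
 *   outw(0x3d0, offset);    // offset into the BAR0 0x1800 config mirror
 *   outw(0x3d4, 0x538);     // select a read (0x738 selects a write)
 *   val = inl(0x3d0);       // data phase, up to 4 bytes
 */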
1602 | enum { | |
39360f0b | 1603 | NV_3D0_NONE = 0, |
1604 | NV_3D0_SELECT, |
1605 | NV_3D0_WINDOW, | |
1606 | NV_3D0_READ, | |
1607 | NV_3D0_WRITE, | |
1608 | }; | |
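/*
 * The handlers below step this state machine one transition per access:
 * NONE -(0x338 to 0x3d4)-> SELECT -(offset to 0x3d0)-> WINDOW
 * -(0x538/0x738 to 0x3d4)-> READ/WRITE, falling back to NONE on any
 * non-matching access.
 */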
1609 | ||
1610 | static uint64_t vfio_nvidia_3d0_quirk_read(void *opaque, | |
1611 | hwaddr addr, unsigned size) | |
1612 | { | |
1613 | VFIOQuirk *quirk = opaque; | |
1614 | VFIODevice *vdev = quirk->vdev; | |
1615 | PCIDevice *pdev = &vdev->pdev; | |
1616 | uint64_t data = vfio_vga_read(&vdev->vga.region[QEMU_PCI_VGA_IO_HI], | |
39360f0b | 1617 | addr + quirk->data.base_offset, size); |
7076eabc | 1618 | |
1619 | if (quirk->data.flags == NV_3D0_READ && addr == quirk->data.data_offset) { |
1620 | data = vfio_pci_read_config(pdev, quirk->data.address_val, size); | |
1621 | DPRINTF("%s(0x3d0, %d) = 0x%"PRIx64"\n", __func__, size, data); |
1622 | } | |
1623 | ||
39360f0b | 1624 | quirk->data.flags = NV_3D0_NONE; |
1625 | |
1626 | return data; | |
1627 | } | |
1628 | ||
1629 | static void vfio_nvidia_3d0_quirk_write(void *opaque, hwaddr addr, | |
1630 | uint64_t data, unsigned size) | |
1631 | { | |
1632 | VFIOQuirk *quirk = opaque; | |
1633 | VFIODevice *vdev = quirk->vdev; | |
1634 | PCIDevice *pdev = &vdev->pdev; | |
1635 | ||
39360f0b | 1636 | switch (quirk->data.flags) { |
7076eabc | 1637 | case NV_3D0_NONE: |
1638 | if (addr == quirk->data.address_offset && data == 0x338) { |
1639 | quirk->data.flags = NV_3D0_SELECT; | |
1640 | } |
1641 | break; | |
1642 | case NV_3D0_SELECT: | |
1643 | quirk->data.flags = NV_3D0_NONE; |
1644 | if (addr == quirk->data.data_offset && | |
1645 | (data & ~quirk->data.address_mask) == quirk->data.address_match) { | |
1646 | quirk->data.flags = NV_3D0_WINDOW; | |
1647 | quirk->data.address_val = data & quirk->data.address_mask; | |
1648 | } |
1649 | break; | |
1650 | case NV_3D0_WINDOW: | |
1651 | quirk->data.flags = NV_3D0_NONE; |
1652 | if (addr == quirk->data.address_offset) { | |
7076eabc | 1653 | if (data == 0x538) { |
39360f0b | 1654 | quirk->data.flags = NV_3D0_READ; |
7076eabc | 1655 | } else if (data == 0x738) { |
39360f0b | 1656 | quirk->data.flags = NV_3D0_WRITE; |
1657 | } |
1658 | } | |
1659 | break; | |
1660 | case NV_3D0_WRITE: | |
1661 | quirk->data.flags = NV_3D0_NONE; |
1662 | if (addr == quirk->data.data_offset) { | |
1663 | vfio_pci_write_config(pdev, quirk->data.address_val, data, size); | |
1664 | DPRINTF("%s(0x3d0, 0x%"PRIx64", %d)\n", __func__, data, size); |
1665 | return; | |
1666 | } | |
1667 | break; | |
1668 | } |
1669 | ||
1670 | vfio_vga_write(&vdev->vga.region[QEMU_PCI_VGA_IO_HI], | |
39360f0b | 1671 | addr + quirk->data.base_offset, data, size); |
1672 | } |
1673 | ||
1674 | static const MemoryRegionOps vfio_nvidia_3d0_quirk = { | |
1675 | .read = vfio_nvidia_3d0_quirk_read, | |
1676 | .write = vfio_nvidia_3d0_quirk_write, | |
1677 | .endianness = DEVICE_LITTLE_ENDIAN, | |
1678 | }; | |
1679 | ||
1680 | static void vfio_vga_probe_nvidia_3d0_quirk(VFIODevice *vdev) | |
1681 | { | |
1682 | PCIDevice *pdev = &vdev->pdev; | |
1683 | VFIOQuirk *quirk; | |
1684 | ||
1685 | if (pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_NVIDIA || | |
1686 | !vdev->bars[1].size) { | |
1687 | return; | |
1688 | } | |
1689 | ||
1690 | quirk = g_malloc0(sizeof(*quirk)); | |
1691 | quirk->vdev = vdev; | |
1692 | quirk->data.base_offset = 0x10; |
1693 | quirk->data.address_offset = 4; | |
1694 | quirk->data.address_size = 2; | |
1695 | quirk->data.address_match = 0x1800; | |
1696 | quirk->data.address_mask = PCI_CONFIG_SPACE_SIZE - 1; | |
1697 | quirk->data.data_offset = 0; | |
1698 | quirk->data.data_size = 4; | |
1699 | ||
1700 | memory_region_init_io(&quirk->mem, OBJECT(vdev), &vfio_nvidia_3d0_quirk, | |
1701 | quirk, "vfio-nvidia-3d0-quirk", 6); | |
7076eabc | 1702 | memory_region_add_subregion(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem, |
39360f0b | 1703 | quirk->data.base_offset, &quirk->mem); |
1704 | |
1705 | QLIST_INSERT_HEAD(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].quirks, | |
1706 | quirk, next); | |
1707 | ||
1708 | DPRINTF("Enabled NVIDIA VGA 0x3d0 quirk for device %04x:%02x:%02x.%x\n", | |
1709 | vdev->host.domain, vdev->host.bus, vdev->host.slot, | |
1710 | vdev->host.function); | |
1711 | } | |
1712 | ||
1713 | /* | |
1714 | * The second quirk is documented in envytools. The I/O port BAR5 is just | |
1715 | * a set of address/data ports to the MMIO BARs. The BAR we care about is | |
1716 | * again BAR0. This backdoor is apparently a bit newer than the one above, | |
1717 | * so we need to trap not only the 256 bytes @0x1800 but also all of PCI | |
1718 | * config space, including extended space, available at the 4k @0x88000. | |
1719 | */ | |
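/*
 * Layout of the BAR5 window registers as handled below (a sketch inferred
 * from the write handler, not from documentation):
 *   0x0: master enable, bit 0        0x8: address (0x1800- or 0x88000-based)
 *   0x4: window enable, bit 0        0xc: data
 */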
1720 | enum { | |
1721 | NV_BAR5_ADDRESS = 0x1, | |
1722 | NV_BAR5_ENABLE = 0x2, | |
1723 | NV_BAR5_MASTER = 0x4, | |
1724 | NV_BAR5_VALID = 0x7, | |
1725 | }; | |
1726 | ||
1727 | static void vfio_nvidia_bar5_window_quirk_write(void *opaque, hwaddr addr, |
1728 | uint64_t data, unsigned size) | |
1729 | { | |
1730 | VFIOQuirk *quirk = opaque; | |
7076eabc | 1731 | |
1732 | switch (addr) { |
1733 | case 0x0: | |
1734 | if (data & 0x1) { | |
39360f0b | 1735 | quirk->data.flags |= NV_BAR5_MASTER; |
7076eabc | 1736 | } else { |
39360f0b | 1737 | quirk->data.flags &= ~NV_BAR5_MASTER; |
1738 | } |
1739 | break; | |
1740 | case 0x4: | |
1741 | if (data & 0x1) { | |
39360f0b | 1742 | quirk->data.flags |= NV_BAR5_ENABLE; |
7076eabc | 1743 | } else { |
39360f0b | 1744 | quirk->data.flags &= ~NV_BAR5_ENABLE; |
1745 | } |
1746 | break; | |
1747 | case 0x8: | |
39360f0b | 1748 | if (quirk->data.flags & NV_BAR5_MASTER) { |
7076eabc | 1749 | if ((data & ~0xfff) == 0x88000) { |
1750 | quirk->data.flags |= NV_BAR5_ADDRESS; |
1751 | quirk->data.address_val = data & 0xfff; | |
7076eabc | 1752 | } else if ((data & ~0xff) == 0x1800) { |
1753 | quirk->data.flags |= NV_BAR5_ADDRESS; |
1754 | quirk->data.address_val = data & 0xff; | |
7076eabc | 1755 | } else { |
39360f0b | 1756 | quirk->data.flags &= ~NV_BAR5_ADDRESS; |
1757 | } |
1758 | } | |
1759 | break; | |
1760 | } |
1761 | ||
39360f0b | 1762 | vfio_generic_window_quirk_write(opaque, addr, data, size); |
1763 | } |
1764 | ||
1765 | static const MemoryRegionOps vfio_nvidia_bar5_window_quirk = { | |
39360f0b | 1766 | .read = vfio_generic_window_quirk_read, |
1767 | .write = vfio_nvidia_bar5_window_quirk_write, |
1768 | .valid.min_access_size = 4, | |
1769 | .endianness = DEVICE_LITTLE_ENDIAN, | |
1770 | }; | |
1771 | ||
1772 | static void vfio_probe_nvidia_bar5_window_quirk(VFIODevice *vdev, int nr) | |
1773 | { | |
1774 | PCIDevice *pdev = &vdev->pdev; | |
1775 | VFIOQuirk *quirk; | |
1776 | ||
1777 | if (!vdev->has_vga || nr != 5 || | |
1778 | pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_NVIDIA) { | |
1779 | return; | |
1780 | } | |
1781 | ||
1782 | quirk = g_malloc0(sizeof(*quirk)); | |
1783 | quirk->vdev = vdev; | |
1784 | quirk->data.read_flags = quirk->data.write_flags = NV_BAR5_VALID; |
1785 | quirk->data.address_offset = 0x8; | |
1786 | quirk->data.address_size = 0; /* actually 4, but avoids generic code */ | |
1787 | quirk->data.data_offset = 0xc; | |
1788 | quirk->data.data_size = 4; | |
1789 | quirk->data.bar = nr; | |
1790 | ||
1791 | memory_region_init_io(&quirk->mem, OBJECT(vdev), | |
1792 | &vfio_nvidia_bar5_window_quirk, quirk, | |
1793 | "vfio-nvidia-bar5-window-quirk", 16); |
1794 | memory_region_add_subregion_overlap(&vdev->bars[nr].mem, 0, &quirk->mem, 1); | |
1795 | ||
1796 | QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next); | |
1797 | ||
1798 | DPRINTF("Enabled NVIDIA BAR5 window quirk for device %04x:%02x:%02x.%x\n", | |
1799 | vdev->host.domain, vdev->host.bus, vdev->host.slot, | |
1800 | vdev->host.function); | |
1801 | } | |
1802 | ||
1803 | /* | |
1804 | * Finally, BAR0 itself. We want to redirect any accesses to either | |
1805 | * 0x1800 or 0x88000 through the PCI config space access functions. | |
1806 | * | |
1807 | * NB - quirks must be page granularity or else they don't seem to work | |
1808 | * when BARs are mmap'd. | |
1809 | * | |
1810 | * Here's offset 0x88000... | |
1811 | */ | |
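/*
 * Example, assuming 4k target pages: address_match 0x88000 with mask
 * PCIE_CONFIG_SPACE_SIZE - 1 (0xfff) yields a one-page subregion at
 * 0x88000; the generic quirk passes any non-matching accesses within the
 * page straight through to the BAR.
 */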
1812 | static void vfio_probe_nvidia_bar0_88000_quirk(VFIODevice *vdev, int nr) |
1813 | { | |
1814 | PCIDevice *pdev = &vdev->pdev; | |
1815 | VFIOQuirk *quirk; | |
1816 | ||
1817 | if (!vdev->has_vga || nr != 0 || | |
1818 | pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_NVIDIA) { | |
1819 | return; | |
1820 | } | |
1821 | ||
1822 | quirk = g_malloc0(sizeof(*quirk)); | |
1823 | quirk->vdev = vdev; | |
1824 | quirk->data.flags = quirk->data.read_flags = quirk->data.write_flags = 1; |
1825 | quirk->data.address_match = 0x88000; | |
1826 | quirk->data.address_mask = PCIE_CONFIG_SPACE_SIZE - 1; | |
1827 | quirk->data.bar = nr; | |
1828 | ||
1829 | memory_region_init_io(&quirk->mem, OBJECT(vdev), &vfio_generic_quirk, | |
1830 | quirk, "vfio-nvidia-bar0-88000-quirk", | |
1831 | TARGET_PAGE_ALIGN(quirk->data.address_mask + 1)); | |
7076eabc | 1832 | memory_region_add_subregion_overlap(&vdev->bars[nr].mem, |
1833 | quirk->data.address_match & TARGET_PAGE_MASK, |
1834 | &quirk->mem, 1); | |
1835 | |
1836 | QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next); | |
1837 | ||
1838 | DPRINTF("Enabled NVIDIA BAR0 0x88000 quirk for device %04x:%02x:%02x.%x\n", | |
1839 | vdev->host.domain, vdev->host.bus, vdev->host.slot, | |
1840 | vdev->host.function); | |
1841 | } | |
1842 | ||
1843 | /* | |
1844 | * And here's the same for BAR0 offset 0x1800... | |
1845 | */ | |
1846 | static void vfio_probe_nvidia_bar0_1800_quirk(VFIODevice *vdev, int nr) |
1847 | { | |
1848 | PCIDevice *pdev = &vdev->pdev; | |
1849 | VFIOQuirk *quirk; | |
1850 | ||
1851 | if (!vdev->has_vga || nr != 0 || | |
1852 | pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_NVIDIA) { | |
1853 | return; | |
1854 | } | |
1855 | ||
1856 | /* Log the chipset ID */ | |
1857 | DPRINTF("Nvidia NV%02x\n", | |
1858 | (unsigned int)(vfio_bar_read(&vdev->bars[0], 0, 4) >> 20) & 0xff); | |
1859 | ||
1860 | quirk = g_malloc0(sizeof(*quirk)); | |
1861 | quirk->vdev = vdev; | |
1862 | quirk->data.flags = quirk->data.read_flags = quirk->data.write_flags = 1; |
1863 | quirk->data.address_match = 0x1800; | |
1864 | quirk->data.address_mask = PCI_CONFIG_SPACE_SIZE - 1; | |
1865 | quirk->data.bar = nr; | |
7076eabc | 1866 | |
39360f0b | 1867 | memory_region_init_io(&quirk->mem, OBJECT(vdev), &vfio_generic_quirk, quirk, |
7076eabc | 1868 | "vfio-nvidia-bar0-1800-quirk", |
39360f0b | 1869 | TARGET_PAGE_ALIGN(quirk->data.address_mask + 1)); |
7076eabc | 1870 | memory_region_add_subregion_overlap(&vdev->bars[nr].mem, |
1871 | quirk->data.address_match & TARGET_PAGE_MASK, |
1872 | &quirk->mem, 1); | |
1873 | |
1874 | QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next); | |
1875 | ||
1876 | DPRINTF("Enabled NVIDIA BAR0 0x1800 quirk for device %04x:%02x:%02x.%x\n", | |
1877 | vdev->host.domain, vdev->host.bus, vdev->host.slot, | |
1878 | vdev->host.function); | |
1879 | } | |
1880 | ||
1881 | /* | |
1882 | * TODO - Some Nvidia devices provide config access to their companion HDA | |
1883 | * device and even to their parent bridge via these config space mirrors. | |
1884 | * Add quirks for those regions. | |
1885 | */ | |
1886 | ||
1887 | /* | |
1888 | * Common quirk probe entry points. | |
1889 | */ | |
1890 | static void vfio_vga_quirk_setup(VFIODevice *vdev) | |
1891 | { | |
1892 | vfio_vga_probe_ati_3c3_quirk(vdev); | |
1893 | vfio_vga_probe_nvidia_3d0_quirk(vdev); | |
1894 | } | |
1895 | ||
1896 | static void vfio_vga_quirk_teardown(VFIODevice *vdev) | |
1897 | { | |
1898 | int i; | |
1899 | ||
1900 | for (i = 0; i < ARRAY_SIZE(vdev->vga.region); i++) { | |
1901 | while (!QLIST_EMPTY(&vdev->vga.region[i].quirks)) { | |
1902 | VFIOQuirk *quirk = QLIST_FIRST(&vdev->vga.region[i].quirks); | |
1903 | memory_region_del_subregion(&vdev->vga.region[i].mem, &quirk->mem); | |
1904 | QLIST_REMOVE(quirk, next); | |
1905 | g_free(quirk); | |
1906 | } | |
1907 | } | |
1908 | } | |
1909 | ||
1910 | static void vfio_bar_quirk_setup(VFIODevice *vdev, int nr) | |
1911 | { | |
1912 | vfio_probe_ati_bar4_window_quirk(vdev, nr); |
1913 | vfio_probe_ati_bar2_4000_quirk(vdev, nr); | |
1914 | vfio_probe_nvidia_bar5_window_quirk(vdev, nr); |
1915 | vfio_probe_nvidia_bar0_88000_quirk(vdev, nr); | |
1916 | vfio_probe_nvidia_bar0_1800_quirk(vdev, nr); | |
1917 | } | |
1918 | ||
1919 | static void vfio_bar_quirk_teardown(VFIODevice *vdev, int nr) | |
1920 | { | |
1921 | VFIOBAR *bar = &vdev->bars[nr]; | |
1922 | ||
1923 | while (!QLIST_EMPTY(&bar->quirks)) { | |
1924 | VFIOQuirk *quirk = QLIST_FIRST(&bar->quirks); | |
1925 | memory_region_del_subregion(&bar->mem, &quirk->mem); | |
1926 | QLIST_REMOVE(quirk, next); | |
1927 | g_free(quirk); | |
1928 | } | |
1929 | } | |
1930 | ||
1931 | /* |
1932 | * PCI config space | |
1933 | */ | |
1934 | static uint32_t vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len) | |
1935 | { | |
1936 | VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev); | |
4b5d5e87 | 1937 | uint32_t emu_bits = 0, emu_val = 0, phys_val = 0, val; |
65501a74 | 1938 | |
1939 | memcpy(&emu_bits, vdev->emulated_config_bits + addr, len); |
1940 | emu_bits = le32_to_cpu(emu_bits); | |
65501a74 | 1941 | |
1942 | if (emu_bits) { |
1943 | emu_val = pci_default_read_config(pdev, addr, len); | |
1944 | } | |
1945 | ||
1946 | if (~emu_bits & (0xffffffffU >> (32 - len * 8))) { | |
1947 | ssize_t ret; | |
1948 | ||
1949 | ret = pread(vdev->fd, &phys_val, len, vdev->config_offset + addr); | |
1950 | if (ret != len) { | |
312fd5f2 | 1951 | error_report("%s(%04x:%02x:%02x.%x, 0x%x, 0x%x) failed: %m", |
1952 | __func__, vdev->host.domain, vdev->host.bus, |
1953 | vdev->host.slot, vdev->host.function, addr, len); | |
1954 | return -errno; | |
1955 | } | |
4b5d5e87 | 1956 | phys_val = le32_to_cpu(phys_val); |
1957 | } |
1958 | ||
4b5d5e87 | 1959 | val = (emu_val & emu_bits) | (phys_val & ~emu_bits); |
1960 | |
1961 | DPRINTF("%s(%04x:%02x:%02x.%x, @0x%x, len=0x%x) %x\n", __func__, | |
1962 | vdev->host.domain, vdev->host.bus, vdev->host.slot, | |
1963 | vdev->host.function, addr, len, val); | |
1964 | ||
1965 | return val; | |
1966 | } | |
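/*
 * Worked example: a 2-byte read at an offset where only the low byte is
 * emulated gives emu_bits == 0x00ff, so val merges the QEMU-emulated low
 * byte with the high byte read from the device via pread() above.
 */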
1967 | ||
1968 | static void vfio_pci_write_config(PCIDevice *pdev, uint32_t addr, | |
1969 | uint32_t val, int len) | |
1970 | { | |
1971 | VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev); | |
1972 | uint32_t val_le = cpu_to_le32(val); | |
1973 | ||
1974 | DPRINTF("%s(%04x:%02x:%02x.%x, @0x%x, 0x%x, len=0x%x)\n", __func__, | |
1975 | vdev->host.domain, vdev->host.bus, vdev->host.slot, | |
1976 | vdev->host.function, addr, val, len); | |
1977 | ||
1978 | /* Write everything to VFIO, let it filter out what we can't write */ | |
1979 | if (pwrite(vdev->fd, &val_le, len, vdev->config_offset + addr) != len) { | |
312fd5f2 | 1980 | error_report("%s(%04x:%02x:%02x.%x, 0x%x, 0x%x, 0x%x) failed: %m", |
1981 | __func__, vdev->host.domain, vdev->host.bus, |
1982 | vdev->host.slot, vdev->host.function, addr, val, len); | |
1983 | } | |
1984 | ||
1985 | /* MSI/MSI-X Enabling/Disabling */ |
1986 | if (pdev->cap_present & QEMU_PCI_CAP_MSI && | |
1987 | ranges_overlap(addr, len, pdev->msi_cap, vdev->msi_cap_size)) { | |
1988 | int is_enabled, was_enabled = msi_enabled(pdev); | |
1989 | ||
1990 | pci_default_write_config(pdev, addr, val, len); | |
1991 | ||
1992 | is_enabled = msi_enabled(pdev); | |
1993 | ||
1994 | if (!was_enabled) { |
1995 | if (is_enabled) { | |
1996 | vfio_enable_msi(vdev); | |
1997 | } | |
1998 | } else { | |
1999 | if (!is_enabled) { | |
2000 | vfio_disable_msi(vdev); | |
2001 | } else { | |
2002 | vfio_update_msi(vdev); | |
2003 | } | |
65501a74 | 2004 | } |
4b5d5e87 | 2005 | } else if (pdev->cap_present & QEMU_PCI_CAP_MSIX && |
2006 | ranges_overlap(addr, len, pdev->msix_cap, MSIX_CAP_LENGTH)) { |
2007 | int is_enabled, was_enabled = msix_enabled(pdev); | |
2008 | ||
2009 | pci_default_write_config(pdev, addr, val, len); | |
2010 | ||
2011 | is_enabled = msix_enabled(pdev); | |
2012 | ||
2013 | if (!was_enabled && is_enabled) { | |
fd704adc | 2014 | vfio_enable_msix(vdev); |
65501a74 | 2015 | } else if (was_enabled && !is_enabled) { |
fd704adc | 2016 | vfio_disable_msix(vdev); |
65501a74 | 2017 | } |
2018 | } else { |
2019 | /* Write everything to QEMU to keep emulated bits correct */ | |
2020 | pci_default_write_config(pdev, addr, val, len); | |
2021 | } |
2022 | } | |
2023 | ||
2024 | /* | |
2025 | * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86 | |
2026 | */ | |
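/*
 * A typical call, as issued from the memory listener below (sketch):
 *
 *   vfio_dma_map(container, iova, size, vaddr, false);
 *
 * which becomes a VFIO_IOMMU_MAP_DMA ioctl on the container fd with the
 * READ flag always set and WRITE set unless the region is read-only.
 */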
af6bc27e | 2027 | static int vfio_dma_unmap(VFIOContainer *container, |
a8170e5e | 2028 | hwaddr iova, ram_addr_t size) |
2029 | { |
2030 | struct vfio_iommu_type1_dma_unmap unmap = { | |
2031 | .argsz = sizeof(unmap), | |
2032 | .flags = 0, | |
2033 | .iova = iova, | |
2034 | .size = size, | |
2035 | }; | |
2036 | ||
2037 | if (ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) { | |
2038 | DPRINTF("VFIO_UNMAP_DMA: %d\n", -errno); | |
2039 | return -errno; | |
2040 | } | |
2041 | ||
2042 | return 0; | |
2043 | } | |
2044 | ||
a8170e5e | 2045 | static int vfio_dma_map(VFIOContainer *container, hwaddr iova, |
2046 | ram_addr_t size, void *vaddr, bool readonly) |
2047 | { | |
2048 | struct vfio_iommu_type1_dma_map map = { | |
2049 | .argsz = sizeof(map), | |
2050 | .flags = VFIO_DMA_MAP_FLAG_READ, | |
5976cdd5 | 2051 | .vaddr = (__u64)(uintptr_t)vaddr, |
2052 | .iova = iova, |
2053 | .size = size, | |
2054 | }; | |
2055 | ||
2056 | if (!readonly) { | |
2057 | map.flags |= VFIO_DMA_MAP_FLAG_WRITE; | |
2058 | } | |
2059 | ||
2060 | /* |
2061 | * Try the mapping, if it fails with EBUSY, unmap the region and try | |
2062 | * again. This shouldn't be necessary, but we sometimes see it in | |
2063 | * the VGA ROM space. | |
2064 | */ | |
2065 | if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0 || | |
2066 | (errno == EBUSY && vfio_dma_unmap(container, iova, size) == 0 && | |
2067 | ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0)) { | |
2068 | return 0; | |
2069 | } |
2070 | ||
2071 | DPRINTF("VFIO_MAP_DMA: %d\n", -errno); |
2072 | return -errno; | |
2073 | } |
2074 | ||
2075 | static bool vfio_listener_skipped_section(MemoryRegionSection *section) |
2076 | { | |
2077 | return !memory_region_is_ram(section->mr); | |
2078 | } | |
2079 | ||
2080 | static void vfio_listener_region_add(MemoryListener *listener, | |
2081 | MemoryRegionSection *section) | |
2082 | { | |
2083 | VFIOContainer *container = container_of(listener, VFIOContainer, | |
2084 | iommu_data.listener); | |
a8170e5e | 2085 | hwaddr iova, end; |
2086 | void *vaddr; |
2087 | int ret; | |
2088 | ||
2089 | assert(!memory_region_is_iommu(section->mr)); |
2090 | ||
65501a74 | 2091 | if (vfio_listener_skipped_section(section)) { |
82ca8912 | 2092 | DPRINTF("SKIPPING region_add %"HWADDR_PRIx" - %"PRIx64"\n", |
65501a74 | 2093 | section->offset_within_address_space, |
2094 | section->offset_within_address_space + |
2095 | int128_get64(int128_sub(section->size, int128_one()))); | |
2096 | return; |
2097 | } | |
2098 | ||
2099 | if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) != | |
2100 | (section->offset_within_region & ~TARGET_PAGE_MASK))) { | |
312fd5f2 | 2101 | error_report("%s received unaligned region", __func__); |
2102 | return; |
2103 | } | |
2104 | ||
2105 | iova = TARGET_PAGE_ALIGN(section->offset_within_address_space); | |
052e87b0 | 2106 | end = (section->offset_within_address_space + int128_get64(section->size)) & |
2107 | TARGET_PAGE_MASK; |
2108 | ||
2109 | if (iova >= end) { | |
2110 | return; | |
2111 | } | |
2112 | ||
2113 | vaddr = memory_region_get_ram_ptr(section->mr) + | |
2114 | section->offset_within_region + | |
2115 | (iova - section->offset_within_address_space); | |
2116 | ||
82ca8912 | 2117 | DPRINTF("region_add %"HWADDR_PRIx" - %"HWADDR_PRIx" [%p]\n", |
2118 | iova, end - 1, vaddr); |
2119 | ||
dfde4e6e | 2120 | memory_region_ref(section->mr); |
2121 | ret = vfio_dma_map(container, iova, end - iova, vaddr, section->readonly); |
2122 | if (ret) { | |
a8170e5e | 2123 | error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", " |
312fd5f2 | 2124 | "0x%"HWADDR_PRIx", %p) = %d (%m)", |
2125 | container, iova, end - iova, vaddr, ret); |
2126 | } | |
2127 | } | |
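/*
 * Example: a RAM section at 0x100000 of size 0x20000 maps iova 0x100000
 * through 0x11ffff; any sub-page head or tail is clipped by the
 * TARGET_PAGE_ALIGN/TARGET_PAGE_MASK math above before mapping.
 */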
2128 | ||
2129 | static void vfio_listener_region_del(MemoryListener *listener, | |
2130 | MemoryRegionSection *section) | |
2131 | { | |
2132 | VFIOContainer *container = container_of(listener, VFIOContainer, | |
2133 | iommu_data.listener); | |
a8170e5e | 2134 | hwaddr iova, end; |
2135 | int ret; |
2136 | ||
2137 | if (vfio_listener_skipped_section(section)) { | |
82ca8912 | 2138 | DPRINTF("SKIPPING region_del %"HWADDR_PRIx" - %"PRIx64"\n", |
65501a74 | 2139 | section->offset_within_address_space, |
2140 | section->offset_within_address_space + |
2141 | int128_get64(int128_sub(section->size, int128_one()))); | |
2142 | return; |
2143 | } | |
2144 | ||
2145 | if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) != | |
2146 | (section->offset_within_region & ~TARGET_PAGE_MASK))) { | |
312fd5f2 | 2147 | error_report("%s received unaligned region", __func__); |
2148 | return; |
2149 | } | |
2150 | ||
2151 | iova = TARGET_PAGE_ALIGN(section->offset_within_address_space); | |
052e87b0 | 2152 | end = (section->offset_within_address_space + int128_get64(section->size)) & |
2153 | TARGET_PAGE_MASK; |
2154 | ||
2155 | if (iova >= end) { | |
2156 | return; | |
2157 | } | |
2158 | ||
82ca8912 | 2159 | DPRINTF("region_del %"HWADDR_PRIx" - %"HWADDR_PRIx"\n", |
2160 | iova, end - 1); |
2161 | ||
2162 | ret = vfio_dma_unmap(container, iova, end - iova); | |
dfde4e6e | 2163 | memory_region_unref(section->mr); |
65501a74 | 2164 | if (ret) { |
a8170e5e | 2165 | error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", " |
312fd5f2 | 2166 | "0x%"HWADDR_PRIx") = %d (%m)", |
2167 | container, iova, end - iova, ret); |
2168 | } | |
2169 | } | |
2170 | ||
2171 | static MemoryListener vfio_memory_listener = { | |
2172 | .region_add = vfio_listener_region_add, |
2173 | .region_del = vfio_listener_region_del, | |
2174 | }; |
2175 | ||
2176 | static void vfio_listener_release(VFIOContainer *container) | |
2177 | { | |
2178 | memory_listener_unregister(&container->iommu_data.listener); | |
2179 | } | |
2180 | ||
2181 | /* | |
2182 | * Interrupt setup | |
2183 | */ | |
2184 | static void vfio_disable_interrupts(VFIODevice *vdev) | |
2185 | { | |
2186 | switch (vdev->interrupt) { | |
2187 | case VFIO_INT_INTx: | |
2188 | vfio_disable_intx(vdev); | |
2189 | break; | |
2190 | case VFIO_INT_MSI: | |
fd704adc | 2191 | vfio_disable_msi(vdev); |
2192 | break; |
2193 | case VFIO_INT_MSIX: | |
fd704adc | 2194 | vfio_disable_msix(vdev); |
2195 | break; |
2196 | } | |
2197 | } | |
2198 | ||
2199 | static int vfio_setup_msi(VFIODevice *vdev, int pos) | |
2200 | { | |
2201 | uint16_t ctrl; | |
2202 | bool msi_64bit, msi_maskbit; | |
2203 | int ret, entries; | |
2204 | ||
2205 | if (pread(vdev->fd, &ctrl, sizeof(ctrl), |
2206 | vdev->config_offset + pos + PCI_CAP_FLAGS) != sizeof(ctrl)) { | |
2207 | return -errno; | |
2208 | } | |
2209 | ctrl = le16_to_cpu(ctrl); | |
2210 | ||
2211 | msi_64bit = !!(ctrl & PCI_MSI_FLAGS_64BIT); | |
2212 | msi_maskbit = !!(ctrl & PCI_MSI_FLAGS_MASKBIT); | |
2213 | entries = 1 << ((ctrl & PCI_MSI_FLAGS_QMASK) >> 1); | |
2214 | ||
2215 | DPRINTF("%04x:%02x:%02x.%x PCI MSI CAP @0x%x\n", vdev->host.domain, | |
2216 | vdev->host.bus, vdev->host.slot, vdev->host.function, pos); | |
2217 | ||
2218 | ret = msi_init(&vdev->pdev, pos, entries, msi_64bit, msi_maskbit); | |
2219 | if (ret < 0) { | |
2220 | if (ret == -ENOTSUP) { |
2221 | return 0; | |
2222 | } | |
312fd5f2 | 2223 | error_report("vfio: msi_init failed"); |
2224 | return ret; |
2225 | } | |
2226 | vdev->msi_cap_size = 0xa + (msi_maskbit ? 0xa : 0) + (msi_64bit ? 0x4 : 0); | |
2227 | ||
2228 | return 0; | |
2229 | } | |
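/*
 * Example: a 64-bit capable function with per-vector masking yields
 * msi_cap_size = 0xa + 0xa + 0x4 = 0x18 bytes, matching the largest MSI
 * capability layout in the PCI spec.
 */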
2230 | ||
2231 | /* | |
2232 | * We don't have any control over how pci_add_capability() inserts | |
2233 | * capabilities into the chain. In order to set up MSI-X we need a | |
2234 | * MemoryRegion for the BAR. In order to set up the BAR and not | |
2235 | * attempt to mmap the MSI-X table area, which VFIO won't allow, we | |
2236 | * need to first look for where the MSI-X table lives. So we | |
2237 | * unfortunately split MSI-X setup across two functions. | |
2238 | */ | |
2239 | static int vfio_early_setup_msix(VFIODevice *vdev) | |
2240 | { | |
2241 | uint8_t pos; | |
2242 | uint16_t ctrl; | |
2243 | uint32_t table, pba; | |
2244 | ||
2245 | pos = pci_find_capability(&vdev->pdev, PCI_CAP_ID_MSIX); | |
2246 | if (!pos) { | |
2247 | return 0; | |
2248 | } | |
2249 | ||
2250 | if (pread(vdev->fd, &ctrl, sizeof(ctrl), | |
2251 | vdev->config_offset + pos + PCI_CAP_FLAGS) != sizeof(ctrl)) { | |
2252 | return -errno; | |
2253 | } | |
2254 | ||
2255 | if (pread(vdev->fd, &table, sizeof(table), | |
2256 | vdev->config_offset + pos + PCI_MSIX_TABLE) != sizeof(table)) { | |
2257 | return -errno; | |
2258 | } | |
2259 | ||
2260 | if (pread(vdev->fd, &pba, sizeof(pba), | |
2261 | vdev->config_offset + pos + PCI_MSIX_PBA) != sizeof(pba)) { | |
2262 | return -errno; | |
2263 | } | |
2264 | ||
2265 | ctrl = le16_to_cpu(ctrl); | |
2266 | table = le32_to_cpu(table); | |
2267 | pba = le32_to_cpu(pba); | |
2268 | ||
2269 | vdev->msix = g_malloc0(sizeof(*(vdev->msix))); | |
2270 | vdev->msix->table_bar = table & PCI_MSIX_FLAGS_BIRMASK; | |
2271 | vdev->msix->table_offset = table & ~PCI_MSIX_FLAGS_BIRMASK; | |
2272 | vdev->msix->pba_bar = pba & PCI_MSIX_FLAGS_BIRMASK; | |
2273 | vdev->msix->pba_offset = pba & ~PCI_MSIX_FLAGS_BIRMASK; | |
2274 | vdev->msix->entries = (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1; | |
2275 | ||
2276 | DPRINTF("%04x:%02x:%02x.%x " | |
2277 | "PCI MSI-X CAP @0x%x, BAR %d, offset 0x%x, entries %d\n", | |
2278 | vdev->host.domain, vdev->host.bus, vdev->host.slot, | |
2279 | vdev->host.function, pos, vdev->msix->table_bar, | |
2280 | vdev->msix->table_offset, vdev->msix->entries); | |
2281 | ||
2282 | return 0; | |
2283 | } | |
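/*
 * Example: table == 0x00002004 decodes to table_bar 4 (the low BIR bits)
 * and table_offset 0x2000, which tells vfio_map_bar() below where the
 * mmap of BAR4 must be split around the vector table.
 */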
2284 | ||
2285 | static int vfio_setup_msix(VFIODevice *vdev, int pos) | |
2286 | { | |
2287 | int ret; | |
2288 | ||
2289 | ret = msix_init(&vdev->pdev, vdev->msix->entries, |
2290 | &vdev->bars[vdev->msix->table_bar].mem, | |
2291 | vdev->msix->table_bar, vdev->msix->table_offset, | |
2292 | &vdev->bars[vdev->msix->pba_bar].mem, | |
2293 | vdev->msix->pba_bar, vdev->msix->pba_offset, pos); | |
2294 | if (ret < 0) { | |
2295 | if (ret == -ENOTSUP) { |
2296 | return 0; | |
2297 | } | |
312fd5f2 | 2298 | error_report("vfio: msix_init failed"); |
2299 | return ret; |
2300 | } | |
2301 | ||
2302 | return 0; |
2303 | } | |
2304 | ||
2305 | static void vfio_teardown_msi(VFIODevice *vdev) | |
2306 | { | |
2307 | msi_uninit(&vdev->pdev); | |
2308 | ||
2309 | if (vdev->msix) { | |
2310 | msix_uninit(&vdev->pdev, &vdev->bars[vdev->msix->table_bar].mem, |
2311 | &vdev->bars[vdev->msix->pba_bar].mem); | |
2312 | } | |
2313 | } | |
2314 | ||
2315 | /* | |
2316 | * Resource setup | |
2317 | */ | |
2318 | static void vfio_mmap_set_enabled(VFIODevice *vdev, bool enabled) | |
2319 | { | |
2320 | int i; | |
2321 | ||
2322 | for (i = 0; i < PCI_ROM_SLOT; i++) { | |
2323 | VFIOBAR *bar = &vdev->bars[i]; | |
2324 | ||
2325 | if (!bar->size) { | |
2326 | continue; | |
2327 | } | |
2328 | ||
2329 | memory_region_set_enabled(&bar->mmap_mem, enabled); | |
2330 | if (vdev->msix && vdev->msix->table_bar == i) { | |
2331 | memory_region_set_enabled(&vdev->msix->mmap_mem, enabled); | |
2332 | } | |
2333 | } | |
2334 | } | |
2335 | ||
2336 | static void vfio_unmap_bar(VFIODevice *vdev, int nr) | |
2337 | { | |
2338 | VFIOBAR *bar = &vdev->bars[nr]; | |
2339 | ||
2340 | if (!bar->size) { | |
2341 | return; | |
2342 | } | |
2343 | ||
2344 | vfio_bar_quirk_teardown(vdev, nr); |
2345 | ||
2346 | memory_region_del_subregion(&bar->mem, &bar->mmap_mem); |
2347 | munmap(bar->mmap, memory_region_size(&bar->mmap_mem)); | |
2348 | ||
2349 | if (vdev->msix && vdev->msix->table_bar == nr) { | |
2350 | memory_region_del_subregion(&bar->mem, &vdev->msix->mmap_mem); | |
2351 | munmap(vdev->msix->mmap, memory_region_size(&vdev->msix->mmap_mem)); | |
2352 | } | |
2353 | ||
2354 | memory_region_destroy(&bar->mem); | |
2355 | } | |
2356 | ||
2357 | static int vfio_mmap_bar(VFIODevice *vdev, VFIOBAR *bar, |
2358 | MemoryRegion *mem, MemoryRegion *submem, | |
2359 | void **map, size_t size, off_t offset, |
2360 | const char *name) | |
2361 | { | |
2362 | int ret = 0; | |
2363 | ||
82ca8912 | 2364 | if (VFIO_ALLOW_MMAP && size && bar->flags & VFIO_REGION_INFO_FLAG_MMAP) { |
2365 | int prot = 0; |
2366 | ||
2367 | if (bar->flags & VFIO_REGION_INFO_FLAG_READ) { | |
2368 | prot |= PROT_READ; | |
2369 | } | |
2370 | ||
2371 | if (bar->flags & VFIO_REGION_INFO_FLAG_WRITE) { | |
2372 | prot |= PROT_WRITE; | |
2373 | } | |
2374 | ||
2375 | *map = mmap(NULL, size, prot, MAP_SHARED, | |
2376 | bar->fd, bar->fd_offset + offset); | |
2377 | if (*map == MAP_FAILED) { | |
2378 | *map = NULL; | |
2379 | ret = -errno; | |
2380 | goto empty_region; | |
2381 | } | |
2382 | ||
5cb022a1 | 2383 | memory_region_init_ram_ptr(submem, OBJECT(vdev), name, size, *map); |
2384 | } else { |
2385 | empty_region: | |
2386 | /* Create a zero sized sub-region to make cleanup easy. */ | |
5cb022a1 | 2387 | memory_region_init(submem, OBJECT(vdev), name, 0); |
2388 | } |
2389 | ||
2390 | memory_region_add_subregion(mem, offset, submem); | |
2391 | ||
2392 | return ret; | |
2393 | } | |
2394 | ||
2395 | static void vfio_map_bar(VFIODevice *vdev, int nr) | |
2396 | { | |
2397 | VFIOBAR *bar = &vdev->bars[nr]; | |
2398 | unsigned size = bar->size; | |
2399 | char name[64]; | |
2400 | uint32_t pci_bar; | |
2401 | uint8_t type; | |
2402 | int ret; | |
2403 | ||
2404 | /* Skip both unimplemented BARs and the upper half of 64bit BARS. */ | |
2405 | if (!size) { | |
2406 | return; | |
2407 | } | |
2408 | ||
2409 | snprintf(name, sizeof(name), "VFIO %04x:%02x:%02x.%x BAR %d", | |
2410 | vdev->host.domain, vdev->host.bus, vdev->host.slot, | |
2411 | vdev->host.function, nr); | |
2412 | ||
2413 | /* Determine what type of BAR this is for registration */ | |
2414 | ret = pread(vdev->fd, &pci_bar, sizeof(pci_bar), | |
2415 | vdev->config_offset + PCI_BASE_ADDRESS_0 + (4 * nr)); | |
2416 | if (ret != sizeof(pci_bar)) { | |
312fd5f2 | 2417 | error_report("vfio: Failed to read BAR %d (%m)", nr); |
2418 | return; |
2419 | } | |
2420 | ||
2421 | pci_bar = le32_to_cpu(pci_bar); | |
2422 | bar->ioport = (pci_bar & PCI_BASE_ADDRESS_SPACE_IO); |
2423 | bar->mem64 = bar->ioport ? 0 : (pci_bar & PCI_BASE_ADDRESS_MEM_TYPE_64); | |
2424 | type = pci_bar & (bar->ioport ? ~PCI_BASE_ADDRESS_IO_MASK : | |
2425 | ~PCI_BASE_ADDRESS_MEM_MASK); | |
2426 | |
2427 | /* A "slow" read/write mapping underlies all BARs */ | |
2428 | memory_region_init_io(&bar->mem, OBJECT(vdev), &vfio_bar_ops, |
2429 | bar, name, size); | |
2430 | pci_register_bar(&vdev->pdev, nr, type, &bar->mem); |
2431 | ||
2432 | /* | |
2433 | * We can't mmap areas overlapping the MSIX vector table, so we | |
2434 | * potentially insert a direct-mapped subregion before and after it. | |
2435 | */ | |
2436 | if (vdev->msix && vdev->msix->table_bar == nr) { | |
2437 | size = vdev->msix->table_offset & TARGET_PAGE_MASK; | |
2438 | } | |
2439 | ||
2440 | strncat(name, " mmap", sizeof(name) - strlen(name) - 1); | |
5cb022a1 | 2441 | if (vfio_mmap_bar(vdev, bar, &bar->mem, |
65501a74 | 2442 | &bar->mmap_mem, &bar->mmap, size, 0, name)) { |
312fd5f2 | 2443 | error_report("%s unsupported. Performance may be slow", name); |
2444 | } |
2445 | ||
2446 | if (vdev->msix && vdev->msix->table_bar == nr) { | |
2447 | unsigned start; | |
2448 | ||
2449 | start = TARGET_PAGE_ALIGN(vdev->msix->table_offset + | |
2450 | (vdev->msix->entries * PCI_MSIX_ENTRY_SIZE)); | |
2451 | ||
2452 | size = start < bar->size ? bar->size - start : 0; | |
2453 | strncat(name, " msix-hi", sizeof(name) - strlen(name) - 1); | |
2454 | /* VFIOMSIXInfo contains another MemoryRegion for this mapping */ | |
5cb022a1 | 2455 | if (vfio_mmap_bar(vdev, bar, &bar->mem, &vdev->msix->mmap_mem, |
65501a74 | 2456 | &vdev->msix->mmap, size, start, name)) { |
312fd5f2 | 2457 | error_report("%s unsupported. Performance may be slow", name); |
2458 | } |
2459 | } | |
2460 | |
2461 | vfio_bar_quirk_setup(vdev, nr); | |
2462 | } |
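/*
 * Example split, assuming 4k pages, a 64k BAR, a vector table at offset
 * 0x3000 and 16 entries: the " mmap" subregion covers [0x0, 0x3000) and
 * " msix-hi" covers [0x4000, 0x10000), leaving only the table page trapped.
 */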
2463 | ||
2464 | static void vfio_map_bars(VFIODevice *vdev) | |
2465 | { | |
2466 | int i; | |
2467 | ||
2468 | for (i = 0; i < PCI_ROM_SLOT; i++) { | |
2469 | vfio_map_bar(vdev, i); | |
2470 | } | |
2471 | |
2472 | if (vdev->has_vga) { | |
2473 | memory_region_init_io(&vdev->vga.region[QEMU_PCI_VGA_MEM].mem, |
2474 | OBJECT(vdev), &vfio_vga_ops, | |
2475 | &vdev->vga.region[QEMU_PCI_VGA_MEM], |
2476 | "vfio-vga-mmio@0xa0000", | |
2477 | QEMU_PCI_VGA_MEM_SIZE); | |
2478 | memory_region_init_io(&vdev->vga.region[QEMU_PCI_VGA_IO_LO].mem, |
2479 | OBJECT(vdev), &vfio_vga_ops, | |
2480 | &vdev->vga.region[QEMU_PCI_VGA_IO_LO], |
2481 | "vfio-vga-io@0x3b0", | |
2482 | QEMU_PCI_VGA_IO_LO_SIZE); | |
2483 | memory_region_init_io(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem, |
2484 | OBJECT(vdev), &vfio_vga_ops, | |
2485 | &vdev->vga.region[QEMU_PCI_VGA_IO_HI], |
2486 | "vfio-vga-io@0x3c0", | |
2487 | QEMU_PCI_VGA_IO_HI_SIZE); | |
2488 | ||
2489 | pci_register_vga(&vdev->pdev, &vdev->vga.region[QEMU_PCI_VGA_MEM].mem, | |
2490 | &vdev->vga.region[QEMU_PCI_VGA_IO_LO].mem, | |
2491 | &vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem); | |
7076eabc | 2492 | vfio_vga_quirk_setup(vdev); |
f15689c7 | 2493 | } |
2494 | } |
2495 | ||
2496 | static void vfio_unmap_bars(VFIODevice *vdev) | |
2497 | { | |
2498 | int i; | |
2499 | ||
2500 | for (i = 0; i < PCI_ROM_SLOT; i++) { | |
2501 | vfio_unmap_bar(vdev, i); | |
2502 | } | |
2503 | |
2504 | if (vdev->has_vga) { | |
7076eabc | 2505 | vfio_vga_quirk_teardown(vdev); |
2506 | pci_unregister_vga(&vdev->pdev); |
2507 | memory_region_destroy(&vdev->vga.region[QEMU_PCI_VGA_MEM].mem); | |
2508 | memory_region_destroy(&vdev->vga.region[QEMU_PCI_VGA_IO_LO].mem); | |
2509 | memory_region_destroy(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem); | |
2510 | } | |
2511 | } |
2512 | ||
2513 | /* | |
2514 | * General setup | |
2515 | */ | |
2516 | static uint8_t vfio_std_cap_max_size(PCIDevice *pdev, uint8_t pos) | |
2517 | { | |
2518 | uint8_t tmp, next = 0xff; | |
2519 | ||
2520 | for (tmp = pdev->config[PCI_CAPABILITY_LIST]; tmp; | |
2521 | tmp = pdev->config[tmp + 1]) { | |
2522 | if (tmp > pos && tmp < next) { | |
2523 | next = tmp; | |
2524 | } | |
2525 | } | |
2526 | ||
2527 | return next - pos; | |
2528 | } | |
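/*
 * Example: with capabilities chained at 0x40, 0x50 and 0x60, a query at
 * pos 0x50 finds 0x60 as the next-highest pointer and returns 0x10; for
 * the last capability next stays 0xff, sizing it to the end of standard
 * config space.
 */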
2529 | ||
2530 | static void vfio_set_word_bits(uint8_t *buf, uint16_t val, uint16_t mask) |
2531 | { | |
2532 | pci_set_word(buf, (pci_get_word(buf) & ~mask) | val); | |
2533 | } | |
2534 | ||
2535 | static void vfio_add_emulated_word(VFIODevice *vdev, int pos, | |
2536 | uint16_t val, uint16_t mask) | |
2537 | { | |
2538 | vfio_set_word_bits(vdev->pdev.config + pos, val, mask); | |
2539 | vfio_set_word_bits(vdev->pdev.wmask + pos, ~mask, mask); | |
2540 | vfio_set_word_bits(vdev->emulated_config_bits + pos, mask, mask); | |
2541 | } | |
2542 | ||
2543 | static void vfio_set_long_bits(uint8_t *buf, uint32_t val, uint32_t mask) | |
2544 | { | |
2545 | pci_set_long(buf, (pci_get_long(buf) & ~mask) | val); | |
2546 | } | |
2547 | ||
2548 | static void vfio_add_emulated_long(VFIODevice *vdev, int pos, | |
2549 | uint32_t val, uint32_t mask) | |
2550 | { | |
2551 | vfio_set_long_bits(vdev->pdev.config + pos, val, mask); | |
2552 | vfio_set_long_bits(vdev->pdev.wmask + pos, ~mask, mask); | |
2553 | vfio_set_long_bits(vdev->emulated_config_bits + pos, mask, mask); | |
2554 | } | |
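/*
 * Each helper above touches three parallel bitmaps: pdev.config (the value
 * the guest reads), pdev.wmask (the masked bits become guest read-only) and
 * emulated_config_bits (marking which bits vfio_pci_read_config() serves
 * from QEMU instead of the physical device).
 */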
2555 | ||
2556 | static int vfio_setup_pcie_cap(VFIODevice *vdev, int pos, uint8_t size) | |
2557 | { | |
2558 | uint16_t flags; | |
2559 | uint8_t type; | |
2560 | ||
2561 | flags = pci_get_word(vdev->pdev.config + pos + PCI_CAP_FLAGS); | |
2562 | type = (flags & PCI_EXP_FLAGS_TYPE) >> 4; | |
2563 | ||
2564 | if (type != PCI_EXP_TYPE_ENDPOINT && | |
2565 | type != PCI_EXP_TYPE_LEG_END && | |
2566 | type != PCI_EXP_TYPE_RC_END) { | |
2567 | ||
2568 | error_report("vfio: Assignment of PCIe type 0x%x " | |
2569 | "devices is not currently supported", type); | |
2570 | return -EINVAL; | |
2571 | } | |
2572 | ||
2573 | if (!pci_bus_is_express(vdev->pdev.bus)) { | |
2574 | /* | |
2575 | * Use the express capability as-is on a PCI bus. It doesn't make much | |
2576 | * sense to even expose it, but some drivers (e.g. tg3) depend on it | |
2577 | * and guests don't seem to be particular about it. We'll need | |
2578 | * to revisit this or force express devices onto express buses if we | |
2579 | * ever expose an IOMMU to the guest. | |
2580 | */ | |
2581 | } else if (pci_bus_is_root(vdev->pdev.bus)) { | |
2582 | /* | |
2583 | * On a Root Complex bus Endpoints become Root Complex Integrated | |
2584 | * Endpoints, which changes the type and clears the LNK & LNK2 fields. | |
2585 | */ | |
2586 | if (type == PCI_EXP_TYPE_ENDPOINT) { | |
2587 | vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS, | |
2588 | PCI_EXP_TYPE_RC_END << 4, | |
2589 | PCI_EXP_FLAGS_TYPE); | |
2590 | ||
2591 | /* Link Capabilities, Status, and Control go away */ | |
2592 | if (size > PCI_EXP_LNKCTL) { | |
2593 | vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP, 0, ~0); | |
2594 | vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0); | |
2595 | vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA, 0, ~0); | |
2596 | ||
2597 | #ifndef PCI_EXP_LNKCAP2 | |
2598 | #define PCI_EXP_LNKCAP2 44 | |
2599 | #endif | |
2600 | #ifndef PCI_EXP_LNKSTA2 | |
2601 | #define PCI_EXP_LNKSTA2 50 | |
2602 | #endif | |
2603 | /* Link 2 Capabilities, Status, and Control go away */ | |
2604 | if (size > PCI_EXP_LNKCAP2) { | |
2605 | vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP2, 0, ~0); | |
2606 | vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL2, 0, ~0); | |
2607 | vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA2, 0, ~0); | |
2608 | } | |
2609 | } | |
2610 | ||
2611 | } else if (type == PCI_EXP_TYPE_LEG_END) { | |
2612 | /* | |
2613 | * Legacy endpoints don't belong on the root complex. Windows | |
2614 | * seems to be happier with devices if we skip the capability. | |
2615 | */ | |
2616 | return 0; | |
2617 | } | |
2618 | ||
2619 | } else { | |
2620 | /* | |
2621 | * Convert Root Complex Integrated Endpoints to regular endpoints. | |
2622 | * These devices don't support LNK/LNK2 capabilities, so make them up. | |
2623 | */ | |
2624 | if (type == PCI_EXP_TYPE_RC_END) { | |
2625 | vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS, | |
2626 | PCI_EXP_TYPE_ENDPOINT << 4, | |
2627 | PCI_EXP_FLAGS_TYPE); | |
2628 | vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP, | |
2629 | PCI_EXP_LNK_MLW_1 | PCI_EXP_LNK_LS_25, ~0); | |
2630 | vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0); | |
2631 | } | |
2632 | ||
2633 | /* Mark the Link Status bits as emulated to allow virtual negotiation */ | |
2634 | vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA, | |
2635 | pci_get_word(vdev->pdev.config + pos + | |
2636 | PCI_EXP_LNKSTA), | |
2637 | PCI_EXP_LNKCAP_MLW | PCI_EXP_LNKCAP_SLS); | |
2638 | } | |
2639 | ||
2640 | pos = pci_add_capability(&vdev->pdev, PCI_CAP_ID_EXP, pos, size); | |
2641 | if (pos >= 0) { | |
2642 | vdev->pdev.exp.exp_cap = pos; | |
2643 | } | |
2644 | ||
2645 | return pos; | |
2646 | } | |
2647 | ||
2648 | static void vfio_check_pcie_flr(VFIODevice *vdev, uint8_t pos) |
2649 | { | |
2650 | uint32_t cap = pci_get_long(vdev->pdev.config + pos + PCI_EXP_DEVCAP); | |
2651 | ||
2652 | if (cap & PCI_EXP_DEVCAP_FLR) { | |
2653 | DPRINTF("%04x:%02x:%02x.%x Supports FLR via PCIe cap\n", | |
2654 | vdev->host.domain, vdev->host.bus, vdev->host.slot, | |
2655 | vdev->host.function); | |
2656 | vdev->has_flr = true; | |
2657 | } | |
2658 | } | |
2659 | ||
2660 | static void vfio_check_pm_reset(VFIODevice *vdev, uint8_t pos) | |
2661 | { | |
2662 | uint16_t csr = pci_get_word(vdev->pdev.config + pos + PCI_PM_CTRL); | |
2663 | ||
2664 | if (!(csr & PCI_PM_CTRL_NO_SOFT_RESET)) { | |
2665 | DPRINTF("%04x:%02x:%02x.%x Supports PM reset\n", | |
2666 | vdev->host.domain, vdev->host.bus, vdev->host.slot, | |
2667 | vdev->host.function); | |
2668 | vdev->has_pm_reset = true; | |
2669 | } | |
2670 | } | |
2671 | ||
2672 | static void vfio_check_af_flr(VFIODevice *vdev, uint8_t pos) | |
2673 | { | |
2674 | uint8_t cap = pci_get_byte(vdev->pdev.config + pos + PCI_AF_CAP); | |
2675 | ||
2676 | if ((cap & PCI_AF_CAP_TP) && (cap & PCI_AF_CAP_FLR)) { | |
2677 | DPRINTF("%04x:%02x:%02x.%x Supports FLR via AF cap\n", | |
2678 | vdev->host.domain, vdev->host.bus, vdev->host.slot, | |
2679 | vdev->host.function); | |
2680 | vdev->has_flr = true; | |
2681 | } | |
2682 | } | |
2683 | ||
2684 | static int vfio_add_std_cap(VFIODevice *vdev, uint8_t pos) |
2685 | { | |
2686 | PCIDevice *pdev = &vdev->pdev; | |
2687 | uint8_t cap_id, next, size; | |
2688 | int ret; | |
2689 | ||
2690 | cap_id = pdev->config[pos]; | |
2691 | next = pdev->config[pos + 1]; | |
2692 | ||
2693 | /* | |
2694 | * If it becomes important to configure capabilities to their actual | |
2695 | * size, use this as the default when it's something we don't recognize. | |
2696 | * Since QEMU doesn't actually handle many of the config accesses, | |
2697 | * exact size doesn't seem worthwhile. | |
2698 | */ | |
2699 | size = vfio_std_cap_max_size(pdev, pos); | |
2700 | ||
2701 | /* | |
2702 | * pci_add_capability always inserts the new capability at the head | |
2703 | * of the chain. Therefore to end up with a chain that matches the | |
2704 | * physical device, we insert from the end by making this recursive. | |
2705 | * This is also why we pre-calculate size above, as cached config space | |
2706 | * will be changed as we unwind the stack. | |
2707 | */ | |
2708 | if (next) { | |
2709 | ret = vfio_add_std_cap(vdev, next); | |
2710 | if (ret) { | |
2711 | return ret; | |
2712 | } | |
2713 | } else { | |
2714 | /* Begin the rebuild, use QEMU emulated list bits */ |
2715 | pdev->config[PCI_CAPABILITY_LIST] = 0; | |
2716 | vdev->emulated_config_bits[PCI_CAPABILITY_LIST] = 0xff; | |
2717 | vdev->emulated_config_bits[PCI_STATUS] |= PCI_STATUS_CAP_LIST; | |
65501a74 AW |
2718 | } |
2719 | ||
2720 | /* Use emulated next pointer to allow dropping caps */ |
2721 | pci_set_byte(vdev->emulated_config_bits + pos + 1, 0xff); | |
2722 | ||
2723 | switch (cap_id) { |
2724 | case PCI_CAP_ID_MSI: | |
2725 | ret = vfio_setup_msi(vdev, pos); | |
2726 | break; | |
96adc5c7 | 2727 | case PCI_CAP_ID_EXP: |
befe5176 | 2728 | vfio_check_pcie_flr(vdev, pos); |
2729 | ret = vfio_setup_pcie_cap(vdev, pos, size); |
2730 | break; | |
2731 | case PCI_CAP_ID_MSIX: |
2732 | ret = vfio_setup_msix(vdev, pos); | |
2733 | break; | |
ba661818 | 2734 | case PCI_CAP_ID_PM: |
befe5176 | 2735 | vfio_check_pm_reset(vdev, pos); |
ba661818 | 2736 | vdev->pm_cap = pos; |
2737 | ret = pci_add_capability(pdev, cap_id, pos, size); |
2738 | break; | |
2739 | case PCI_CAP_ID_AF: | |
2740 | vfio_check_af_flr(vdev, pos); | |
2741 | ret = pci_add_capability(pdev, cap_id, pos, size); | |
2742 | break; | |
2743 | default: |
2744 | ret = pci_add_capability(pdev, cap_id, pos, size); | |
2745 | break; | |
2746 | } | |
2747 | ||
2748 | if (ret < 0) { | |
2749 | error_report("vfio: %04x:%02x:%02x.%x Error adding PCI capability " | |
312fd5f2 | 2750 | "0x%x[0x%x]@0x%x: %d", vdev->host.domain, |
2751 | vdev->host.bus, vdev->host.slot, vdev->host.function, |
2752 | cap_id, size, pos, ret); | |
2753 | return ret; | |
2754 | } | |
2755 | ||
2756 | return 0; | |
2757 | } | |
2758 | ||
2759 | static int vfio_add_capabilities(VFIODevice *vdev) | |
2760 | { | |
2761 | PCIDevice *pdev = &vdev->pdev; | |
2762 | ||
2763 | if (!(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST) || | |
2764 | !pdev->config[PCI_CAPABILITY_LIST]) { | |
2765 | return 0; /* Nothing to add */ | |
2766 | } | |
2767 | ||
2768 | return vfio_add_std_cap(vdev, pdev->config[PCI_CAPABILITY_LIST]); | |
2769 | } | |
2770 | ||
2771 | static void vfio_pci_pre_reset(VFIODevice *vdev) |
2772 | { | |
2773 | PCIDevice *pdev = &vdev->pdev; | |
2774 | uint16_t cmd; | |
2775 | ||
2776 | vfio_disable_interrupts(vdev); | |
2777 | ||
2778 | /* Make sure the device is in D0 */ | |
2779 | if (vdev->pm_cap) { | |
2780 | uint16_t pmcsr; | |
2781 | uint8_t state; | |
2782 | ||
2783 | pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2); | |
2784 | state = pmcsr & PCI_PM_CTRL_STATE_MASK; | |
2785 | if (state) { | |
2786 | pmcsr &= ~PCI_PM_CTRL_STATE_MASK; | |
2787 | vfio_pci_write_config(pdev, vdev->pm_cap + PCI_PM_CTRL, pmcsr, 2); | |
2788 | /* vfio handles the necessary delay here */ | |
2789 | pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2); | |
2790 | state = pmcsr & PCI_PM_CTRL_STATE_MASK; | |
2791 | if (state) { | |
2792 | error_report("vfio: Unable to power on device, stuck in D%d", | |
2793 | state); | |
2794 | } | |
2795 | } | |
2796 | } | |
2797 | ||
2798 | /* | |
2799 | * Stop any ongoing DMA by disconnecting I/O, MMIO, and bus master. | |
2800 | * Also put INTx Disable in known state. | |
2801 | */ | |
2802 | cmd = vfio_pci_read_config(pdev, PCI_COMMAND, 2); | |
2803 | cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | | |
2804 | PCI_COMMAND_INTX_DISABLE); | |
2805 | vfio_pci_write_config(pdev, PCI_COMMAND, cmd, 2); | |
2806 | } | |
2807 | ||
2808 | static void vfio_pci_post_reset(VFIODevice *vdev) | |
2809 | { | |
2810 | vfio_enable_intx(vdev); | |
2811 | } | |
2812 | ||
2813 | static bool vfio_pci_host_match(PCIHostDeviceAddress *host1, | |
2814 | PCIHostDeviceAddress *host2) | |
2815 | { | |
2816 | return (host1->domain == host2->domain && host1->bus == host2->bus && | |
2817 | host1->slot == host2->slot && host1->function == host2->function); | |
2818 | } | |
2819 | ||
2820 | static int vfio_pci_hot_reset(VFIODevice *vdev, bool single) | |
2821 | { | |
2822 | VFIOGroup *group; | |
2823 | struct vfio_pci_hot_reset_info *info; | |
2824 | struct vfio_pci_dependent_device *devices; | |
2825 | struct vfio_pci_hot_reset *reset; | |
2826 | int32_t *fds; | |
2827 | int ret, i, count; | |
2828 | bool multi = false; | |
2829 | ||
2830 | DPRINTF("%s(%04x:%02x:%02x.%x) %s\n", __func__, vdev->host.domain, | |
2831 | vdev->host.bus, vdev->host.slot, vdev->host.function, | |
2832 | single ? "one" : "multi"); | |
2833 | ||
2834 | vfio_pci_pre_reset(vdev); | |
2835 | vdev->needs_reset = false; | |
2836 | ||
2837 | info = g_malloc0(sizeof(*info)); | |
2838 | info->argsz = sizeof(*info); | |
2839 | ||
2840 | ret = ioctl(vdev->fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info); | |
2841 | if (ret && errno != ENOSPC) { | |
2842 | ret = -errno; | |
2843 | if (!vdev->has_pm_reset) { | |
2844 | error_report("vfio: Cannot reset device %04x:%02x:%02x.%x, " | |
2845 | "no available reset mechanism.", vdev->host.domain, | |
2846 | vdev->host.bus, vdev->host.slot, vdev->host.function); | |
2847 | } | |
2848 | goto out_single; | |
2849 | } | |
2850 | ||
2851 | count = info->count; | |
2852 | info = g_realloc(info, sizeof(*info) + (count * sizeof(*devices))); | |
2853 | info->argsz = sizeof(*info) + (count * sizeof(*devices)); | |
2854 | devices = &info->devices[0]; | |
2855 | ||
2856 | ret = ioctl(vdev->fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info); | |
2857 | if (ret) { | |
2858 | ret = -errno; | |
2859 | error_report("vfio: hot reset info failed: %m"); | |
2860 | goto out_single; | |
2861 | } | |
2862 | ||
2863 | DPRINTF("%04x:%02x:%02x.%x: hot reset dependent devices:\n", | |
2864 | vdev->host.domain, vdev->host.bus, vdev->host.slot, | |
2865 | vdev->host.function); | |
2866 | ||
2867 | /* Verify that we have all the groups required */ | |
2868 | for (i = 0; i < info->count; i++) { | |
2869 | PCIHostDeviceAddress host; | |
2870 | VFIODevice *tmp; | |
2871 | ||
2872 | host.domain = devices[i].segment; | |
2873 | host.bus = devices[i].bus; | |
2874 | host.slot = PCI_SLOT(devices[i].devfn); | |
2875 | host.function = PCI_FUNC(devices[i].devfn); | |
2876 | ||
2877 | DPRINTF("\t%04x:%02x:%02x.%x group %d\n", host.domain, | |
2878 | host.bus, host.slot, host.function, devices[i].group_id); | |
2879 | ||
2880 | if (vfio_pci_host_match(&host, &vdev->host)) { | |
2881 | continue; | |
2882 | } | |
2883 | ||
2884 | QLIST_FOREACH(group, &group_list, next) { | |
2885 | if (group->groupid == devices[i].group_id) { | |
2886 | break; | |
2887 | } | |
2888 | } | |
2889 | ||
2890 | if (!group) { | |
2891 | if (!vdev->has_pm_reset) { | |
2892 | error_report("vfio: Cannot reset device %04x:%02x:%02x.%x, " | |
2893 | "depends on group %d which is not owned.", | |
2894 | vdev->host.domain, vdev->host.bus, vdev->host.slot, | |
2895 | vdev->host.function, devices[i].group_id); | |
2896 | } | |
2897 | ret = -EPERM; | |
2898 | goto out; | |
2899 | } | |
2900 | ||
2901 | /* Prep dependent devices for reset and clear our marker. */ | |
2902 | QLIST_FOREACH(tmp, &group->device_list, next) { | |
2903 | if (vfio_pci_host_match(&host, &tmp->host)) { | |
2904 | if (single) { | |
2905 | DPRINTF("vfio: found another in-use device " | |
2906 | "%04x:%02x:%02x.%x\n", host.domain, host.bus, | |
2907 | host.slot, host.function); | |
2908 | ret = -EINVAL; | |
2909 | goto out_single; | |
2910 | } | |
2911 | vfio_pci_pre_reset(tmp); | |
2912 | tmp->needs_reset = false; | |
2913 | multi = true; | |
2914 | break; | |
2915 | } | |
2916 | } | |
2917 | } | |
2918 | ||
2919 | if (!single && !multi) { | |
2920 | DPRINTF("vfio: No other in-use devices for multi hot reset\n"); | |
2921 | ret = -EINVAL; | |
2922 | goto out_single; | |
2923 | } | |
2924 | ||
2925 | /* Determine how many group fds need to be passed */ | |
2926 | count = 0; | |
2927 | QLIST_FOREACH(group, &group_list, next) { | |
2928 | for (i = 0; i < info->count; i++) { | |
2929 | if (group->groupid == devices[i].group_id) { | |
2930 | count++; | |
2931 | break; | |
2932 | } | |
2933 | } | |
2934 | } | |
2935 | ||
2936 | reset = g_malloc0(sizeof(*reset) + (count * sizeof(*fds))); | |
2937 | reset->argsz = sizeof(*reset) + (count * sizeof(*fds)); | |
2938 | fds = &reset->group_fds[0]; | |
2939 | ||
2940 | /* Fill in group fds */ | |
2941 | QLIST_FOREACH(group, &group_list, next) { | |
2942 | for (i = 0; i < info->count; i++) { | |
2943 | if (group->groupid == devices[i].group_id) { | |
2944 | fds[reset->count++] = group->fd; | |
2945 | break; | |
2946 | } | |
2947 | } | |
2948 | } | |
2949 | ||
2950 | /* Bus reset! */ | |
2951 | ret = ioctl(vdev->fd, VFIO_DEVICE_PCI_HOT_RESET, reset); | |
2952 | g_free(reset); | |
2953 | ||
2954 | DPRINTF("%04x:%02x:%02x.%x hot reset: %s\n", vdev->host.domain, | |
2955 | vdev->host.bus, vdev->host.slot, vdev->host.function, | |
3256 | ret ? strerror(errno) : "Success"); | |
2957 | ||
2958 | out: | |
2959 | /* Re-enable INTx on affected devices */ | |
2960 | for (i = 0; i < info->count; i++) { | |
2961 | PCIHostDeviceAddress host; | |
2962 | VFIODevice *tmp; | |
2963 | ||
2964 | host.domain = devices[i].segment; | |
2965 | host.bus = devices[i].bus; | |
2966 | host.slot = PCI_SLOT(devices[i].devfn); | |
2967 | host.function = PCI_FUNC(devices[i].devfn); | |
2968 | ||
2969 | if (vfio_pci_host_match(&host, &vdev->host)) { | |
2970 | continue; | |
2971 | } | |
2972 | ||
2973 | QLIST_FOREACH(group, &group_list, next) { | |
2974 | if (group->groupid == devices[i].group_id) { | |
2975 | break; | |
2976 | } | |
2977 | } | |
2978 | ||
2979 | if (!group) { | |
2980 | break; | |
2981 | } | |
2982 | ||
2983 | QLIST_FOREACH(tmp, &group->device_list, next) { | |
2984 | if (vfio_pci_host_match(&host, &tmp->host)) { | |
2985 | vfio_pci_post_reset(tmp); | |
2986 | break; | |
2987 | } | |
2988 | } | |
2989 | } | |
2990 | out_single: | |
2991 | vfio_pci_post_reset(vdev); | |
2992 | g_free(info); | |
2993 | ||
2994 | return ret; | |
2995 | } | |
2996 | ||
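Both GET_PCI_HOT_RESET_INFO calls above follow VFIO's variable-size argument convention: the caller sets argsz to the number of bytes it allocated, and the kernel reports how many it actually needs. A minimal sketch of that two-call handshake, assuming only <linux/vfio.h>, glib, and an already open VFIO device descriptor named device_fd (error handling trimmed):

    struct vfio_pci_hot_reset_info *info = g_malloc0(sizeof(*info));

    info->argsz = sizeof(*info);        /* no room for device entries yet */

    /* First call typically fails with ENOSPC but fills in info->count */
    if (ioctl(device_fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info) &&
        errno != ENOSPC) {
        /* device has no hot reset support at all */
    }

    /* Grow the buffer and retry; the kernel now fills info->devices[] */
    info = g_realloc(info, sizeof(*info) +
                     info->count * sizeof(struct vfio_pci_dependent_device));
    info->argsz = sizeof(*info) +
                  info->count * sizeof(struct vfio_pci_dependent_device);
    ioctl(device_fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info);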
2997 | /* | |
2998 | * We want to differentiate hot reset of multiple in-use devices vs hot reset | |
2999 | * of a single in-use device. VFIO_DEVICE_RESET will already handle the case | |
3000 | * of doing hot resets when there is only a single device per bus. The in-use | |
3001 | * here refers to how many VFIODevices are affected. A hot reset that affects | |
3002 | * multiple devices, but only a single in-use device, means that we can call | |
3003 | * it from our bus ->reset() callback since the extent is effectively a single | |
3004 | * device. This allows us to make use of it in the hotplug path. When there | |
3005 | * are multiple in-use devices, we can only trigger the hot reset during a | |
3006 | * system reset and thus from our reset handler. We separate _one vs _multi | |
3007 | * here so that we don't overlap and do a double reset on the system reset | |
3008 | * path where both our reset handler and ->reset() callback are used. Calling | |
3009 | * _one() will only do a hot reset for the single in-use device case, calling | |
3010 | * _multi() will do nothing if a _one() would have been sufficient. | |
3011 | */ | |
3012 | static int vfio_pci_hot_reset_one(VFIODevice *vdev) | |
3013 | { | |
3014 | return vfio_pci_hot_reset(vdev, true); | |
3015 | } | |
3016 | ||
3017 | static int vfio_pci_hot_reset_multi(VFIODevice *vdev) | |
3018 | { | |
3019 | return vfio_pci_hot_reset(vdev, false); | |
3020 | } | |
3021 | ||
3022 | static void vfio_pci_reset_handler(void *opaque) | |
3023 | { | |
3024 | VFIOGroup *group; | |
3025 | VFIODevice *vdev; | |
3026 | ||
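    /*
     * Two passes: first mark every device that either cannot reset itself
     * or would have to fall back to a (weaker) PM reset, then hot reset
     * each device still marked.  vfio_pci_hot_reset() clears needs_reset
     * on the in-use devices it covers, so a shared bus is reset only once.
     */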
3027 | QLIST_FOREACH(group, &group_list, next) { | |
3028 | QLIST_FOREACH(vdev, &group->device_list, next) { | |
3029 | if (!vdev->reset_works || (!vdev->has_flr && vdev->has_pm_reset)) { | |
3030 | vdev->needs_reset = true; | |
3031 | } | |
3032 | } | |
3033 | } | |
3034 | ||
3035 | QLIST_FOREACH(group, &group_list, next) { | |
3036 | QLIST_FOREACH(vdev, &group->device_list, next) { | |
3037 | if (vdev->needs_reset) { | |
3038 | vfio_pci_hot_reset_multi(vdev); | |
3039 | } | |
3040 | } | |
3041 | } | |
3042 | } | |
3043 | ||
65501a74 AW |
3044 | static int vfio_connect_container(VFIOGroup *group) |
3045 | { | |
3046 | VFIOContainer *container; | |
3047 | int ret, fd; | |
3048 | ||
3049 | if (group->container) { | |
3050 | return 0; | |
3051 | } | |
3052 | ||
3053 | QLIST_FOREACH(container, &container_list, next) { | |
3054 | if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) { | |
3055 | group->container = container; | |
3056 | QLIST_INSERT_HEAD(&container->group_list, group, container_next); | |
3057 | return 0; | |
3058 | } | |
3059 | } | |
3060 | ||
3061 | fd = qemu_open("/dev/vfio/vfio", O_RDWR); | |
3062 | if (fd < 0) { | |
312fd5f2 | 3063 | error_report("vfio: failed to open /dev/vfio/vfio: %m"); |
65501a74 AW |
3064 | return -errno; |
3065 | } | |
3066 | ||
3067 | ret = ioctl(fd, VFIO_GET_API_VERSION); | |
3068 | if (ret != VFIO_API_VERSION) { | |
3069 | error_report("vfio: supported vfio version: %d, " | |
312fd5f2 | 3070 | "reported version: %d", VFIO_API_VERSION, ret); |
65501a74 AW |
3071 | close(fd); |
3072 | return -EINVAL; | |
3073 | } | |
3074 | ||
3075 | container = g_malloc0(sizeof(*container)); | |
3076 | container->fd = fd; | |
3077 | ||
3078 | if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU)) { | |
3079 | ret = ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &fd); | |
3080 | if (ret) { | |
312fd5f2 | 3081 | error_report("vfio: failed to set group container: %m"); |
65501a74 AW |
3082 | g_free(container); |
3083 | close(fd); | |
3084 | return -errno; | |
3085 | } | |
3086 | ||
3087 | ret = ioctl(fd, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU); | |
3088 | if (ret) { | |
312fd5f2 | 3089 | error_report("vfio: failed to set iommu for container: %m"); |
65501a74 AW |
3090 | g_free(container); |
3091 | close(fd); | |
3092 | return -errno; | |
3093 | } | |
3094 | ||
3095 | container->iommu_data.listener = vfio_memory_listener; | |
3096 | container->iommu_data.release = vfio_listener_release; | |
3097 | ||
f6790af6 | 3098 | memory_listener_register(&container->iommu_data.listener, &address_space_memory); |
65501a74 | 3099 | } else { |
312fd5f2 | 3100 | error_report("vfio: No available IOMMU models"); |
65501a74 AW |
3101 | g_free(container); |
3102 | close(fd); | |
3103 | return -EINVAL; | |
3104 | } | |
3105 | ||
3106 | QLIST_INIT(&container->group_list); | |
3107 | QLIST_INSERT_HEAD(&container_list, container, next); | |
3108 | ||
3109 | group->container = container; | |
3110 | QLIST_INSERT_HEAD(&container->group_list, group, container_next); | |
3111 | ||
3112 | return 0; | |
3113 | } | |
3114 | ||
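Stripped of QEMU's bookkeeping, vfio_connect_container() is the canonical container bring-up sequence from the kernel's Documentation/vfio.txt. A sketch, assuming group_fd is an already open /dev/vfio/<groupid> descriptor and error handling is omitted:

    int container_fd = qemu_open("/dev/vfio/vfio", O_RDWR);

    /* Refuse to run against a kernel speaking a different API revision */
    if (ioctl(container_fd, VFIO_GET_API_VERSION) != VFIO_API_VERSION) {
        /* mismatch between the kernel and the linux/vfio.h we built with */
    }

    if (ioctl(container_fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU)) {
        /* a group must be attached before an IOMMU model can be set */
        ioctl(group_fd, VFIO_GROUP_SET_CONTAINER, &container_fd);
        ioctl(container_fd, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU);
    }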
3115 | static void vfio_disconnect_container(VFIOGroup *group) | |
3116 | { | |
3117 | VFIOContainer *container = group->container; | |
3118 | ||
3119 | if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, &container->fd)) { | |
312fd5f2 | 3120 | error_report("vfio: error disconnecting group %d from container", |
65501a74 AW |
3121 | group->groupid); |
3122 | } | |
3123 | ||
3124 | QLIST_REMOVE(group, container_next); | |
3125 | group->container = NULL; | |
3126 | ||
3127 | if (QLIST_EMPTY(&container->group_list)) { | |
3128 | if (container->iommu_data.release) { | |
3129 | container->iommu_data.release(container); | |
3130 | } | |
3131 | QLIST_REMOVE(container, next); | |
3132 | DPRINTF("vfio_disconnect_container: close container->fd\n"); | |
3133 | close(container->fd); | |
3134 | g_free(container); | |
3135 | } | |
3136 | } | |
3137 | ||
3138 | static VFIOGroup *vfio_get_group(int groupid) | |
3139 | { | |
3140 | VFIOGroup *group; | |
3141 | char path[32]; | |
3142 | struct vfio_group_status status = { .argsz = sizeof(status) }; | |
3143 | ||
3144 | QLIST_FOREACH(group, &group_list, next) { | |
3145 | if (group->groupid == groupid) { | |
3146 | return group; | |
3147 | } | |
3148 | } | |
3149 | ||
3150 | group = g_malloc0(sizeof(*group)); | |
3151 | ||
3152 | snprintf(path, sizeof(path), "/dev/vfio/%d", groupid); | |
3153 | group->fd = qemu_open(path, O_RDWR); | |
3154 | if (group->fd < 0) { | |
312fd5f2 | 3155 | error_report("vfio: error opening %s: %m", path); |
65501a74 AW |
3156 | g_free(group); |
3157 | return NULL; | |
3158 | } | |
3159 | ||
3160 | if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status)) { | |
312fd5f2 | 3161 | error_report("vfio: error getting group status: %m"); |
65501a74 AW |
3162 | close(group->fd); |
3163 | g_free(group); | |
3164 | return NULL; | |
3165 | } | |
3166 | ||
3167 | if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) { | |
3168 | error_report("vfio: error, group %d is not viable, please ensure " | |
3169 | "all devices within the iommu_group are bound to their " | |
312fd5f2 | 3170 | "vfio bus driver.", groupid); |
65501a74 AW |
3171 | close(group->fd); |
3172 | g_free(group); | |
3173 | return NULL; | |
3174 | } | |
3175 | ||
3176 | group->groupid = groupid; | |
3177 | QLIST_INIT(&group->device_list); | |
3178 | ||
3179 | if (vfio_connect_container(group)) { | |
312fd5f2 | 3180 | error_report("vfio: failed to setup container for group %d", groupid); |
65501a74 AW |
3181 | close(group->fd); |
3182 | g_free(group); | |
3183 | return NULL; | |
3184 | } | |
3185 | ||
f16f39c3 AW |
3186 | if (QLIST_EMPTY(&group_list)) { |
3187 | qemu_register_reset(vfio_pci_reset_handler, NULL); | |
3188 | } | |
3189 | ||
65501a74 AW |
3190 | QLIST_INSERT_HEAD(&group_list, group, next); |
3191 | ||
3192 | return group; | |
3193 | } | |
3194 | ||
3195 | static void vfio_put_group(VFIOGroup *group) | |
3196 | { | |
3197 | if (!QLIST_EMPTY(&group->device_list)) { | |
3198 | return; | |
3199 | } | |
3200 | ||
3201 | vfio_disconnect_container(group); | |
3202 | QLIST_REMOVE(group, next); | |
3203 | DPRINTF("vfio_put_group: close group->fd\n"); | |
3204 | close(group->fd); | |
3205 | g_free(group); | |
f16f39c3 AW |
3206 | |
3207 | if (QLIST_EMPTY(&group_list)) { | |
3208 | qemu_unregister_reset(vfio_pci_reset_handler, NULL); | |
3209 | } | |
65501a74 AW |
3210 | } |
3211 | ||
3212 | static int vfio_get_device(VFIOGroup *group, const char *name, VFIODevice *vdev) | |
3213 | { | |
3214 | struct vfio_device_info dev_info = { .argsz = sizeof(dev_info) }; | |
3215 | struct vfio_region_info reg_info = { .argsz = sizeof(reg_info) }; | |
7b4b0e9e | 3216 | struct vfio_irq_info irq_info = { .argsz = sizeof(irq_info) }; |
65501a74 AW |
3217 | int ret, i; |
3218 | ||
3219 | ret = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, name); | |
3220 | if (ret < 0) { | |
1a9522cc | 3221 | error_report("vfio: error getting device %s from group %d: %m", |
65501a74 | 3222 | name, group->groupid); |
1a9522cc | 3223 | error_printf("Verify all devices in group %d are bound to vfio-pci " |
65501a74 AW |
3224 | "or pci-stub and not already in use\n", group->groupid); |
3225 | return ret; | |
3226 | } | |
3227 | ||
3228 | vdev->fd = ret; | |
3229 | vdev->group = group; | |
3230 | QLIST_INSERT_HEAD(&group->device_list, vdev, next); | |
3231 | ||
3232 | /* Sanity check device */ | |
3233 | ret = ioctl(vdev->fd, VFIO_DEVICE_GET_INFO, &dev_info); | |
3234 | if (ret) { | |
312fd5f2 | 3235 | error_report("vfio: error getting device info: %m"); |
65501a74 AW |
3236 | goto error; |
3237 | } | |
3238 | ||
3239 | DPRINTF("Device %s flags: %u, regions: %u, irgs: %u\n", name, | |
3240 | dev_info.flags, dev_info.num_regions, dev_info.num_irqs); | |
3241 | ||
3242 | if (!(dev_info.flags & VFIO_DEVICE_FLAGS_PCI)) { | |
312fd5f2 | 3243 | error_report("vfio: Um, this isn't a PCI device"); |
65501a74 AW |
3244 | goto error; |
3245 | } | |
3246 | ||
3247 | vdev->reset_works = !!(dev_info.flags & VFIO_DEVICE_FLAGS_RESET); | |
65501a74 | 3248 | |
8fc94e5a | 3249 | if (dev_info.num_regions < VFIO_PCI_CONFIG_REGION_INDEX + 1) { |
312fd5f2 | 3250 | error_report("vfio: unexpected number of io regions %u", |
65501a74 AW |
3251 | dev_info.num_regions); |
3252 | goto error; | |
3253 | } | |
3254 | ||
8fc94e5a | 3255 | if (dev_info.num_irqs < VFIO_PCI_MSIX_IRQ_INDEX + 1) { |
312fd5f2 | 3256 | error_report("vfio: unexpected number of irqs %u", dev_info.num_irqs); |
65501a74 AW |
3257 | goto error; |
3258 | } | |
3259 | ||
3260 | for (i = VFIO_PCI_BAR0_REGION_INDEX; i < VFIO_PCI_ROM_REGION_INDEX; i++) { | |
3261 | reg_info.index = i; | |
3262 | ||
3263 | ret = ioctl(vdev->fd, VFIO_DEVICE_GET_REGION_INFO, ®_info); | |
3264 | if (ret) { | |
312fd5f2 | 3265 | error_report("vfio: Error getting region %d info: %m", i); |
65501a74 AW |
3266 | goto error; |
3267 | } | |
3268 | ||
3269 | DPRINTF("Device %s region %d:\n", name, i); | |
3270 | DPRINTF(" size: 0x%lx, offset: 0x%lx, flags: 0x%lx\n", | |
3271 | (unsigned long)reg_info.size, (unsigned long)reg_info.offset, | |
3272 | (unsigned long)reg_info.flags); | |
3273 | ||
3274 | vdev->bars[i].flags = reg_info.flags; | |
3275 | vdev->bars[i].size = reg_info.size; | |
3276 | vdev->bars[i].fd_offset = reg_info.offset; | |
3277 | vdev->bars[i].fd = vdev->fd; | |
3278 | vdev->bars[i].nr = i; | |
7076eabc | 3279 | QLIST_INIT(&vdev->bars[i].quirks); |
65501a74 AW |
3280 | } |
3281 | ||
65501a74 AW |
3282 | reg_info.index = VFIO_PCI_CONFIG_REGION_INDEX; |
3283 | ||
3284 | ret = ioctl(vdev->fd, VFIO_DEVICE_GET_REGION_INFO, ®_info); | |
3285 | if (ret) { | |
312fd5f2 | 3286 | error_report("vfio: Error getting config info: %m"); |
65501a74 AW |
3287 | goto error; |
3288 | } | |
3289 | ||
3290 | DPRINTF("Device %s config:\n", name); | |
3291 | DPRINTF(" size: 0x%lx, offset: 0x%lx, flags: 0x%lx\n", | |
3292 | (unsigned long)reg_info.size, (unsigned long)reg_info.offset, | |
3293 | (unsigned long)reg_info.flags); | |
3294 | ||
3295 | vdev->config_size = reg_info.size; | |
6a659bbf AW |
3296 | if (vdev->config_size == PCI_CONFIG_SPACE_SIZE) { |
3297 | vdev->pdev.cap_present &= ~QEMU_PCI_CAP_EXPRESS; | |
3298 | } | |
65501a74 AW |
3299 | vdev->config_offset = reg_info.offset; |
3300 | ||
f15689c7 AW |
3301 | if ((vdev->features & VFIO_FEATURE_ENABLE_VGA) && |
3302 | dev_info.num_regions > VFIO_PCI_VGA_REGION_INDEX) { | |
3303 | struct vfio_region_info vga_info = { | |
3304 | .argsz = sizeof(vga_info), | |
3305 | .index = VFIO_PCI_VGA_REGION_INDEX, | |
3306 | }; | |
3307 | ||
3308 | ret = ioctl(vdev->fd, VFIO_DEVICE_GET_REGION_INFO, &vga_info); | |
3309 | if (ret) { | |
3310 | error_report( | |
3311 | "vfio: Device does not support requested feature x-vga"); | |
3312 | goto error; | |
3313 | } | |
3314 | ||
3315 | if (!(vga_info.flags & VFIO_REGION_INFO_FLAG_READ) || | |
3316 | !(vga_info.flags & VFIO_REGION_INFO_FLAG_WRITE) || | |
3317 | vga_info.size < 0xbffff + 1) { | |
3318 | error_report("vfio: Unexpected VGA info, flags 0x%lx, size 0x%lx", | |
3319 | (unsigned long)vga_info.flags, | |
3320 | (unsigned long)vga_info.size); | |
3321 | goto error; | |
3322 | } | |
3323 | ||
3324 | vdev->vga.fd_offset = vga_info.offset; | |
3325 | vdev->vga.fd = vdev->fd; | |
3326 | ||
3327 | vdev->vga.region[QEMU_PCI_VGA_MEM].offset = QEMU_PCI_VGA_MEM_BASE; | |
3328 | vdev->vga.region[QEMU_PCI_VGA_MEM].nr = QEMU_PCI_VGA_MEM; | |
7076eabc | 3329 | QLIST_INIT(&vdev->vga.region[QEMU_PCI_VGA_MEM].quirks); |
f15689c7 AW |
3330 | |
3331 | vdev->vga.region[QEMU_PCI_VGA_IO_LO].offset = QEMU_PCI_VGA_IO_LO_BASE; | |
3332 | vdev->vga.region[QEMU_PCI_VGA_IO_LO].nr = QEMU_PCI_VGA_IO_LO; | |
7076eabc | 3333 | QLIST_INIT(&vdev->vga.region[QEMU_PCI_VGA_IO_LO].quirks); |
f15689c7 AW |
3334 | |
3335 | vdev->vga.region[QEMU_PCI_VGA_IO_HI].offset = QEMU_PCI_VGA_IO_HI_BASE; | |
3336 | vdev->vga.region[QEMU_PCI_VGA_IO_HI].nr = QEMU_PCI_VGA_IO_HI; | |
7076eabc | 3337 | QLIST_INIT(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].quirks); |
f15689c7 AW |
3338 | |
3339 | vdev->has_vga = true; | |
3340 | } | |
7b4b0e9e VMP |
3341 | irq_info.index = VFIO_PCI_ERR_IRQ_INDEX; |
3342 | ||
3343 | ret = ioctl(vdev->fd, VFIO_DEVICE_GET_IRQ_INFO, &irq_info); | |
3344 | if (ret) { | |
3345 | /* This can fail for an old kernel or legacy PCI dev */ | |
8fbf47c3 | 3346 | DPRINTF("VFIO_DEVICE_GET_IRQ_INFO failure: %m\n"); |
7b4b0e9e VMP |
3347 | ret = 0; |
3348 | } else if (irq_info.count == 1) { | |
3349 | vdev->pci_aer = true; | |
3350 | } else { | |
8fbf47c3 AW |
3351 | error_report("vfio: %04x:%02x:%02x.%x " |
3352 | "Could not enable error recovery for the device", | |
3353 | vdev->host.domain, vdev->host.bus, vdev->host.slot, | |
3354 | vdev->host.function); | |
7b4b0e9e | 3355 | } |
f15689c7 | 3356 | |
65501a74 AW |
3357 | error: |
3358 | if (ret) { | |
3359 | QLIST_REMOVE(vdev, next); | |
3360 | vdev->group = NULL; | |
3361 | close(vdev->fd); | |
3362 | } | |
3363 | return ret; | |
3364 | } | |
3365 | ||
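The offsets cached above are the central trick of VFIO device access: every region (BARs, ROM, config space) is a window at a fixed offset within the single device fd, so plain pread()/pwrite() reach it. A sketch reading the 16-bit PCI vendor ID, assuming reg_info was just filled for VFIO_PCI_CONFIG_REGION_INDEX as in the code above:

    uint16_t vendor;

    /* PCI_VENDOR_ID is offset 0 in config space; reg_info.offset
     * relocates the region within the device fd */
    if (pread(vdev->fd, &vendor, sizeof(vendor),
              reg_info.offset + PCI_VENDOR_ID) != sizeof(vendor)) {
        /* read failed */
    }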
3366 | static void vfio_put_device(VFIODevice *vdev) | |
3367 | { | |
3368 | QLIST_REMOVE(vdev, next); | |
3369 | vdev->group = NULL; | |
3370 | DPRINTF("vfio_put_device: close vdev->fd\n"); | |
3371 | close(vdev->fd); | |
3372 | if (vdev->msix) { | |
3373 | g_free(vdev->msix); | |
3374 | vdev->msix = NULL; | |
3375 | } | |
3376 | } | |
3377 | ||
7b4b0e9e VMP |
3378 | static void vfio_err_notifier_handler(void *opaque) |
3379 | { | |
3380 | VFIODevice *vdev = opaque; | |
3381 | ||
3382 | if (!event_notifier_test_and_clear(&vdev->err_notifier)) { | |
3383 | return; | |
3384 | } | |
3385 | ||
3386 | /* | |
3387 | * TBD. Retrieve the error details and decide what action | |
3388 | * needs to be taken. One of the actions could be to pass | |
3389 | * the error to the guest and have the guest driver recover | |
3390 | * from the error. This requires that PCIe capabilities be | |
3391 | * exposed to the guest. For now, we just terminate the | |
3392 | * guest to contain the error. | |
3393 | */ | |
3394 | ||
8fbf47c3 AW |
3395 | error_report("%s(%04x:%02x:%02x.%x) Unrecoverable error detected. " |
3396 | "Please collect any data possible and then kill the guest", | |
3397 | __func__, vdev->host.domain, vdev->host.bus, | |
3398 | vdev->host.slot, vdev->host.function); | |
7b4b0e9e VMP |
3399 | |
3400 | vm_stop(RUN_STATE_IO_ERROR); | |
3401 | } | |
3402 | ||
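err_notifier wraps an eventfd, which is what makes the test-and-clear above cheap: the kernel increments a 64-bit counter when the error interrupt fires, and draining it is roughly one 8-byte read. A sketch of what the EventNotifier helper boils down to (an assumption about its internals, not a quotation of them):

    uint64_t count;
    int efd = event_notifier_get_fd(&vdev->err_notifier);

    /* a non-zero count means the error IRQ fired since the last read */
    if (read(efd, &count, sizeof(count)) == sizeof(count) && count) {
        /* handle the error */
    }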
3403 | /* | |
3404 | * Registers error notifier for devices supporting error recovery. | |
3405 | * If we encounter a failure in this function, we report an error | |
3406 | * and continue after disabling error recovery support for the | |
3407 | * device. | |
3408 | */ | |
3409 | static void vfio_register_err_notifier(VFIODevice *vdev) | |
3410 | { | |
3411 | int ret; | |
3412 | int argsz; | |
3413 | struct vfio_irq_set *irq_set; | |
3414 | int32_t *pfd; | |
3415 | ||
3416 | if (!vdev->pci_aer) { | |
3417 | return; | |
3418 | } | |
3419 | ||
3420 | if (event_notifier_init(&vdev->err_notifier, 0)) { | |
8fbf47c3 | 3421 | error_report("vfio: Unable to init event notifier for error detection"); |
7b4b0e9e VMP |
3422 | vdev->pci_aer = false; |
3423 | return; | |
3424 | } | |
3425 | ||
3426 | argsz = sizeof(*irq_set) + sizeof(*pfd); | |
3427 | ||
3428 | irq_set = g_malloc0(argsz); | |
3429 | irq_set->argsz = argsz; | |
3430 | irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | | |
3431 | VFIO_IRQ_SET_ACTION_TRIGGER; | |
3432 | irq_set->index = VFIO_PCI_ERR_IRQ_INDEX; | |
3433 | irq_set->start = 0; | |
3434 | irq_set->count = 1; | |
3435 | pfd = (int32_t *)&irq_set->data; | |
3436 | ||
3437 | *pfd = event_notifier_get_fd(&vdev->err_notifier); | |
3438 | qemu_set_fd_handler(*pfd, vfio_err_notifier_handler, NULL, vdev); | |
3439 | ||
3440 | ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set); | |
3441 | if (ret) { | |
8fbf47c3 | 3442 | error_report("vfio: Failed to set up error notification"); |
7b4b0e9e VMP |
3443 | qemu_set_fd_handler(*pfd, NULL, NULL, vdev); |
3444 | event_notifier_cleanup(&vdev->err_notifier); | |
3445 | vdev->pci_aer = false; | |
3446 | } | |
3447 | g_free(irq_set); | |
3448 | } | |
3449 | ||
3450 | static void vfio_unregister_err_notifier(VFIODevice *vdev) | |
3451 | { | |
3452 | int argsz; | |
3453 | struct vfio_irq_set *irq_set; | |
3454 | int32_t *pfd; | |
3455 | int ret; | |
3456 | ||
3457 | if (!vdev->pci_aer) { | |
3458 | return; | |
3459 | } | |
3460 | ||
3461 | argsz = sizeof(*irq_set) + sizeof(*pfd); | |
3462 | ||
3463 | irq_set = g_malloc0(argsz); | |
3464 | irq_set->argsz = argsz; | |
3465 | irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | | |
3466 | VFIO_IRQ_SET_ACTION_TRIGGER; | |
3467 | irq_set->index = VFIO_PCI_ERR_IRQ_INDEX; | |
3468 | irq_set->start = 0; | |
3469 | irq_set->count = 1; | |
3470 | pfd = (int32_t *)&irq_set->data; | |
3471 | *pfd = -1; | |
3472 | ||
3473 | ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set); | |
3474 | if (ret) { | |
8fbf47c3 | 3475 | error_report("vfio: Failed to de-assign error fd: %m"); |
7b4b0e9e VMP |
3476 | } |
3477 | g_free(irq_set); | |
3478 | qemu_set_fd_handler(event_notifier_get_fd(&vdev->err_notifier), | |
3479 | NULL, NULL, vdev); | |
3480 | event_notifier_cleanup(&vdev->err_notifier); | |
3481 | } | |
3482 | ||
65501a74 AW |
3483 | static int vfio_initfn(PCIDevice *pdev) |
3484 | { | |
3485 | VFIODevice *pvdev, *vdev = DO_UPCAST(VFIODevice, pdev, pdev); | |
3486 | VFIOGroup *group; | |
3487 | char path[PATH_MAX], iommu_group_path[PATH_MAX], *group_name; | |
3488 | ssize_t len; | |
3489 | struct stat st; | |
3490 | int groupid; | |
3491 | int ret; | |
3492 | ||
3493 | /* Check that the host device exists */ | |
3494 | snprintf(path, sizeof(path), | |
3495 | "/sys/bus/pci/devices/%04x:%02x:%02x.%01x/", | |
3496 | vdev->host.domain, vdev->host.bus, vdev->host.slot, | |
3497 | vdev->host.function); | |
3498 | if (stat(path, &st) < 0) { | |
312fd5f2 | 3499 | error_report("vfio: error: no such host device: %s", path); |
65501a74 AW |
3500 | return -errno; |
3501 | } | |
3502 | ||
3503 | strncat(path, "iommu_group", sizeof(path) - strlen(path) - 1); | |
3504 | ||
3505 | len = readlink(path, iommu_group_path, PATH_MAX); | |
3506 | if (len <= 0) { | |
312fd5f2 | 3507 | error_report("vfio: error no iommu_group for device"); |
65501a74 AW |
3508 | return -errno; |
3509 | } | |
3510 | ||
3511 | iommu_group_path[len] = 0; | |
3512 | group_name = basename(iommu_group_path); | |
3513 | ||
3514 | if (sscanf(group_name, "%d", &groupid) != 1) { | |
312fd5f2 | 3515 | error_report("vfio: error reading %s: %m", path); |
65501a74 AW |
3516 | return -errno; |
3517 | } | |
3518 | ||
3519 | DPRINTF("%s(%04x:%02x:%02x.%x) group %d\n", __func__, vdev->host.domain, | |
3520 | vdev->host.bus, vdev->host.slot, vdev->host.function, groupid); | |
3521 | ||
3522 | group = vfio_get_group(groupid); | |
3523 | if (!group) { | |
312fd5f2 | 3524 | error_report("vfio: failed to get group %d", groupid); |
65501a74 AW |
3525 | return -ENOENT; |
3526 | } | |
3527 | ||
3528 | snprintf(path, sizeof(path), "%04x:%02x:%02x.%01x", | |
3529 | vdev->host.domain, vdev->host.bus, vdev->host.slot, | |
3530 | vdev->host.function); | |
3531 | ||
3532 | QLIST_FOREACH(pvdev, &group->device_list, next) { | |
3533 | if (pvdev->host.domain == vdev->host.domain && | |
3534 | pvdev->host.bus == vdev->host.bus && | |
3535 | pvdev->host.slot == vdev->host.slot && | |
3536 | pvdev->host.function == vdev->host.function) { | |
3537 | ||
312fd5f2 | 3538 | error_report("vfio: error: device %s is already attached", path); |
65501a74 AW |
3539 | vfio_put_group(group); |
3540 | return -EBUSY; | |
3541 | } | |
3542 | } | |
3543 | ||
3544 | ret = vfio_get_device(group, path, vdev); | |
3545 | if (ret) { | |
312fd5f2 | 3546 | error_report("vfio: failed to get device %s", path); |
65501a74 AW |
3547 | vfio_put_group(group); |
3548 | return ret; | |
3549 | } | |
3550 | ||
3551 | /* Get a copy of config space */ | |
3552 | ret = pread(vdev->fd, vdev->pdev.config, | |
3553 | MIN(pci_config_size(&vdev->pdev), vdev->config_size), | |
3554 | vdev->config_offset); | |
3555 | if (ret < (int)MIN(pci_config_size(&vdev->pdev), vdev->config_size)) { | |
3556 | ret = ret < 0 ? -errno : -EFAULT; | |
312fd5f2 | 3557 | error_report("vfio: Failed to read device config space"); |
65501a74 AW |
3558 | goto out_put; |
3559 | } | |
3560 | ||
4b5d5e87 AW |
3561 | /* vfio emulates a lot for us, but some bits need extra love */ |
3562 | vdev->emulated_config_bits = g_malloc0(vdev->config_size); | |
3563 | ||
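    /*
     * emulated_config_bits is a per-byte mask consumed by
     * vfio_pci_read_config(): bytes set to 0xff are served from QEMU's
     * emulated config space, clear bytes come from the physical device,
     * roughly val = (emu_val & emu_bits) | (phys_val & ~emu_bits).
     */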
3564 | /* QEMU can choose to expose the ROM or not */ | |
3565 | memset(vdev->emulated_config_bits + PCI_ROM_ADDRESS, 0xff, 4); | |
3566 | ||
3567 | /* QEMU can change multi-function devices to single function, or reverse */ | |
3568 | vdev->emulated_config_bits[PCI_HEADER_TYPE] = | |
3569 | PCI_HEADER_TYPE_MULTI_FUNCTION; | |
3570 | ||
8d07d6c4 AW |
3571 | /* Restore or clear multifunction, this is always controlled by QEMU */ |
3572 | if (vdev->pdev.cap_present & QEMU_PCI_CAP_MULTIFUNCTION) { | |
3573 | vdev->pdev.config[PCI_HEADER_TYPE] |= PCI_HEADER_TYPE_MULTI_FUNCTION; | |
3574 | } else { | |
3575 | vdev->pdev.config[PCI_HEADER_TYPE] &= ~PCI_HEADER_TYPE_MULTI_FUNCTION; | |
3576 | } | |
3577 | ||
65501a74 AW |
3578 | /* |
3579 | * Clear host resource mapping info. If we choose not to register a | |
3580 | * BAR, such as might be the case with the option ROM, we can get | |
3581 | * confusing, unwritable, residual addresses from the host here. | |
3582 | */ | |
3583 | memset(&vdev->pdev.config[PCI_BASE_ADDRESS_0], 0, 24); | |
3584 | memset(&vdev->pdev.config[PCI_ROM_ADDRESS], 0, 4); | |
3585 | ||
6f864e6e | 3586 | vfio_pci_size_rom(vdev); |
65501a74 AW |
3587 | |
3588 | ret = vfio_early_setup_msix(vdev); | |
3589 | if (ret) { | |
3590 | goto out_put; | |
3591 | } | |
3592 | ||
3593 | vfio_map_bars(vdev); | |
3594 | ||
3595 | ret = vfio_add_capabilities(vdev); | |
3596 | if (ret) { | |
3597 | goto out_teardown; | |
3598 | } | |
3599 | ||
4b5d5e87 AW |
3600 | /* QEMU emulates all of MSI & MSIX */ |
3601 | if (pdev->cap_present & QEMU_PCI_CAP_MSIX) { | |
3602 | memset(vdev->emulated_config_bits + pdev->msix_cap, 0xff, | |
3603 | MSIX_CAP_LENGTH); | |
3604 | } | |
3605 | ||
3606 | if (pdev->cap_present & QEMU_PCI_CAP_MSI) { | |
3607 | memset(vdev->emulated_config_bits + pdev->msi_cap, 0xff, | |
3608 | vdev->msi_cap_size); | |
3609 | } | |
3610 | ||
65501a74 | 3611 | if (vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1)) { |
bc72ad67 | 3612 | vdev->intx.mmap_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL, |
ea486926 | 3613 | vfio_intx_mmap_enable, vdev); |
e1d1e586 | 3614 | pci_device_set_intx_routing_notifier(&vdev->pdev, vfio_update_irq); |
65501a74 AW |
3615 | ret = vfio_enable_intx(vdev); |
3616 | if (ret) { | |
3617 | goto out_teardown; | |
3618 | } | |
3619 | } | |
3620 | ||
c29029dd | 3621 | add_boot_device_path(vdev->bootindex, &pdev->qdev, NULL); |
7b4b0e9e | 3622 | vfio_register_err_notifier(vdev); |
c29029dd | 3623 | |
65501a74 AW |
3624 | return 0; |
3625 | ||
3626 | out_teardown: | |
3627 | pci_device_set_intx_routing_notifier(&vdev->pdev, NULL); | |
3628 | vfio_teardown_msi(vdev); | |
3629 | vfio_unmap_bars(vdev); | |
3630 | out_put: | |
4b5d5e87 | 3631 | g_free(vdev->emulated_config_bits); |
65501a74 AW |
3632 | vfio_put_device(vdev); |
3633 | vfio_put_group(group); | |
3634 | return ret; | |
3635 | } | |
3636 | ||
3637 | static void vfio_exitfn(PCIDevice *pdev) | |
3638 | { | |
3639 | VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev); | |
3640 | VFIOGroup *group = vdev->group; | |
3641 | ||
7b4b0e9e | 3642 | vfio_unregister_err_notifier(vdev); |
65501a74 AW |
3643 | pci_device_set_intx_routing_notifier(&vdev->pdev, NULL); |
3644 | vfio_disable_interrupts(vdev); | |
ea486926 | 3645 | if (vdev->intx.mmap_timer) { |
bc72ad67 | 3646 | timer_free(vdev->intx.mmap_timer); |
ea486926 | 3647 | } |
65501a74 AW |
3648 | vfio_teardown_msi(vdev); |
3649 | vfio_unmap_bars(vdev); | |
4b5d5e87 | 3650 | g_free(vdev->emulated_config_bits); |
6f864e6e | 3651 | g_free(vdev->rom); |
65501a74 AW |
3652 | vfio_put_device(vdev); |
3653 | vfio_put_group(group); | |
3654 | } | |
3655 | ||
3656 | static void vfio_pci_reset(DeviceState *dev) | |
3657 | { | |
3658 | PCIDevice *pdev = DO_UPCAST(PCIDevice, qdev, dev); | |
3659 | VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev); | |
3660 | ||
5834a83f AW |
3661 | DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain, |
3662 | vdev->host.bus, vdev->host.slot, vdev->host.function); | |
3663 | ||
f16f39c3 | 3664 | vfio_pci_pre_reset(vdev); |
ba661818 | 3665 | |
f16f39c3 AW |
3666 | if (vdev->reset_works && (vdev->has_flr || !vdev->has_pm_reset) && |
3667 | !ioctl(vdev->fd, VFIO_DEVICE_RESET)) { | |
3668 | DPRINTF("%04x:%02x:%02x.%x FLR/VFIO_DEVICE_RESET\n", vdev->host.domain, | |
3669 | vdev->host.bus, vdev->host.slot, vdev->host.function); | |
3670 | goto post_reset; | |
ba661818 AW |
3671 | } |
3672 | ||
f16f39c3 AW |
3673 | /* See if we can do our own bus reset */ |
3674 | if (!vfio_pci_hot_reset_one(vdev)) { | |
3675 | goto post_reset; | |
3676 | } | |
5834a83f | 3677 | |
f16f39c3 AW |
3678 | /* If nothing else works and the device supports PM reset, use it */ |
3679 | if (vdev->reset_works && vdev->has_pm_reset && | |
3680 | !ioctl(vdev->fd, VFIO_DEVICE_RESET)) { | |
3681 | DPRINTF("%04x:%02x:%02x.%x PCI PM Reset\n", vdev->host.domain, | |
3682 | vdev->host.bus, vdev->host.slot, vdev->host.function); | |
3683 | goto post_reset; | |
65501a74 | 3684 | } |
5834a83f | 3685 | |
f16f39c3 AW |
3686 | post_reset: |
3687 | vfio_pci_post_reset(vdev); | |
65501a74 AW |
3688 | } |
3689 | ||
3690 | static Property vfio_pci_dev_properties[] = { | |
3691 | DEFINE_PROP_PCI_HOST_DEVADDR("host", VFIODevice, host), | |
ea486926 AW |
3692 | DEFINE_PROP_UINT32("x-intx-mmap-timeout-ms", VFIODevice, |
3693 | intx.mmap_timeout, 1100), | |
f15689c7 AW |
3694 | DEFINE_PROP_BIT("x-vga", VFIODevice, features, |
3695 | VFIO_FEATURE_ENABLE_VGA_BIT, false), | |
c29029dd | 3696 | DEFINE_PROP_INT32("bootindex", VFIODevice, bootindex, -1), |
65501a74 AW |
3697 | /* |
3698 | * TODO - support passed fds... is this necessary? | |
3699 | * DEFINE_PROP_STRING("vfiofd", VFIODevice, vfiofd_name), | |
3700 | * DEFINE_PROP_STRING("vfiogroupfd, VFIODevice, vfiogroupfd_name), | |
3701 | */ | |
3702 | DEFINE_PROP_END_OF_LIST(), | |
3703 | }; | |
3704 | ||
d9f0e638 AW |
3705 | static const VMStateDescription vfio_pci_vmstate = { |
3706 | .name = "vfio-pci", | |
3707 | .unmigratable = 1, | |
3708 | }; | |
65501a74 AW |
3709 | |
3710 | static void vfio_pci_dev_class_init(ObjectClass *klass, void *data) | |
3711 | { | |
3712 | DeviceClass *dc = DEVICE_CLASS(klass); | |
3713 | PCIDeviceClass *pdc = PCI_DEVICE_CLASS(klass); | |
3714 | ||
3715 | dc->reset = vfio_pci_reset; | |
3716 | dc->props = vfio_pci_dev_properties; | |
d9f0e638 AW |
3717 | dc->vmsd = &vfio_pci_vmstate; |
3718 | dc->desc = "VFIO-based PCI device assignment"; | |
125ee0ed | 3719 | set_bit(DEVICE_CATEGORY_MISC, dc->categories); |
65501a74 AW |
3720 | pdc->init = vfio_initfn; |
3721 | pdc->exit = vfio_exitfn; | |
3722 | pdc->config_read = vfio_pci_read_config; | |
3723 | pdc->config_write = vfio_pci_write_config; | |
6a659bbf | 3724 | pdc->is_express = 1; /* We might be */ |
65501a74 AW |
3725 | } |
3726 | ||
3727 | static const TypeInfo vfio_pci_dev_info = { | |
3728 | .name = "vfio-pci", | |
3729 | .parent = TYPE_PCI_DEVICE, | |
3730 | .instance_size = sizeof(VFIODevice), | |
3731 | .class_init = vfio_pci_dev_class_init, | |
3732 | }; | |
3733 | ||
3734 | static void register_vfio_pci_dev_type(void) | |
3735 | { | |
3736 | type_register_static(&vfio_pci_dev_info); | |
3737 | } | |
3738 | ||
3739 | type_init(register_vfio_pci_dev_type) |