/*
 * vfio based device assignment support
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Alex Williamson <alex.williamson@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Based on qemu-kvm device-assignment:
 *  Adapted for KVM by Qumranet.
 *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
 *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
 *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
 *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
 *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
 */

#include <dirent.h>
#include <linux/vfio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

#include "config.h"
#include "exec/address-spaces.h"
#include "exec/memory.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/pci/pci.h"
#include "qemu-common.h"
#include "qemu/error-report.h"
#include "qemu/event_notifier.h"
#include "qemu/queue.h"
#include "qemu/range.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/misc/vfio.h"

/* #define DEBUG_VFIO */
#ifdef DEBUG_VFIO
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, "vfio: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif
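
/*
 * Example (illustrative only): with DEBUG_VFIO defined, a call such as
 *
 *   DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
 *           vdev->host.bus, vdev->host.slot, vdev->host.function);
 *
 * prints to stderr with a "vfio: " prefix; without DEBUG_VFIO it compiles
 * away to nothing.
 */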

/* Extra debugging, trap acceleration paths for more logging */
#define VFIO_ALLOW_MMAP 1
#define VFIO_ALLOW_KVM_INTX 1
#define VFIO_ALLOW_KVM_MSI 1
#define VFIO_ALLOW_KVM_MSIX 1

struct VFIODevice;

typedef struct VFIOQuirk {
    MemoryRegion mem;
    struct VFIODevice *vdev;
    QLIST_ENTRY(VFIOQuirk) next;
    struct {
        uint32_t base_offset:TARGET_PAGE_BITS;
        uint32_t address_offset:TARGET_PAGE_BITS;
        uint32_t address_size:3;
        uint32_t bar:3;

        uint32_t address_match;
        uint32_t address_mask;

        uint32_t address_val:TARGET_PAGE_BITS;
        uint32_t data_offset:TARGET_PAGE_BITS;
        uint32_t data_size:3;

        uint8_t flags;
        uint8_t read_flags;
        uint8_t write_flags;
    } data;
} VFIOQuirk;

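/*
 * Sketch (illustrative only; the field values are hypothetical): a BAR window
 * quirk fills in the data fields above so that the generic window quirk
 * handlers later in this file can redirect accesses, e.g.:
 *
 *   quirk->data.address_offset = 0x10;               // window address register
 *   quirk->data.data_offset = 0x14;                  // window data register
 *   quirk->data.address_size = quirk->data.data_size = 4;
 *   quirk->data.bar = nr;
 *   quirk->data.address_match = 0x4000;              // addresses to trap
 *   quirk->data.address_mask = PCIE_CONFIG_SPACE_SIZE - 1;
 *   quirk->data.read_flags = quirk->data.write_flags = 1;
 */
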
typedef struct VFIOBAR {
    off_t fd_offset; /* offset of BAR within device fd */
    int fd; /* device fd, allows us to pass VFIOBAR as opaque data */
    MemoryRegion mem; /* slow, read/write access */
    MemoryRegion mmap_mem; /* direct mapped access */
    void *mmap;
    size_t size;
    uint32_t flags; /* VFIO region flags (rd/wr/mmap) */
    uint8_t nr; /* cache the BAR number for debug */
    bool ioport;
    bool mem64;
    QLIST_HEAD(, VFIOQuirk) quirks;
} VFIOBAR;

typedef struct VFIOVGARegion {
    MemoryRegion mem;
    off_t offset;
    int nr;
    QLIST_HEAD(, VFIOQuirk) quirks;
} VFIOVGARegion;

typedef struct VFIOVGA {
    off_t fd_offset;
    int fd;
    VFIOVGARegion region[QEMU_PCI_VGA_NUM_REGIONS];
} VFIOVGA;

typedef struct VFIOINTx {
    bool pending; /* interrupt pending */
    bool kvm_accel; /* set when QEMU bypass through KVM enabled */
    uint8_t pin; /* which pin to pull for qemu_set_irq */
    EventNotifier interrupt; /* eventfd triggered on interrupt */
    EventNotifier unmask; /* eventfd for unmask on QEMU bypass */
    PCIINTxRoute route; /* routing info for QEMU bypass */
    uint32_t mmap_timeout; /* delay to re-enable mmaps after interrupt */
    QEMUTimer *mmap_timer; /* enable mmaps after periods w/o interrupts */
} VFIOINTx;

typedef struct VFIOMSIVector {
    /*
     * Two interrupt paths are configured per vector.  The first is only used
     * for interrupts injected via QEMU.  This is typically the non-accel path,
     * but may also be used when we want QEMU to handle masking and pending
     * bits.  The KVM path bypasses QEMU and is therefore higher performance,
     * but requires masking at the device.  virq is used to track the MSI route
     * through KVM, thus kvm_interrupt is only available when virq is set to a
     * valid (>= 0) value.
     */
    EventNotifier interrupt;
    EventNotifier kvm_interrupt;
    struct VFIODevice *vdev; /* back pointer to device */
    int virq;
    bool use;
} VFIOMSIVector;

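/*
 * Sketch (mirrors the selection logic used later in this file): the eventfd
 * that actually backs a vector depends on whether a KVM route exists:
 *
 *   int fd = (vector->virq >= 0) ?
 *            event_notifier_get_fd(&vector->kvm_interrupt) :  // KVM bypass
 *            event_notifier_get_fd(&vector->interrupt);       // QEMU injection
 */
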
enum {
    VFIO_INT_NONE = 0,
    VFIO_INT_INTx = 1,
    VFIO_INT_MSI = 2,
    VFIO_INT_MSIX = 3,
};

typedef struct VFIOAddressSpace {
    AddressSpace *as;
    QLIST_HEAD(, VFIOContainer) containers;
    QLIST_ENTRY(VFIOAddressSpace) list;
} VFIOAddressSpace;

static QLIST_HEAD(, VFIOAddressSpace) vfio_address_spaces =
    QLIST_HEAD_INITIALIZER(vfio_address_spaces);

struct VFIOGroup;

typedef struct VFIOType1 {
    MemoryListener listener;
    int error;
    bool initialized;
} VFIOType1;

typedef struct VFIOContainer {
    VFIOAddressSpace *space;
    int fd; /* /dev/vfio/vfio, empowered by the attached groups */
    struct {
        /* enable abstraction to support various iommu backends */
        union {
            VFIOType1 type1;
        };
        void (*release)(struct VFIOContainer *);
    } iommu_data;
    QLIST_HEAD(, VFIOGuestIOMMU) giommu_list;
    QLIST_HEAD(, VFIOGroup) group_list;
    QLIST_ENTRY(VFIOContainer) next;
} VFIOContainer;

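/*
 * Sketch (illustrative; the listener and callback names are hypothetical):
 * an IOMMU backend fills in its union member and the release hook when the
 * container is set up, roughly:
 *
 *   container->iommu_data.type1.listener = some_memory_listener;
 *   container->iommu_data.release = some_release_fn;
 *   memory_listener_register(&container->iommu_data.type1.listener,
 *                            container->space->as);
 */
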
typedef struct VFIOGuestIOMMU {
    VFIOContainer *container;
    MemoryRegion *iommu;
    Notifier n;
    QLIST_ENTRY(VFIOGuestIOMMU) giommu_next;
} VFIOGuestIOMMU;

/* Cache of MSI-X setup plus extra mmap and memory region for split BAR map */
typedef struct VFIOMSIXInfo {
    uint8_t table_bar;
    uint8_t pba_bar;
    uint16_t entries;
    uint32_t table_offset;
    uint32_t pba_offset;
    MemoryRegion mmap_mem;
    void *mmap;
} VFIOMSIXInfo;

typedef struct VFIODevice {
    PCIDevice pdev;
    int fd;
    VFIOINTx intx;
    unsigned int config_size;
    uint8_t *emulated_config_bits; /* QEMU emulated bits, little-endian */
    off_t config_offset; /* Offset of config space region within device fd */
    unsigned int rom_size;
    off_t rom_offset; /* Offset of ROM region within device fd */
    void *rom;
    int msi_cap_size;
    VFIOMSIVector *msi_vectors;
    VFIOMSIXInfo *msix;
    int nr_vectors; /* Number of MSI/MSIX vectors currently in use */
    int interrupt; /* Current interrupt type */
    VFIOBAR bars[PCI_NUM_REGIONS - 1]; /* No ROM */
    VFIOVGA vga; /* 0xa0000, 0x3b0, 0x3c0 */
    PCIHostDeviceAddress host;
    QLIST_ENTRY(VFIODevice) next;
    struct VFIOGroup *group;
    EventNotifier err_notifier;
    uint32_t features;
#define VFIO_FEATURE_ENABLE_VGA_BIT 0
#define VFIO_FEATURE_ENABLE_VGA (1 << VFIO_FEATURE_ENABLE_VGA_BIT)
    int32_t bootindex;
    uint8_t pm_cap;
    bool reset_works;
    bool has_vga;
    bool pci_aer;
    bool has_flr;
    bool has_pm_reset;
    bool needs_reset;
    bool rom_read_failed;
} VFIODevice;

typedef struct VFIOGroup {
    int fd;
    int groupid;
    VFIOContainer *container;
    QLIST_HEAD(, VFIODevice) device_list;
    QLIST_ENTRY(VFIOGroup) next;
    QLIST_ENTRY(VFIOGroup) container_next;
} VFIOGroup;

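/*
 * Sketch (illustrative; real VFIO ioctls, error handling omitted, the group
 * id is hypothetical): a group is opened by id and attached to a container
 * before any device in it can be used:
 *
 *   group->fd = qemu_open("/dev/vfio/26", O_RDWR);
 *   ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd);
 *   ioctl(container->fd, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU);
 */
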
typedef struct VFIORomBlacklistEntry {
    uint16_t vendor_id;
    uint16_t device_id;
} VFIORomBlacklistEntry;

/*
 * List of device ids/vendor ids for which to disable
 * option rom loading. This avoids guest hangs during rom
 * execution, as noticed with the BCM 57810 card, for lack of a
 * better way to handle such issues.
 * The user can still override by specifying a romfile or
 * rombar=1.
 * Please see https://bugs.launchpad.net/qemu/+bug/1284874
 * for an analysis of the 57810 card hang. When adding
 * a new vendor id/device id combination below, please also add
 * your card/environment details and information that could
 * help in debugging to the bug tracking this issue.
 */
static const VFIORomBlacklistEntry romblacklist[] = {
    /* Broadcom BCM 57810 */
    { 0x14e4, 0x168e }
};

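/*
 * Example (the vendor/device ids below are hypothetical): a new entry is
 * simply appended to the array above, e.g.
 *
 *   { 0xabcd, 0x1234 },
 *
 * together with the card/environment details in the bug referenced in the
 * comment.
 */
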
#define MSIX_CAP_LENGTH 12

static QLIST_HEAD(, VFIOGroup)
    group_list = QLIST_HEAD_INITIALIZER(group_list);

#ifdef CONFIG_KVM
/*
 * We have a single VFIO pseudo device per KVM VM.  Once created it lives
 * for the life of the VM.  Closing the file descriptor only drops our
 * reference to it and the device's reference to kvm.  Therefore once
 * initialized, this file descriptor is only released on QEMU exit and
 * we'll re-use it should another vfio device be attached before then.
 */
static int vfio_kvm_device_fd = -1;
#endif

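/*
 * Sketch (illustrative; real KVM API, simplified): the fd is created lazily
 * with KVM_CREATE_DEVICE the first time it is needed:
 *
 *   struct kvm_create_device cd = { .type = KVM_DEV_TYPE_VFIO };
 *
 *   if (vfio_kvm_device_fd < 0 &&
 *       !kvm_vm_ioctl(kvm_state, KVM_CREATE_DEVICE, &cd)) {
 *       vfio_kvm_device_fd = cd.fd;
 *   }
 */
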
static void vfio_disable_interrupts(VFIODevice *vdev);
static uint32_t vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len);
static void vfio_pci_write_config(PCIDevice *pdev, uint32_t addr,
                                  uint32_t val, int len);
static void vfio_mmap_set_enabled(VFIODevice *vdev, bool enabled);

/*
 * Common VFIO interrupt disable
 */
static void vfio_disable_irqindex(VFIODevice *vdev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER,
        .index = index,
        .start = 0,
        .count = 0,
    };

    ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

301 | /* | |
302 | * INTx | |
303 | */ | |
304 | static void vfio_unmask_intx(VFIODevice *vdev) | |
305 | { | |
306 | struct vfio_irq_set irq_set = { | |
307 | .argsz = sizeof(irq_set), | |
308 | .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK, | |
309 | .index = VFIO_PCI_INTX_IRQ_INDEX, | |
310 | .start = 0, | |
311 | .count = 1, | |
312 | }; | |
313 | ||
314 | ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, &irq_set); | |
315 | } | |
316 | ||
e1d1e586 AW |
317 | #ifdef CONFIG_KVM /* Unused outside of CONFIG_KVM code */ |
318 | static void vfio_mask_intx(VFIODevice *vdev) | |
319 | { | |
320 | struct vfio_irq_set irq_set = { | |
321 | .argsz = sizeof(irq_set), | |
322 | .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK, | |
323 | .index = VFIO_PCI_INTX_IRQ_INDEX, | |
324 | .start = 0, | |
325 | .count = 1, | |
326 | }; | |
327 | ||
328 | ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, &irq_set); | |
329 | } | |
330 | #endif | |
331 | ||
ea486926 AW |
332 | /* |
333 | * Disabling BAR mmapping can be slow, but toggling it around INTx can
334 | * also be a huge overhead. We try to get the best of both worlds by | |
335 | * waiting until an interrupt to disable mmaps (subsequent transitions | |
336 | * to the same state are effectively no overhead). If the interrupt has | |
337 | * been serviced and the time gap is long enough, we re-enable mmaps for | |
338 | * performance. This works well for things like graphics cards, which | |
339 | * may not use their interrupt at all and are penalized to an unusable | |
340 | * level by read/write BAR traps. Other devices, like NICs, have more | |
341 | * regular interrupts and see much better latency by staying in non-mmap | |
342 | * mode. We therefore set the default mmap_timeout such that a ping | |
343 | * is just enough to keep the mmap disabled. Users can experiment with | |
344 | * other options with the x-intx-mmap-timeout-ms parameter (a value of | |
345 | * zero disables the timer). | |
346 | */ | |
347 | static void vfio_intx_mmap_enable(void *opaque) | |
348 | { | |
349 | VFIODevice *vdev = opaque; | |
350 | ||
351 | if (vdev->intx.pending) { | |
bc72ad67 AB |
352 | timer_mod(vdev->intx.mmap_timer, |
353 | qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vdev->intx.mmap_timeout); | |
ea486926 AW |
354 | return; |
355 | } | |
356 | ||
357 | vfio_mmap_set_enabled(vdev, true); | |
358 | } | |
359 | ||
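/*
 * Usage note (the host address below is illustrative): the timeout can be
 * tuned per device with the x-intx-mmap-timeout-ms property mentioned above,
 * and a value of zero disables the timer entirely, e.g.
 *
 *   -device vfio-pci,host=01:00.0,x-intx-mmap-timeout-ms=0
 */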
65501a74 AW |
360 | static void vfio_intx_interrupt(void *opaque) |
361 | { | |
362 | VFIODevice *vdev = opaque; | |
363 | ||
364 | if (!event_notifier_test_and_clear(&vdev->intx.interrupt)) { | |
365 | return; | |
366 | } | |
367 | ||
368 | DPRINTF("%s(%04x:%02x:%02x.%x) Pin %c\n", __func__, vdev->host.domain, | |
369 | vdev->host.bus, vdev->host.slot, vdev->host.function, | |
370 | 'A' + vdev->intx.pin); | |
371 | ||
372 | vdev->intx.pending = true; | |
68919cac | 373 | pci_irq_assert(&vdev->pdev); |
ea486926 AW |
374 | vfio_mmap_set_enabled(vdev, false); |
375 | if (vdev->intx.mmap_timeout) { | |
bc72ad67 AB |
376 | timer_mod(vdev->intx.mmap_timer, |
377 | qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vdev->intx.mmap_timeout); | |
ea486926 | 378 | } |
65501a74 AW |
379 | } |
380 | ||
381 | static void vfio_eoi(VFIODevice *vdev) | |
382 | { | |
383 | if (!vdev->intx.pending) { | |
384 | return; | |
385 | } | |
386 | ||
387 | DPRINTF("%s(%04x:%02x:%02x.%x) EOI\n", __func__, vdev->host.domain, | |
388 | vdev->host.bus, vdev->host.slot, vdev->host.function); | |
389 | ||
390 | vdev->intx.pending = false; | |
68919cac | 391 | pci_irq_deassert(&vdev->pdev); |
65501a74 AW |
392 | vfio_unmask_intx(vdev); |
393 | } | |
394 | ||
e1d1e586 AW |
395 | static void vfio_enable_intx_kvm(VFIODevice *vdev) |
396 | { | |
397 | #ifdef CONFIG_KVM | |
398 | struct kvm_irqfd irqfd = { | |
399 | .fd = event_notifier_get_fd(&vdev->intx.interrupt), | |
400 | .gsi = vdev->intx.route.irq, | |
401 | .flags = KVM_IRQFD_FLAG_RESAMPLE, | |
402 | }; | |
403 | struct vfio_irq_set *irq_set; | |
404 | int ret, argsz; | |
405 | int32_t *pfd; | |
406 | ||
82ca8912 | 407 | if (!VFIO_ALLOW_KVM_INTX || !kvm_irqfds_enabled() || |
e1d1e586 | 408 | vdev->intx.route.mode != PCI_INTX_ENABLED || |
9fc0e2d8 | 409 | !kvm_resamplefds_enabled()) { |
e1d1e586 AW |
410 | return; |
411 | } | |
412 | ||
413 | /* Get to a known interrupt state */ | |
414 | qemu_set_fd_handler(irqfd.fd, NULL, NULL, vdev); | |
415 | vfio_mask_intx(vdev); | |
416 | vdev->intx.pending = false; | |
68919cac | 417 | pci_irq_deassert(&vdev->pdev); |
e1d1e586 AW |
418 | |
419 | /* Get an eventfd for resample/unmask */ | |
420 | if (event_notifier_init(&vdev->intx.unmask, 0)) { | |
312fd5f2 | 421 | error_report("vfio: Error: event_notifier_init failed eoi"); |
e1d1e586 AW |
422 | goto fail; |
423 | } | |
424 | ||
425 | /* KVM triggers it, VFIO listens for it */ | |
426 | irqfd.resamplefd = event_notifier_get_fd(&vdev->intx.unmask); | |
427 | ||
428 | if (kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd)) { | |
312fd5f2 | 429 | error_report("vfio: Error: Failed to setup resample irqfd: %m"); |
e1d1e586 AW |
430 | goto fail_irqfd; |
431 | } | |
432 | ||
433 | argsz = sizeof(*irq_set) + sizeof(*pfd); | |
434 | ||
435 | irq_set = g_malloc0(argsz); | |
436 | irq_set->argsz = argsz; | |
437 | irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_UNMASK; | |
438 | irq_set->index = VFIO_PCI_INTX_IRQ_INDEX; | |
439 | irq_set->start = 0; | |
440 | irq_set->count = 1; | |
441 | pfd = (int32_t *)&irq_set->data; | |
442 | ||
443 | *pfd = irqfd.resamplefd; | |
444 | ||
445 | ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set); | |
446 | g_free(irq_set); | |
447 | if (ret) { | |
312fd5f2 | 448 | error_report("vfio: Error: Failed to setup INTx unmask fd: %m"); |
e1d1e586 AW |
449 | goto fail_vfio; |
450 | } | |
451 | ||
452 | /* Let'em rip */ | |
453 | vfio_unmask_intx(vdev); | |
454 | ||
455 | vdev->intx.kvm_accel = true; | |
456 | ||
457 | DPRINTF("%s(%04x:%02x:%02x.%x) KVM INTx accel enabled\n", | |
458 | __func__, vdev->host.domain, vdev->host.bus, | |
459 | vdev->host.slot, vdev->host.function); | |
460 | ||
461 | return; | |
462 | ||
463 | fail_vfio: | |
464 | irqfd.flags = KVM_IRQFD_FLAG_DEASSIGN; | |
465 | kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd); | |
466 | fail_irqfd: | |
467 | event_notifier_cleanup(&vdev->intx.unmask); | |
468 | fail: | |
469 | qemu_set_fd_handler(irqfd.fd, vfio_intx_interrupt, NULL, vdev); | |
470 | vfio_unmask_intx(vdev); | |
471 | #endif | |
472 | } | |
473 | ||
474 | static void vfio_disable_intx_kvm(VFIODevice *vdev) | |
475 | { | |
476 | #ifdef CONFIG_KVM | |
477 | struct kvm_irqfd irqfd = { | |
478 | .fd = event_notifier_get_fd(&vdev->intx.interrupt), | |
479 | .gsi = vdev->intx.route.irq, | |
480 | .flags = KVM_IRQFD_FLAG_DEASSIGN, | |
481 | }; | |
482 | ||
483 | if (!vdev->intx.kvm_accel) { | |
484 | return; | |
485 | } | |
486 | ||
487 | /* | |
488 | * Get to a known state, hardware masked, QEMU ready to accept new | |
489 | * interrupts, QEMU IRQ de-asserted. | |
490 | */ | |
491 | vfio_mask_intx(vdev); | |
492 | vdev->intx.pending = false; | |
68919cac | 493 | pci_irq_deassert(&vdev->pdev); |
e1d1e586 AW |
494 | |
495 | /* Tell KVM to stop listening for an INTx irqfd */ | |
496 | if (kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd)) { | |
312fd5f2 | 497 | error_report("vfio: Error: Failed to disable INTx irqfd: %m"); |
e1d1e586 AW |
498 | } |
499 | ||
500 | /* We only need to close the eventfd for VFIO to cleanup the kernel side */ | |
501 | event_notifier_cleanup(&vdev->intx.unmask); | |
502 | ||
503 | /* QEMU starts listening for interrupt events. */ | |
504 | qemu_set_fd_handler(irqfd.fd, vfio_intx_interrupt, NULL, vdev); | |
505 | ||
506 | vdev->intx.kvm_accel = false; | |
507 | ||
508 | /* If we've missed an event, let it re-fire through QEMU */ | |
509 | vfio_unmask_intx(vdev); | |
510 | ||
511 | DPRINTF("%s(%04x:%02x:%02x.%x) KVM INTx accel disabled\n", | |
512 | __func__, vdev->host.domain, vdev->host.bus, | |
513 | vdev->host.slot, vdev->host.function); | |
514 | #endif | |
515 | } | |
516 | ||
517 | static void vfio_update_irq(PCIDevice *pdev) | |
518 | { | |
519 | VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev); | |
520 | PCIINTxRoute route; | |
521 | ||
522 | if (vdev->interrupt != VFIO_INT_INTx) { | |
523 | return; | |
524 | } | |
525 | ||
526 | route = pci_device_route_intx_to_irq(&vdev->pdev, vdev->intx.pin); | |
527 | ||
528 | if (!pci_intx_route_changed(&vdev->intx.route, &route)) { | |
529 | return; /* Nothing changed */ | |
530 | } | |
531 | ||
532 | DPRINTF("%s(%04x:%02x:%02x.%x) IRQ moved %d -> %d\n", __func__, | |
533 | vdev->host.domain, vdev->host.bus, vdev->host.slot, | |
534 | vdev->host.function, vdev->intx.route.irq, route.irq); | |
535 | ||
536 | vfio_disable_intx_kvm(vdev); | |
537 | ||
538 | vdev->intx.route = route; | |
539 | ||
540 | if (route.mode != PCI_INTX_ENABLED) { | |
541 | return; | |
542 | } | |
543 | ||
544 | vfio_enable_intx_kvm(vdev); | |
545 | ||
546 | /* Re-enable the interrupt in case we missed an EOI */
547 | vfio_eoi(vdev); | |
548 | } | |
549 | ||
65501a74 AW |
550 | static int vfio_enable_intx(VFIODevice *vdev) |
551 | { | |
65501a74 | 552 | uint8_t pin = vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1); |
1a403133 AW |
553 | int ret, argsz; |
554 | struct vfio_irq_set *irq_set; | |
555 | int32_t *pfd; | |
65501a74 | 556 | |
ea486926 | 557 | if (!pin) { |
65501a74 AW |
558 | return 0; |
559 | } | |
560 | ||
561 | vfio_disable_interrupts(vdev); | |
562 | ||
563 | vdev->intx.pin = pin - 1; /* Pin A (1) -> irq[0] */ | |
68919cac | 564 | pci_config_set_interrupt_pin(vdev->pdev.config, pin); |
e1d1e586 AW |
565 | |
566 | #ifdef CONFIG_KVM | |
567 | /* | |
568 | * Only conditional to avoid generating error messages on platforms | |
569 | * where we won't actually use the result anyway. | |
570 | */ | |
9fc0e2d8 | 571 | if (kvm_irqfds_enabled() && kvm_resamplefds_enabled()) { |
e1d1e586 AW |
572 | vdev->intx.route = pci_device_route_intx_to_irq(&vdev->pdev, |
573 | vdev->intx.pin); | |
574 | } | |
575 | #endif | |
576 | ||
65501a74 AW |
577 | ret = event_notifier_init(&vdev->intx.interrupt, 0); |
578 | if (ret) { | |
312fd5f2 | 579 | error_report("vfio: Error: event_notifier_init failed"); |
65501a74 AW |
580 | return ret; |
581 | } | |
582 | ||
1a403133 AW |
583 | argsz = sizeof(*irq_set) + sizeof(*pfd); |
584 | ||
585 | irq_set = g_malloc0(argsz); | |
586 | irq_set->argsz = argsz; | |
587 | irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER; | |
588 | irq_set->index = VFIO_PCI_INTX_IRQ_INDEX; | |
589 | irq_set->start = 0; | |
590 | irq_set->count = 1; | |
591 | pfd = (int32_t *)&irq_set->data; | |
592 | ||
593 | *pfd = event_notifier_get_fd(&vdev->intx.interrupt); | |
594 | qemu_set_fd_handler(*pfd, vfio_intx_interrupt, NULL, vdev); | |
65501a74 | 595 | |
1a403133 AW |
596 | ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set); |
597 | g_free(irq_set); | |
598 | if (ret) { | |
312fd5f2 | 599 | error_report("vfio: Error: Failed to setup INTx fd: %m"); |
1a403133 | 600 | qemu_set_fd_handler(*pfd, NULL, NULL, vdev); |
ce59af2d | 601 | event_notifier_cleanup(&vdev->intx.interrupt); |
65501a74 AW |
602 | return -errno; |
603 | } | |
604 | ||
e1d1e586 AW |
605 | vfio_enable_intx_kvm(vdev); |
606 | ||
65501a74 AW |
607 | vdev->interrupt = VFIO_INT_INTx; |
608 | ||
609 | DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain, | |
610 | vdev->host.bus, vdev->host.slot, vdev->host.function); | |
611 | ||
612 | return 0; | |
613 | } | |
614 | ||
615 | static void vfio_disable_intx(VFIODevice *vdev) | |
616 | { | |
617 | int fd; | |
618 | ||
bc72ad67 | 619 | timer_del(vdev->intx.mmap_timer); |
e1d1e586 | 620 | vfio_disable_intx_kvm(vdev); |
65501a74 AW |
621 | vfio_disable_irqindex(vdev, VFIO_PCI_INTX_IRQ_INDEX); |
622 | vdev->intx.pending = false; | |
68919cac | 623 | pci_irq_deassert(&vdev->pdev); |
65501a74 AW |
624 | vfio_mmap_set_enabled(vdev, true); |
625 | ||
626 | fd = event_notifier_get_fd(&vdev->intx.interrupt); | |
627 | qemu_set_fd_handler(fd, NULL, NULL, vdev); | |
628 | event_notifier_cleanup(&vdev->intx.interrupt); | |
629 | ||
630 | vdev->interrupt = VFIO_INT_NONE; | |
631 | ||
632 | DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain, | |
633 | vdev->host.bus, vdev->host.slot, vdev->host.function); | |
634 | } | |
635 | ||
636 | /* | |
637 | * MSI/X | |
638 | */ | |
639 | static void vfio_msi_interrupt(void *opaque) | |
640 | { | |
641 | VFIOMSIVector *vector = opaque; | |
642 | VFIODevice *vdev = vector->vdev; | |
643 | int nr = vector - vdev->msi_vectors; | |
644 | ||
645 | if (!event_notifier_test_and_clear(&vector->interrupt)) { | |
646 | return; | |
647 | } | |
648 | ||
8b6d1408 | 649 | #ifdef DEBUG_VFIO |
b3ebc10c AW |
650 | MSIMessage msg; |
651 | ||
652 | if (vdev->interrupt == VFIO_INT_MSIX) { | |
b3ebc10c | 653 | msg = msix_get_message(&vdev->pdev, nr); |
9035f8c0 AW |
654 | } else if (vdev->interrupt == VFIO_INT_MSI) { |
655 | msg = msi_get_message(&vdev->pdev, nr); | |
b3ebc10c AW |
656 | } else { |
657 | abort(); | |
658 | } | |
659 | ||
660 | DPRINTF("%s(%04x:%02x:%02x.%x) vector %d 0x%"PRIx64"/0x%x\n", __func__, | |
65501a74 | 661 | vdev->host.domain, vdev->host.bus, vdev->host.slot, |
b3ebc10c AW |
662 | vdev->host.function, nr, msg.address, msg.data); |
663 | #endif | |
65501a74 AW |
664 | |
665 | if (vdev->interrupt == VFIO_INT_MSIX) { | |
666 | msix_notify(&vdev->pdev, nr); | |
667 | } else if (vdev->interrupt == VFIO_INT_MSI) { | |
668 | msi_notify(&vdev->pdev, nr); | |
669 | } else { | |
670 | error_report("vfio: MSI interrupt received, but not enabled?");
65501a74 AW |
671 | } |
672 | } | |
673 | ||
674 | static int vfio_enable_vectors(VFIODevice *vdev, bool msix) | |
675 | { | |
676 | struct vfio_irq_set *irq_set; | |
677 | int ret = 0, i, argsz; | |
678 | int32_t *fds; | |
679 | ||
680 | argsz = sizeof(*irq_set) + (vdev->nr_vectors * sizeof(*fds)); | |
681 | ||
682 | irq_set = g_malloc0(argsz); | |
683 | irq_set->argsz = argsz; | |
684 | irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER; | |
685 | irq_set->index = msix ? VFIO_PCI_MSIX_IRQ_INDEX : VFIO_PCI_MSI_IRQ_INDEX; | |
686 | irq_set->start = 0; | |
687 | irq_set->count = vdev->nr_vectors; | |
688 | fds = (int32_t *)&irq_set->data; | |
689 | ||
690 | for (i = 0; i < vdev->nr_vectors; i++) { | |
c048be5c AW |
691 | int fd = -1; |
692 | ||
693 | /* | |
694 | * MSI vs MSI-X - The guest has direct access to MSI mask and pending | |
695 | * bits, therefore we always use the KVM signaling path when setup. | |
696 | * MSI-X mask and pending bits are emulated, so we want to use the | |
697 | * KVM signaling path only when configured and unmasked. | |
698 | */ | |
699 | if (vdev->msi_vectors[i].use) { | |
700 | if (vdev->msi_vectors[i].virq < 0 || | |
701 | (msix && msix_is_masked(&vdev->pdev, i))) { | |
702 | fd = event_notifier_get_fd(&vdev->msi_vectors[i].interrupt); | |
703 | } else { | |
704 | fd = event_notifier_get_fd(&vdev->msi_vectors[i].kvm_interrupt); | |
705 | } | |
65501a74 | 706 | } |
c048be5c AW |
707 | |
708 | fds[i] = fd; | |
65501a74 AW |
709 | } |
710 | ||
711 | ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set); | |
712 | ||
713 | g_free(irq_set); | |
714 | ||
65501a74 AW |
715 | return ret; |
716 | } | |
717 | ||
f4d45d47 AW |
718 | static void vfio_add_kvm_msi_virq(VFIOMSIVector *vector, MSIMessage *msg, |
719 | bool msix) | |
720 | { | |
721 | int virq; | |
722 | ||
723 | if ((msix && !VFIO_ALLOW_KVM_MSIX) || | |
724 | (!msix && !VFIO_ALLOW_KVM_MSI) || !msg) { | |
725 | return; | |
726 | } | |
727 | ||
728 | if (event_notifier_init(&vector->kvm_interrupt, 0)) { | |
729 | return; | |
730 | } | |
731 | ||
732 | virq = kvm_irqchip_add_msi_route(kvm_state, *msg); | |
733 | if (virq < 0) { | |
734 | event_notifier_cleanup(&vector->kvm_interrupt); | |
735 | return; | |
736 | } | |
737 | ||
738 | if (kvm_irqchip_add_irqfd_notifier(kvm_state, &vector->kvm_interrupt, | |
739 | NULL, virq) < 0) { | |
740 | kvm_irqchip_release_virq(kvm_state, virq); | |
741 | event_notifier_cleanup(&vector->kvm_interrupt); | |
742 | return; | |
743 | } | |
744 | ||
f4d45d47 AW |
745 | vector->virq = virq; |
746 | } | |
747 | ||
748 | static void vfio_remove_kvm_msi_virq(VFIOMSIVector *vector) | |
749 | { | |
750 | kvm_irqchip_remove_irqfd_notifier(kvm_state, &vector->kvm_interrupt, | |
751 | vector->virq); | |
752 | kvm_irqchip_release_virq(kvm_state, vector->virq); | |
753 | vector->virq = -1; | |
754 | event_notifier_cleanup(&vector->kvm_interrupt); | |
755 | } | |
756 | ||
757 | static void vfio_update_kvm_msi_virq(VFIOMSIVector *vector, MSIMessage msg) | |
758 | { | |
759 | kvm_irqchip_update_msi_route(kvm_state, vector->virq, msg); | |
f4d45d47 AW |
760 | } |
761 | ||
b0223e29 AW |
762 | static int vfio_msix_vector_do_use(PCIDevice *pdev, unsigned int nr, |
763 | MSIMessage *msg, IOHandler *handler) | |
65501a74 AW |
764 | { |
765 | VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev); | |
766 | VFIOMSIVector *vector; | |
767 | int ret; | |
768 | ||
769 | DPRINTF("%s(%04x:%02x:%02x.%x) vector %d used\n", __func__, | |
770 | vdev->host.domain, vdev->host.bus, vdev->host.slot, | |
771 | vdev->host.function, nr); | |
772 | ||
65501a74 | 773 | vector = &vdev->msi_vectors[nr]; |
65501a74 | 774 | |
f4d45d47 AW |
775 | if (!vector->use) { |
776 | vector->vdev = vdev; | |
777 | vector->virq = -1; | |
778 | if (event_notifier_init(&vector->interrupt, 0)) { | |
779 | error_report("vfio: Error: event_notifier_init failed"); | |
780 | } | |
781 | vector->use = true; | |
782 | msix_vector_use(pdev, nr); | |
65501a74 AW |
783 | } |
784 | ||
f4d45d47 AW |
785 | qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt), |
786 | handler, NULL, vector); | |
787 | ||
65501a74 AW |
788 | /* |
789 | * Attempt to enable route through KVM irqchip, | |
790 | * default to userspace handling if unavailable. | |
791 | */ | |
f4d45d47 AW |
792 | if (vector->virq >= 0) { |
793 | if (!msg) { | |
794 | vfio_remove_kvm_msi_virq(vector); | |
795 | } else { | |
796 | vfio_update_kvm_msi_virq(vector, *msg); | |
65501a74 | 797 | } |
f4d45d47 AW |
798 | } else { |
799 | vfio_add_kvm_msi_virq(vector, msg, true); | |
65501a74 AW |
800 | } |
801 | ||
802 | /* | |
803 | * We don't want to have the host allocate all possible MSI vectors | |
804 | * for a device if they're not in use, so we shut down and incrementally
805 | * increase them as needed. | |
806 | */ | |
807 | if (vdev->nr_vectors < nr + 1) { | |
65501a74 AW |
808 | vfio_disable_irqindex(vdev, VFIO_PCI_MSIX_IRQ_INDEX); |
809 | vdev->nr_vectors = nr + 1; | |
810 | ret = vfio_enable_vectors(vdev, true); | |
811 | if (ret) { | |
312fd5f2 | 812 | error_report("vfio: failed to enable vectors, %d", ret); |
65501a74 | 813 | } |
65501a74 | 814 | } else { |
1a403133 AW |
815 | int argsz; |
816 | struct vfio_irq_set *irq_set; | |
817 | int32_t *pfd; | |
818 | ||
819 | argsz = sizeof(*irq_set) + sizeof(*pfd); | |
820 | ||
821 | irq_set = g_malloc0(argsz); | |
822 | irq_set->argsz = argsz; | |
823 | irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | | |
824 | VFIO_IRQ_SET_ACTION_TRIGGER; | |
825 | irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX; | |
826 | irq_set->start = nr; | |
827 | irq_set->count = 1; | |
828 | pfd = (int32_t *)&irq_set->data; | |
829 | ||
f4d45d47 AW |
830 | if (vector->virq >= 0) { |
831 | *pfd = event_notifier_get_fd(&vector->kvm_interrupt); | |
832 | } else { | |
833 | *pfd = event_notifier_get_fd(&vector->interrupt); | |
834 | } | |
1a403133 AW |
835 | |
836 | ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set); | |
837 | g_free(irq_set); | |
65501a74 | 838 | if (ret) { |
312fd5f2 | 839 | error_report("vfio: failed to modify vector, %d", ret); |
65501a74 | 840 | } |
65501a74 AW |
841 | } |
842 | ||
843 | return 0; | |
844 | } | |
845 | ||
b0223e29 AW |
846 | static int vfio_msix_vector_use(PCIDevice *pdev, |
847 | unsigned int nr, MSIMessage msg) | |
848 | { | |
849 | return vfio_msix_vector_do_use(pdev, nr, &msg, vfio_msi_interrupt); | |
850 | } | |
851 | ||
65501a74 AW |
852 | static void vfio_msix_vector_release(PCIDevice *pdev, unsigned int nr) |
853 | { | |
854 | VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev); | |
855 | VFIOMSIVector *vector = &vdev->msi_vectors[nr]; | |
65501a74 AW |
856 | |
857 | DPRINTF("%s(%04x:%02x:%02x.%x) vector %d released\n", __func__, | |
858 | vdev->host.domain, vdev->host.bus, vdev->host.slot, | |
859 | vdev->host.function, nr); | |
860 | ||
861 | /* | |
f4d45d47 AW |
862 | * There are still old guests that mask and unmask vectors on every |
863 | * interrupt. If we're using QEMU bypass with a KVM irqfd, leave all of | |
864 | * the KVM setup in place, simply switch VFIO to use the non-bypass | |
865 | * eventfd. We'll then fire the interrupt through QEMU and the MSI-X | |
866 | * core will mask the interrupt and set pending bits, allowing it to | |
867 | * be re-asserted on unmask. Nothing to do if already using QEMU mode. | |
65501a74 | 868 | */ |
f4d45d47 AW |
869 | if (vector->virq >= 0) { |
870 | int argsz; | |
871 | struct vfio_irq_set *irq_set; | |
872 | int32_t *pfd; | |
1a403133 | 873 | |
f4d45d47 | 874 | argsz = sizeof(*irq_set) + sizeof(*pfd); |
1a403133 | 875 | |
f4d45d47 AW |
876 | irq_set = g_malloc0(argsz); |
877 | irq_set->argsz = argsz; | |
878 | irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | | |
879 | VFIO_IRQ_SET_ACTION_TRIGGER; | |
880 | irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX; | |
881 | irq_set->start = nr; | |
882 | irq_set->count = 1; | |
883 | pfd = (int32_t *)&irq_set->data; | |
1a403133 | 884 | |
f4d45d47 | 885 | *pfd = event_notifier_get_fd(&vector->interrupt); |
1a403133 | 886 | |
f4d45d47 | 887 | ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set); |
65501a74 | 888 | |
f4d45d47 | 889 | g_free(irq_set); |
65501a74 | 890 | } |
65501a74 AW |
891 | } |
892 | ||
fd704adc AW |
893 | static void vfio_enable_msix(VFIODevice *vdev) |
894 | { | |
895 | vfio_disable_interrupts(vdev); | |
896 | ||
897 | vdev->msi_vectors = g_malloc0(vdev->msix->entries * sizeof(VFIOMSIVector)); | |
898 | ||
899 | vdev->interrupt = VFIO_INT_MSIX; | |
900 | ||
b0223e29 AW |
901 | /* |
902 | * Some communication channels between VF & PF or PF & fw rely on the | |
903 | * physical state of the device and expect that enabling MSI-X from the | |
904 | * guest enables the same on the host. When our guest is Linux, the | |
905 | * guest driver call to pci_enable_msix() sets the enabling bit in the | |
906 | * MSI-X capability, but leaves the vector table masked. We therefore | |
907 | * can't rely on a vector_use callback (from request_irq() in the guest) | |
908 | * to switch the physical device into MSI-X mode because that may come a | |
909 | * long time after pci_enable_msix(). This code enables vector 0 with | |
910 | * triggering to userspace, then immediately release the vector, leaving | |
911 | * the physical device with no vectors enabled, but MSI-X enabled, just | |
912 | * like the guest view. | |
913 | */ | |
914 | vfio_msix_vector_do_use(&vdev->pdev, 0, NULL, NULL); | |
915 | vfio_msix_vector_release(&vdev->pdev, 0); | |
916 | ||
fd704adc | 917 | if (msix_set_vector_notifiers(&vdev->pdev, vfio_msix_vector_use, |
bbef882c | 918 | vfio_msix_vector_release, NULL)) { |
312fd5f2 | 919 | error_report("vfio: msix_set_vector_notifiers failed"); |
fd704adc AW |
920 | } |
921 | ||
922 | DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain, | |
923 | vdev->host.bus, vdev->host.slot, vdev->host.function); | |
924 | } | |
925 | ||
65501a74 AW |
926 | static void vfio_enable_msi(VFIODevice *vdev) |
927 | { | |
928 | int ret, i; | |
929 | ||
930 | vfio_disable_interrupts(vdev); | |
931 | ||
932 | vdev->nr_vectors = msi_nr_vectors_allocated(&vdev->pdev); | |
933 | retry: | |
934 | vdev->msi_vectors = g_malloc0(vdev->nr_vectors * sizeof(VFIOMSIVector)); | |
935 | ||
936 | for (i = 0; i < vdev->nr_vectors; i++) { | |
65501a74 | 937 | VFIOMSIVector *vector = &vdev->msi_vectors[i]; |
9b3af4c0 | 938 | MSIMessage msg = msi_get_message(&vdev->pdev, i); |
65501a74 AW |
939 | |
940 | vector->vdev = vdev; | |
f4d45d47 | 941 | vector->virq = -1; |
65501a74 AW |
942 | vector->use = true; |
943 | ||
944 | if (event_notifier_init(&vector->interrupt, 0)) { | |
312fd5f2 | 945 | error_report("vfio: Error: event_notifier_init failed"); |
65501a74 AW |
946 | } |
947 | ||
f4d45d47 AW |
948 | qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt), |
949 | vfio_msi_interrupt, NULL, vector); | |
950 | ||
65501a74 AW |
951 | /* |
952 | * Attempt to enable route through KVM irqchip, | |
953 | * default to userspace handling if unavailable. | |
954 | */ | |
9b3af4c0 | 955 | vfio_add_kvm_msi_virq(vector, &msg, false); |
65501a74 AW |
956 | } |
957 | ||
f4d45d47 AW |
958 | /* Set interrupt type prior to possible interrupts */ |
959 | vdev->interrupt = VFIO_INT_MSI; | |
960 | ||
65501a74 AW |
961 | ret = vfio_enable_vectors(vdev, false); |
962 | if (ret) { | |
963 | if (ret < 0) { | |
312fd5f2 | 964 | error_report("vfio: Error: Failed to setup MSI fds: %m"); |
65501a74 AW |
965 | } else if (ret != vdev->nr_vectors) { |
966 | error_report("vfio: Error: Failed to enable %d " | |
312fd5f2 | 967 | "MSI vectors, retry with %d", vdev->nr_vectors, ret); |
65501a74 AW |
968 | } |
969 | ||
970 | for (i = 0; i < vdev->nr_vectors; i++) { | |
971 | VFIOMSIVector *vector = &vdev->msi_vectors[i]; | |
972 | if (vector->virq >= 0) { | |
f4d45d47 | 973 | vfio_remove_kvm_msi_virq(vector); |
65501a74 | 974 | } |
f4d45d47 AW |
975 | qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt), |
976 | NULL, NULL, NULL); | |
65501a74 AW |
977 | event_notifier_cleanup(&vector->interrupt); |
978 | } | |
979 | ||
980 | g_free(vdev->msi_vectors); | |
981 | ||
982 | if (ret > 0 && ret != vdev->nr_vectors) { | |
983 | vdev->nr_vectors = ret; | |
984 | goto retry; | |
985 | } | |
986 | vdev->nr_vectors = 0; | |
987 | ||
f4d45d47 AW |
988 | /* |
989 | * Failing to set up MSI doesn't really fall within any specification.
990 | * Let's try leaving interrupts disabled and hope the guest figures | |
991 | * out to fall back to INTx for this device. | |
992 | */ | |
993 | error_report("vfio: Error: Failed to enable MSI"); | |
994 | vdev->interrupt = VFIO_INT_NONE; | |
995 | ||
65501a74 AW |
996 | return; |
997 | } | |
998 | ||
65501a74 AW |
999 | DPRINTF("%s(%04x:%02x:%02x.%x) Enabled %d MSI vectors\n", __func__, |
1000 | vdev->host.domain, vdev->host.bus, vdev->host.slot, | |
1001 | vdev->host.function, vdev->nr_vectors); | |
1002 | } | |
1003 | ||
fd704adc AW |
1004 | static void vfio_disable_msi_common(VFIODevice *vdev) |
1005 | { | |
f4d45d47 AW |
1006 | int i; |
1007 | ||
1008 | for (i = 0; i < vdev->nr_vectors; i++) { | |
1009 | VFIOMSIVector *vector = &vdev->msi_vectors[i]; | |
1010 | if (vdev->msi_vectors[i].use) { | |
1011 | if (vector->virq >= 0) { | |
1012 | vfio_remove_kvm_msi_virq(vector); | |
1013 | } | |
1014 | qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt), | |
1015 | NULL, NULL, NULL); | |
1016 | event_notifier_cleanup(&vector->interrupt); | |
1017 | } | |
1018 | } | |
1019 | ||
fd704adc AW |
1020 | g_free(vdev->msi_vectors); |
1021 | vdev->msi_vectors = NULL; | |
1022 | vdev->nr_vectors = 0; | |
1023 | vdev->interrupt = VFIO_INT_NONE; | |
1024 | ||
1025 | vfio_enable_intx(vdev); | |
1026 | } | |
1027 | ||
1028 | static void vfio_disable_msix(VFIODevice *vdev) | |
1029 | { | |
3e40ba0f AW |
1030 | int i; |
1031 | ||
fd704adc AW |
1032 | msix_unset_vector_notifiers(&vdev->pdev); |
1033 | ||
3e40ba0f AW |
1034 | /* |
1035 | * MSI-X will only release vectors if MSI-X is still enabled on the | |
1036 | * device, so check through the rest and release them ourselves if necessary.
1037 | */ | |
1038 | for (i = 0; i < vdev->nr_vectors; i++) { | |
1039 | if (vdev->msi_vectors[i].use) { | |
1040 | vfio_msix_vector_release(&vdev->pdev, i); | |
f4d45d47 | 1041 | msix_vector_unuse(&vdev->pdev, i); |
3e40ba0f AW |
1042 | } |
1043 | } | |
1044 | ||
fd704adc AW |
1045 | if (vdev->nr_vectors) { |
1046 | vfio_disable_irqindex(vdev, VFIO_PCI_MSIX_IRQ_INDEX); | |
1047 | } | |
1048 | ||
1049 | vfio_disable_msi_common(vdev); | |
1050 | ||
a011b10e AW |
1051 | DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain, |
1052 | vdev->host.bus, vdev->host.slot, vdev->host.function); | |
fd704adc AW |
1053 | } |
1054 | ||
1055 | static void vfio_disable_msi(VFIODevice *vdev) | |
65501a74 | 1056 | { |
fd704adc | 1057 | vfio_disable_irqindex(vdev, VFIO_PCI_MSI_IRQ_INDEX); |
fd704adc | 1058 | vfio_disable_msi_common(vdev); |
65501a74 | 1059 | |
fd704adc AW |
1060 | DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain, |
1061 | vdev->host.bus, vdev->host.slot, vdev->host.function); | |
65501a74 AW |
1062 | } |
1063 | ||
c7679d45 AW |
1064 | static void vfio_update_msi(VFIODevice *vdev) |
1065 | { | |
1066 | int i; | |
1067 | ||
1068 | for (i = 0; i < vdev->nr_vectors; i++) { | |
1069 | VFIOMSIVector *vector = &vdev->msi_vectors[i]; | |
1070 | MSIMessage msg; | |
1071 | ||
1072 | if (!vector->use || vector->virq < 0) { | |
1073 | continue; | |
1074 | } | |
1075 | ||
1076 | msg = msi_get_message(&vdev->pdev, i); | |
f4d45d47 | 1077 | vfio_update_kvm_msi_virq(vector, msg); |
c7679d45 AW |
1078 | } |
1079 | } | |
1080 | ||
65501a74 AW |
1081 | /* |
1082 | * IO Port/MMIO - Beware of the endians, VFIO is always little endian | |
1083 | */ | |
a8170e5e | 1084 | static void vfio_bar_write(void *opaque, hwaddr addr, |
65501a74 AW |
1085 | uint64_t data, unsigned size) |
1086 | { | |
1087 | VFIOBAR *bar = opaque; | |
1088 | union { | |
1089 | uint8_t byte; | |
1090 | uint16_t word; | |
1091 | uint32_t dword; | |
1092 | uint64_t qword; | |
1093 | } buf; | |
1094 | ||
1095 | switch (size) { | |
1096 | case 1: | |
1097 | buf.byte = data; | |
1098 | break; | |
1099 | case 2: | |
6758008e | 1100 | buf.word = cpu_to_le16(data); |
65501a74 AW |
1101 | break; |
1102 | case 4: | |
6758008e | 1103 | buf.dword = cpu_to_le32(data); |
65501a74 AW |
1104 | break; |
1105 | default: | |
4e505ddd | 1106 | hw_error("vfio: unsupported write size, %d bytes", size); |
65501a74 AW |
1107 | break; |
1108 | } | |
1109 | ||
1110 | if (pwrite(bar->fd, &buf, size, bar->fd_offset + addr) != size) { | |
312fd5f2 | 1111 | error_report("%s(,0x%"HWADDR_PRIx", 0x%"PRIx64", %d) failed: %m", |
65501a74 AW |
1112 | __func__, addr, data, size); |
1113 | } | |
1114 | ||
82ca8912 AW |
1115 | #ifdef DEBUG_VFIO |
1116 | { | |
1117 | VFIODevice *vdev = container_of(bar, VFIODevice, bars[bar->nr]); | |
1118 | ||
1119 | DPRINTF("%s(%04x:%02x:%02x.%x:BAR%d+0x%"HWADDR_PRIx", 0x%"PRIx64 | |
1120 | ", %d)\n", __func__, vdev->host.domain, vdev->host.bus, | |
1121 | vdev->host.slot, vdev->host.function, bar->nr, addr, | |
1122 | data, size); | |
1123 | } | |
1124 | #endif | |
65501a74 AW |
1125 | |
1126 | /* | |
1127 | * A read or write to a BAR always signals an INTx EOI. This will | |
1128 | * do nothing if not pending (including not in INTx mode). We assume | |
1129 | * that a BAR access is in response to an interrupt and that BAR | |
1130 | * accesses will service the interrupt. Unfortunately, we don't know | |
1131 | * which access will service the interrupt, so we're potentially | |
1132 | * getting quite a few host interrupts per guest interrupt. | |
1133 | */ | |
3a4f2816 | 1134 | vfio_eoi(container_of(bar, VFIODevice, bars[bar->nr])); |
65501a74 AW |
1135 | } |
1136 | ||
1137 | static uint64_t vfio_bar_read(void *opaque, | |
a8170e5e | 1138 | hwaddr addr, unsigned size) |
65501a74 AW |
1139 | { |
1140 | VFIOBAR *bar = opaque; | |
1141 | union { | |
1142 | uint8_t byte; | |
1143 | uint16_t word; | |
1144 | uint32_t dword; | |
1145 | uint64_t qword; | |
1146 | } buf; | |
1147 | uint64_t data = 0; | |
1148 | ||
1149 | if (pread(bar->fd, &buf, size, bar->fd_offset + addr) != size) { | |
312fd5f2 | 1150 | error_report("%s(,0x%"HWADDR_PRIx", %d) failed: %m", |
65501a74 AW |
1151 | __func__, addr, size); |
1152 | return (uint64_t)-1; | |
1153 | } | |
1154 | ||
1155 | switch (size) { | |
1156 | case 1: | |
1157 | data = buf.byte; | |
1158 | break; | |
1159 | case 2: | |
6758008e | 1160 | data = le16_to_cpu(buf.word); |
65501a74 AW |
1161 | break; |
1162 | case 4: | |
6758008e | 1163 | data = le32_to_cpu(buf.dword); |
65501a74 AW |
1164 | break; |
1165 | default: | |
4e505ddd | 1166 | hw_error("vfio: unsupported read size, %d bytes", size); |
65501a74 AW |
1167 | break; |
1168 | } | |
1169 | ||
82ca8912 AW |
1170 | #ifdef DEBUG_VFIO |
1171 | { | |
1172 | VFIODevice *vdev = container_of(bar, VFIODevice, bars[bar->nr]); | |
1173 | ||
1174 | DPRINTF("%s(%04x:%02x:%02x.%x:BAR%d+0x%"HWADDR_PRIx | |
1175 | ", %d) = 0x%"PRIx64"\n", __func__, vdev->host.domain, | |
1176 | vdev->host.bus, vdev->host.slot, vdev->host.function, | |
1177 | bar->nr, addr, size, data); | |
1178 | } | |
1179 | #endif | |
65501a74 AW |
1180 | |
1181 | /* Same as write above */ | |
3a4f2816 | 1182 | vfio_eoi(container_of(bar, VFIODevice, bars[bar->nr])); |
65501a74 AW |
1183 | |
1184 | return data; | |
1185 | } | |
1186 | ||
1187 | static const MemoryRegionOps vfio_bar_ops = { | |
1188 | .read = vfio_bar_read, | |
1189 | .write = vfio_bar_write, | |
6758008e | 1190 | .endianness = DEVICE_LITTLE_ENDIAN, |
65501a74 AW |
1191 | }; |
1192 | ||
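/*
 * Sketch (illustrative; the region name is hypothetical and the actual BAR
 * mapping code comes later in this file): these ops back the "slow" path
 * MemoryRegion of each BAR, registered roughly as
 *
 *   memory_region_init_io(&bar->mem, OBJECT(vdev), &vfio_bar_ops,
 *                         bar, "vfio-bar", size);
 *   pci_register_bar(&vdev->pdev, nr, type, &bar->mem);
 */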
6f864e6e AW |
1193 | static void vfio_pci_load_rom(VFIODevice *vdev) |
1194 | { | |
1195 | struct vfio_region_info reg_info = { | |
1196 | .argsz = sizeof(reg_info), | |
1197 | .index = VFIO_PCI_ROM_REGION_INDEX | |
1198 | }; | |
1199 | uint64_t size; | |
1200 | off_t off = 0; | |
1201 | size_t bytes; | |
1202 | ||
1203 | if (ioctl(vdev->fd, VFIO_DEVICE_GET_REGION_INFO, ®_info)) { | |
1204 | error_report("vfio: Error getting ROM info: %m"); | |
1205 | return; | |
1206 | } | |
1207 | ||
1208 | DPRINTF("Device %04x:%02x:%02x.%x ROM:\n", vdev->host.domain, | |
1209 | vdev->host.bus, vdev->host.slot, vdev->host.function); | |
1210 | DPRINTF(" size: 0x%lx, offset: 0x%lx, flags: 0x%lx\n", | |
1211 | (unsigned long)reg_info.size, (unsigned long)reg_info.offset, | |
1212 | (unsigned long)reg_info.flags); | |
1213 | ||
1214 | vdev->rom_size = size = reg_info.size; | |
1215 | vdev->rom_offset = reg_info.offset; | |
1216 | ||
1217 | if (!vdev->rom_size) { | |
e638073c | 1218 | vdev->rom_read_failed = true; |
d20b43df | 1219 | error_report("vfio-pci: Cannot read device rom at " |
4e505ddd | 1220 | "%04x:%02x:%02x.%x", |
d20b43df BD |
1221 | vdev->host.domain, vdev->host.bus, vdev->host.slot, |
1222 | vdev->host.function); | |
1223 | error_printf("Device option ROM contents are probably invalid " | |
1224 | "(check dmesg).\nSkip option ROM probe with rombar=0, " | |
1225 | "or load from file with romfile=\n"); | |
6f864e6e AW |
1226 | return; |
1227 | } | |
1228 | ||
1229 | vdev->rom = g_malloc(size); | |
1230 | memset(vdev->rom, 0xff, size); | |
1231 | ||
1232 | while (size) { | |
1233 | bytes = pread(vdev->fd, vdev->rom + off, size, vdev->rom_offset + off); | |
1234 | if (bytes == 0) { | |
1235 | break; | |
1236 | } else if (bytes > 0) { | |
1237 | off += bytes; | |
1238 | size -= bytes; | |
1239 | } else { | |
1240 | if (errno == EINTR || errno == EAGAIN) { | |
1241 | continue; | |
1242 | } | |
1243 | error_report("vfio: Error reading device ROM: %m"); | |
1244 | break; | |
1245 | } | |
1246 | } | |
1247 | } | |
1248 | ||
1249 | static uint64_t vfio_rom_read(void *opaque, hwaddr addr, unsigned size) | |
1250 | { | |
1251 | VFIODevice *vdev = opaque; | |
75bd0c72 ND |
1252 | union { |
1253 | uint8_t byte; | |
1254 | uint16_t word; | |
1255 | uint32_t dword; | |
1256 | uint64_t qword; | |
1257 | } val; | |
1258 | uint64_t data = 0; | |
6f864e6e AW |
1259 | |
1260 | /* Load the ROM lazily when the guest tries to read it */ | |
db01eedb | 1261 | if (unlikely(!vdev->rom && !vdev->rom_read_failed)) { |
6f864e6e AW |
1262 | vfio_pci_load_rom(vdev); |
1263 | } | |
1264 | ||
6758008e | 1265 | memcpy(&val, vdev->rom + addr, |
6f864e6e AW |
1266 | (addr < vdev->rom_size) ? MIN(size, vdev->rom_size - addr) : 0); |
1267 | ||
75bd0c72 ND |
1268 | switch (size) { |
1269 | case 1: | |
1270 | data = val.byte; | |
1271 | break; | |
1272 | case 2: | |
1273 | data = le16_to_cpu(val.word); | |
1274 | break; | |
1275 | case 4: | |
1276 | data = le32_to_cpu(val.dword); | |
1277 | break; | |
1278 | default: | |
1279 | hw_error("vfio: unsupported read size, %d bytes\n", size); | |
1280 | break; | |
1281 | } | |
1282 | ||
6f864e6e AW |
1283 | DPRINTF("%s(%04x:%02x:%02x.%x, 0x%"HWADDR_PRIx", 0x%x) = 0x%"PRIx64"\n", |
1284 | __func__, vdev->host.domain, vdev->host.bus, vdev->host.slot, | |
75bd0c72 | 1285 | vdev->host.function, addr, size, data); |
6f864e6e | 1286 | |
75bd0c72 | 1287 | return data; |
6f864e6e AW |
1288 | } |
1289 | ||
64fa25a0 AW |
1290 | static void vfio_rom_write(void *opaque, hwaddr addr, |
1291 | uint64_t data, unsigned size) | |
1292 | { | |
1293 | } | |
1294 | ||
6f864e6e AW |
1295 | static const MemoryRegionOps vfio_rom_ops = { |
1296 | .read = vfio_rom_read, | |
64fa25a0 | 1297 | .write = vfio_rom_write, |
6758008e | 1298 | .endianness = DEVICE_LITTLE_ENDIAN, |
6f864e6e AW |
1299 | }; |
1300 | ||
4b943029 BD |
1301 | static bool vfio_blacklist_opt_rom(VFIODevice *vdev) |
1302 | { | |
1303 | PCIDevice *pdev = &vdev->pdev; | |
1304 | uint16_t vendor_id, device_id; | |
1305 | int count = 0; | |
1306 | ||
1307 | vendor_id = pci_get_word(pdev->config + PCI_VENDOR_ID); | |
1308 | device_id = pci_get_word(pdev->config + PCI_DEVICE_ID); | |
1309 | ||
1310 | while (count < ARRAY_SIZE(romblacklist)) { | |
1311 | if (romblacklist[count].vendor_id == vendor_id && | |
1312 | romblacklist[count].device_id == device_id) { | |
1313 | return true; | |
1314 | } | |
1315 | count++; | |
1316 | } | |
1317 | ||
1318 | return false; | |
1319 | } | |
1320 | ||
6f864e6e AW |
1321 | static void vfio_pci_size_rom(VFIODevice *vdev) |
1322 | { | |
b1c50c5f | 1323 | uint32_t orig, size = cpu_to_le32((uint32_t)PCI_ROM_ADDRESS_MASK); |
6f864e6e | 1324 | off_t offset = vdev->config_offset + PCI_ROM_ADDRESS; |
4b943029 | 1325 | DeviceState *dev = DEVICE(vdev); |
6f864e6e AW |
1326 | char name[32]; |
1327 | ||
1328 | if (vdev->pdev.romfile || !vdev->pdev.rom_bar) { | |
4b943029 BD |
1329 | /* Since pci handles romfile, just print a message and return */ |
1330 | if (vfio_blacklist_opt_rom(vdev) && vdev->pdev.romfile) { | |
1331 | error_printf("Warning : Device at %04x:%02x:%02x.%x " | |
1332 | "is known to cause system instability issues during " | |
1333 | "option rom execution. " | |
1334 | "Proceeding anyway since user specified romfile\n", | |
1335 | vdev->host.domain, vdev->host.bus, vdev->host.slot, | |
1336 | vdev->host.function); | |
1337 | } | |
6f864e6e AW |
1338 | return; |
1339 | } | |
1340 | ||
1341 | /* | |
1342 | * Use the same size ROM BAR as the physical device. The contents | |
1343 | * will get filled in later when the guest tries to read it. | |
1344 | */ | |
1345 | if (pread(vdev->fd, &orig, 4, offset) != 4 || | |
1346 | pwrite(vdev->fd, &size, 4, offset) != 4 || | |
1347 | pread(vdev->fd, &size, 4, offset) != 4 || | |
1348 | pwrite(vdev->fd, &orig, 4, offset) != 4) { | |
1349 | error_report("%s(%04x:%02x:%02x.%x) failed: %m", | |
1350 | __func__, vdev->host.domain, vdev->host.bus, | |
1351 | vdev->host.slot, vdev->host.function); | |
1352 | return; | |
1353 | } | |
1354 | ||
b1c50c5f | 1355 | size = ~(le32_to_cpu(size) & PCI_ROM_ADDRESS_MASK) + 1; |
6f864e6e AW |
1356 | |
1357 | if (!size) { | |
1358 | return; | |
1359 | } | |
1360 | ||
4b943029 BD |
1361 | if (vfio_blacklist_opt_rom(vdev)) { |
1362 | if (dev->opts && qemu_opt_get(dev->opts, "rombar")) { | |
1363 | error_printf("Warning : Device at %04x:%02x:%02x.%x " | |
1364 | "is known to cause system instability issues during " | |
1365 | "option rom execution. " | |
1366 | "Proceeding anyway since user specified non zero value for " | |
1367 | "rombar\n", | |
1368 | vdev->host.domain, vdev->host.bus, vdev->host.slot, | |
1369 | vdev->host.function); | |
1370 | } else { | |
1371 | error_printf("Warning : Rom loading for device at " | |
1372 | "%04x:%02x:%02x.%x has been disabled due to " | |
1373 | "system instability issues. " | |
1374 | "Specify rombar=1 or romfile to force\n", | |
1375 | vdev->host.domain, vdev->host.bus, vdev->host.slot, | |
1376 | vdev->host.function); | |
1377 | return; | |
1378 | } | |
1379 | } | |
1380 | ||
6f864e6e AW |
1381 | DPRINTF("%04x:%02x:%02x.%x ROM size 0x%x\n", vdev->host.domain, |
1382 | vdev->host.bus, vdev->host.slot, vdev->host.function, size); | |
1383 | ||
1384 | snprintf(name, sizeof(name), "vfio[%04x:%02x:%02x.%x].rom", | |
1385 | vdev->host.domain, vdev->host.bus, vdev->host.slot, | |
1386 | vdev->host.function); | |
1387 | ||
1388 | memory_region_init_io(&vdev->pdev.rom, OBJECT(vdev), | |
1389 | &vfio_rom_ops, vdev, name, size); | |
1390 | ||
1391 | pci_register_bar(&vdev->pdev, PCI_ROM_SLOT, | |
1392 | PCI_BASE_ADDRESS_SPACE_MEMORY, &vdev->pdev.rom); | |
1393 | ||
1394 | vdev->pdev.has_rom = true; | |
e638073c | 1395 | vdev->rom_read_failed = false; |
6f864e6e AW |
1396 | } |
1397 | ||
f15689c7 AW |
1398 | static void vfio_vga_write(void *opaque, hwaddr addr, |
1399 | uint64_t data, unsigned size) | |
1400 | { | |
1401 | VFIOVGARegion *region = opaque; | |
1402 | VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]); | |
1403 | union { | |
1404 | uint8_t byte; | |
1405 | uint16_t word; | |
1406 | uint32_t dword; | |
1407 | uint64_t qword; | |
1408 | } buf; | |
1409 | off_t offset = vga->fd_offset + region->offset + addr; | |
1410 | ||
1411 | switch (size) { | |
1412 | case 1: | |
1413 | buf.byte = data; | |
1414 | break; | |
1415 | case 2: | |
1416 | buf.word = cpu_to_le16(data); | |
1417 | break; | |
1418 | case 4: | |
1419 | buf.dword = cpu_to_le32(data); | |
1420 | break; | |
1421 | default: | |
4e505ddd | 1422 | hw_error("vfio: unsupported write size, %d bytes", size); |
f15689c7 AW |
1423 | break; |
1424 | } | |
1425 | ||
1426 | if (pwrite(vga->fd, &buf, size, offset) != size) { | |
1427 | error_report("%s(,0x%"HWADDR_PRIx", 0x%"PRIx64", %d) failed: %m", | |
1428 | __func__, region->offset + addr, data, size); | |
1429 | } | |
1430 | ||
1431 | DPRINTF("%s(0x%"HWADDR_PRIx", 0x%"PRIx64", %d)\n", | |
1432 | __func__, region->offset + addr, data, size); | |
1433 | } | |
1434 | ||
1435 | static uint64_t vfio_vga_read(void *opaque, hwaddr addr, unsigned size) | |
1436 | { | |
1437 | VFIOVGARegion *region = opaque; | |
1438 | VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]); | |
1439 | union { | |
1440 | uint8_t byte; | |
1441 | uint16_t word; | |
1442 | uint32_t dword; | |
1443 | uint64_t qword; | |
1444 | } buf; | |
1445 | uint64_t data = 0; | |
1446 | off_t offset = vga->fd_offset + region->offset + addr; | |
1447 | ||
1448 | if (pread(vga->fd, &buf, size, offset) != size) { | |
1449 | error_report("%s(,0x%"HWADDR_PRIx", %d) failed: %m", | |
1450 | __func__, region->offset + addr, size); | |
1451 | return (uint64_t)-1; | |
1452 | } | |
1453 | ||
1454 | switch (size) { | |
1455 | case 1: | |
1456 | data = buf.byte; | |
1457 | break; | |
1458 | case 2: | |
1459 | data = le16_to_cpu(buf.word); | |
1460 | break; | |
1461 | case 4: | |
1462 | data = le32_to_cpu(buf.dword); | |
1463 | break; | |
1464 | default: | |
4e505ddd | 1465 | hw_error("vfio: unsupported read size, %d bytes", size); |
f15689c7 AW |
1466 | break; |
1467 | } | |
1468 | ||
1469 | DPRINTF("%s(0x%"HWADDR_PRIx", %d) = 0x%"PRIx64"\n", | |
1470 | __func__, region->offset + addr, size, data); | |
1471 | ||
1472 | return data; | |
1473 | } | |
1474 | ||
1475 | static const MemoryRegionOps vfio_vga_ops = { | |
1476 | .read = vfio_vga_read, | |
1477 | .write = vfio_vga_write, | |
1478 | .endianness = DEVICE_LITTLE_ENDIAN, | |
1479 | }; | |
1480 | ||
7076eabc AW |
1481 | /* |
1482 | * Device specific quirks | |
1483 | */ | |
1484 | ||
39360f0b AW |
1485 | /* Is range1 fully contained within range2? */ |
1486 | static bool vfio_range_contained(uint64_t first1, uint64_t len1, | |
1487 | uint64_t first2, uint64_t len2) { | |
1488 | return (first1 >= first2 && first1 + len1 <= first2 + len2); | |
1489 | } | |
7076eabc | 1490 | |
39360f0b AW |
1491 | static bool vfio_flags_enabled(uint8_t flags, uint8_t mask) |
1492 | { | |
1493 | return (mask && (flags & mask) == mask); | |
1494 | } | |
1495 | ||
1496 | static uint64_t vfio_generic_window_quirk_read(void *opaque, | |
1497 | hwaddr addr, unsigned size) | |
7076eabc AW |
1498 | { |
1499 | VFIOQuirk *quirk = opaque; | |
1500 | VFIODevice *vdev = quirk->vdev; | |
39360f0b | 1501 | uint64_t data; |
7076eabc | 1502 | |
39360f0b AW |
1503 | if (vfio_flags_enabled(quirk->data.flags, quirk->data.read_flags) && |
1504 | ranges_overlap(addr, size, | |
1505 | quirk->data.data_offset, quirk->data.data_size)) { | |
1506 | hwaddr offset = addr - quirk->data.data_offset; | |
1507 | ||
1508 | if (!vfio_range_contained(addr, size, quirk->data.data_offset, | |
1509 | quirk->data.data_size)) { | |
4e505ddd | 1510 | hw_error("%s: window data read not fully contained: %s", |
39360f0b AW |
1511 | __func__, memory_region_name(&quirk->mem)); |
1512 | } | |
1513 | ||
1514 | data = vfio_pci_read_config(&vdev->pdev, | |
1515 | quirk->data.address_val + offset, size); | |
1516 | ||
1517 | DPRINTF("%s read(%04x:%02x:%02x.%x:BAR%d+0x%"HWADDR_PRIx", %d) = 0x%" | |
1518 | PRIx64"\n", memory_region_name(&quirk->mem), vdev->host.domain, | |
1519 | vdev->host.bus, vdev->host.slot, vdev->host.function, | |
1520 | quirk->data.bar, addr, size, data); | |
1521 | } else { | |
1522 | data = vfio_bar_read(&vdev->bars[quirk->data.bar], | |
1523 | addr + quirk->data.base_offset, size); | |
7076eabc AW |
1524 | } |
1525 | ||
1526 | return data; | |
1527 | } | |
1528 | ||
39360f0b AW |
1529 | static void vfio_generic_window_quirk_write(void *opaque, hwaddr addr, |
1530 | uint64_t data, unsigned size) | |
7076eabc | 1531 | { |
39360f0b AW |
1532 | VFIOQuirk *quirk = opaque; |
1533 | VFIODevice *vdev = quirk->vdev; | |
7076eabc | 1534 | |
39360f0b AW |
1535 | if (ranges_overlap(addr, size, |
1536 | quirk->data.address_offset, quirk->data.address_size)) { | |
7076eabc | 1537 | |
39360f0b | 1538 | if (addr != quirk->data.address_offset) { |
4e505ddd | 1539 | hw_error("%s: offset write into address window: %s", |
39360f0b AW |
1540 | __func__, memory_region_name(&quirk->mem)); |
1541 | } | |
1542 | ||
1543 | if ((data & ~quirk->data.address_mask) == quirk->data.address_match) { | |
1544 | quirk->data.flags |= quirk->data.write_flags | | |
1545 | quirk->data.read_flags; | |
1546 | quirk->data.address_val = data & quirk->data.address_mask; | |
1547 | } else { | |
1548 | quirk->data.flags &= ~(quirk->data.write_flags | | |
1549 | quirk->data.read_flags); | |
1550 | } | |
7076eabc AW |
1551 | } |
1552 | ||
39360f0b AW |
1553 | if (vfio_flags_enabled(quirk->data.flags, quirk->data.write_flags) && |
1554 | ranges_overlap(addr, size, | |
1555 | quirk->data.data_offset, quirk->data.data_size)) { | |
1556 | hwaddr offset = addr - quirk->data.data_offset; | |
7076eabc | 1557 | |
39360f0b AW |
1558 | if (!vfio_range_contained(addr, size, quirk->data.data_offset, |
1559 | quirk->data.data_size)) { | |
4e505ddd | 1560 | hw_error("%s: window data write not fully contained: %s", |
39360f0b AW |
1561 | __func__, memory_region_name(&quirk->mem)); |
1562 | } | |
7076eabc | 1563 | |
39360f0b AW |
1564 | vfio_pci_write_config(&vdev->pdev, |
1565 | quirk->data.address_val + offset, data, size); | |
1566 | DPRINTF("%s write(%04x:%02x:%02x.%x:BAR%d+0x%"HWADDR_PRIx", 0x%" | |
1567 | PRIx64", %d)\n", memory_region_name(&quirk->mem), | |
1568 | vdev->host.domain, vdev->host.bus, vdev->host.slot, | |
1569 | vdev->host.function, quirk->data.bar, addr, data, size); | |
1570 | return; | |
1571 | } | |
7076eabc | 1572 | |
39360f0b AW |
1573 | vfio_bar_write(&vdev->bars[quirk->data.bar], |
1574 | addr + quirk->data.base_offset, data, size); | |
7076eabc AW |
1575 | } |
1576 | ||
39360f0b AW |
1577 | static const MemoryRegionOps vfio_generic_window_quirk = { |
1578 | .read = vfio_generic_window_quirk_read, | |
1579 | .write = vfio_generic_window_quirk_write, | |
1580 | .endianness = DEVICE_LITTLE_ENDIAN, | |
1581 | }; | |
1582 | ||
1583 | static uint64_t vfio_generic_quirk_read(void *opaque, | |
1584 | hwaddr addr, unsigned size) | |
7076eabc AW |
1585 | { |
1586 | VFIOQuirk *quirk = opaque; | |
1587 | VFIODevice *vdev = quirk->vdev; | |
39360f0b AW |
1588 | hwaddr base = quirk->data.address_match & TARGET_PAGE_MASK; |
1589 | hwaddr offset = quirk->data.address_match & ~TARGET_PAGE_MASK; | |
1590 | uint64_t data; | |
7076eabc | 1591 | |
39360f0b AW |
1592 | if (vfio_flags_enabled(quirk->data.flags, quirk->data.read_flags) && |
1593 | ranges_overlap(addr, size, offset, quirk->data.address_mask + 1)) { | |
1594 | if (!vfio_range_contained(addr, size, offset, | |
1595 | quirk->data.address_mask + 1)) { | |
4e505ddd | 1596 | hw_error("%s: read not fully contained: %s", |
39360f0b AW |
1597 | __func__, memory_region_name(&quirk->mem)); |
1598 | } | |
7076eabc | 1599 | |
39360f0b AW |
1600 | data = vfio_pci_read_config(&vdev->pdev, addr - offset, size); |
1601 | ||
1602 | DPRINTF("%s read(%04x:%02x:%02x.%x:BAR%d+0x%"HWADDR_PRIx", %d) = 0x%" | |
1603 | PRIx64"\n", memory_region_name(&quirk->mem), vdev->host.domain, | |
1604 | vdev->host.bus, vdev->host.slot, vdev->host.function, | |
1605 | quirk->data.bar, addr + base, size, data); | |
1606 | } else { | |
1607 | data = vfio_bar_read(&vdev->bars[quirk->data.bar], addr + base, size); | |
1608 | } | |
7076eabc AW |
1609 | |
1610 | return data; | |
1611 | } | |
1612 | ||
39360f0b AW |
1613 | static void vfio_generic_quirk_write(void *opaque, hwaddr addr, |
1614 | uint64_t data, unsigned size) | |
7076eabc AW |
1615 | { |
1616 | VFIOQuirk *quirk = opaque; | |
1617 | VFIODevice *vdev = quirk->vdev; | |
39360f0b AW |
1618 | hwaddr base = quirk->data.address_match & TARGET_PAGE_MASK; |
1619 | hwaddr offset = quirk->data.address_match & ~TARGET_PAGE_MASK; | |
1620 | ||
1621 | if (vfio_flags_enabled(quirk->data.flags, quirk->data.write_flags) && | |
1622 | ranges_overlap(addr, size, offset, quirk->data.address_mask + 1)) { | |
1623 | if (!vfio_range_contained(addr, size, offset, | |
1624 | quirk->data.address_mask + 1)) { | |
4e505ddd | 1625 | hw_error("%s: write not fully contained: %s", |
39360f0b AW |
1626 | __func__, memory_region_name(&quirk->mem)); |
1627 | } | |
7076eabc | 1628 | |
39360f0b | 1629 | vfio_pci_write_config(&vdev->pdev, addr - offset, data, size); |
7076eabc | 1630 | |
39360f0b AW |
1631 | DPRINTF("%s write(%04x:%02x:%02x.%x:BAR%d+0x%"HWADDR_PRIx", 0x%" |
1632 | PRIx64", %d)\n", memory_region_name(&quirk->mem), | |
1633 | vdev->host.domain, vdev->host.bus, vdev->host.slot, | |
1634 | vdev->host.function, quirk->data.bar, addr + base, data, size); | |
1635 | } else { | |
1636 | vfio_bar_write(&vdev->bars[quirk->data.bar], addr + base, data, size); | |
1637 | } | |
7076eabc AW |
1638 | } |
1639 | ||
39360f0b AW |
1640 | static const MemoryRegionOps vfio_generic_quirk = { |
1641 | .read = vfio_generic_quirk_read, | |
1642 | .write = vfio_generic_quirk_write, | |
7076eabc AW |
1643 | .endianness = DEVICE_LITTLE_ENDIAN, |
1644 | }; | |
1645 | ||
39360f0b AW |
1646 | #define PCI_VENDOR_ID_ATI 0x1002 |
1647 | ||
1648 | /* | |
1649 | * Radeon HD cards (HD5450 & HD7850) report the upper byte of the I/O port BAR | |
1650 | * through VGA register 0x3c3. On newer cards, the I/O port BAR is always | |
1651 | * BAR4 (older cards like the X550 used BAR1, but we don't care to support | |
1652 | * those). Note that on bare metal, a read of 0x3c3 doesn't always return the | |
1653 | * I/O port BAR address. Originally this was coded to return the virtual BAR | |
1654 | * address only if the physical register read returns the actual BAR address, | |
1655 | * but users have reported greater success if we return the virtual address | |
1656 | * unconditionally. | |
1657 | */ | |
1658 | static uint64_t vfio_ati_3c3_quirk_read(void *opaque, | |
1659 | hwaddr addr, unsigned size) | |
1660 | { | |
1661 | VFIOQuirk *quirk = opaque; | |
1662 | VFIODevice *vdev = quirk->vdev; | |
1663 | uint64_t data = vfio_pci_read_config(&vdev->pdev, | |
1664 | PCI_BASE_ADDRESS_0 + (4 * 4) + 1, | |
1665 | size); | |
1666 | DPRINTF("%s(0x3c3, 1) = 0x%"PRIx64"\n", __func__, data); | |
1667 | ||
1668 | return data; | |
1669 | } | |
1670 | ||
1671 | static const MemoryRegionOps vfio_ati_3c3_quirk = { | |
1672 | .read = vfio_ati_3c3_quirk_read, | |
1673 | .endianness = DEVICE_LITTLE_ENDIAN, | |
1674 | }; | |
1675 | ||
1676 | static void vfio_vga_probe_ati_3c3_quirk(VFIODevice *vdev) | |
7076eabc AW |
1677 | { |
1678 | PCIDevice *pdev = &vdev->pdev; | |
7076eabc AW |
1679 | VFIOQuirk *quirk; |
1680 | ||
39360f0b | 1681 | if (pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_ATI) { |
7076eabc AW |
1682 | return; |
1683 | } | |
1684 | ||
39360f0b AW |
1685 | /* |
1686 | * As long as the BAR is >= 256 bytes it will be aligned such that the | |
1687 | * lower byte is always zero. Filter out anything else, if it exists. | |
1688 | */ | |
1689 | if (!vdev->bars[4].ioport || vdev->bars[4].size < 256) { | |
7076eabc AW |
1690 | return; |
1691 | } | |
1692 | ||
1693 | quirk = g_malloc0(sizeof(*quirk)); | |
1694 | quirk->vdev = vdev; | |
1695 | ||
39360f0b AW |
1696 | memory_region_init_io(&quirk->mem, OBJECT(vdev), &vfio_ati_3c3_quirk, quirk, |
1697 | "vfio-ati-3c3-quirk", 1); | |
1698 | memory_region_add_subregion(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem, | |
1699 | 3 /* offset 3 bytes from 0x3c0 */, &quirk->mem); | |
7076eabc | 1700 | |
39360f0b AW |
1701 | QLIST_INSERT_HEAD(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].quirks, |
1702 | quirk, next); | |
7076eabc | 1703 | |
39360f0b | 1704 | DPRINTF("Enabled ATI/AMD quirk 0x3c3 BAR4for device %04x:%02x:%02x.%x\n", |
7076eabc AW |
1705 | vdev->host.domain, vdev->host.bus, vdev->host.slot, |
1706 | vdev->host.function); | |
1707 | } | |
1708 | ||
1709 | /* | |
39360f0b AW |
1710 | * Newer ATI/AMD devices, including HD5450 and HD7850, have a window to PCI |
1711 | * config space through MMIO BAR2 at offset 0x4000. Nothing seems to access | |
1712 | * the MMIO space directly, but a window to this space is provided through | |
1713 | * I/O port BAR4. Offset 0x0 is the address register and offset 0x4 is the | |
1714 | * data register. When the address is programmed to a range of 0x4000-0x4fff | |
1715 | * PCI configuration space is available. Experimentation seems to indicate | |
1716 | * that only read-only access is provided, but we nonetheless drop writes | |
1717 | * while the window is enabled to config space. | |
7076eabc | 1718 | */ |
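/*
 * Illustrative sketch, not part of the driver: the window protocol above
 * reduced to a helper that decides whether a value written to the BAR4
 * address register (offset 0x0) points the window at config space and,
 * if so, which config offset subsequent data register (offset 0x4)
 * accesses will hit.  The constants mirror the probe function below
 * (address_match = 0x4000, address_mask = PCIE_CONFIG_SPACE_SIZE - 1);
 * the helper and macro names are ours.
 */
#include <stdbool.h>
#include <stdint.h>

#define ATI_BAR4_CFG_MATCH 0x4000
#define ATI_BAR4_CFG_MASK  0xfff    /* PCIE_CONFIG_SPACE_SIZE - 1 */

static bool ati_bar4_window_is_config(uint32_t addr_reg, uint32_t *cfg_off)
{
    if ((addr_reg & ~ATI_BAR4_CFG_MASK) != ATI_BAR4_CFG_MATCH) {
        return false;               /* window points somewhere else */
    }
    *cfg_off = addr_reg & ATI_BAR4_CFG_MASK;   /* 0x4000-0x4fff -> 0x0-0xfff */
    return true;
}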
39360f0b | 1719 | static void vfio_probe_ati_bar4_window_quirk(VFIODevice *vdev, int nr) |
7076eabc | 1720 | { |
7076eabc | 1721 | PCIDevice *pdev = &vdev->pdev; |
39360f0b | 1722 | VFIOQuirk *quirk; |
7076eabc | 1723 | |
39360f0b AW |
1724 | if (!vdev->has_vga || nr != 4 || |
1725 | pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_ATI) { | |
1726 | return; | |
7076eabc AW |
1727 | } |
1728 | ||
39360f0b AW |
1729 | quirk = g_malloc0(sizeof(*quirk)); |
1730 | quirk->vdev = vdev; | |
1731 | quirk->data.address_size = 4; | |
1732 | quirk->data.data_offset = 4; | |
1733 | quirk->data.data_size = 4; | |
1734 | quirk->data.address_match = 0x4000; | |
1735 | quirk->data.address_mask = PCIE_CONFIG_SPACE_SIZE - 1; | |
1736 | quirk->data.bar = nr; | |
1737 | quirk->data.read_flags = quirk->data.write_flags = 1; | |
1738 | ||
1739 | memory_region_init_io(&quirk->mem, OBJECT(vdev), | |
1740 | &vfio_generic_window_quirk, quirk, | |
1741 | "vfio-ati-bar4-window-quirk", 8); | |
1742 | memory_region_add_subregion_overlap(&vdev->bars[nr].mem, | |
1743 | quirk->data.base_offset, &quirk->mem, 1); | |
7076eabc | 1744 | |
39360f0b | 1745 | QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next); |
7076eabc | 1746 | |
39360f0b AW |
1747 | DPRINTF("Enabled ATI/AMD BAR4 window quirk for device %04x:%02x:%02x.%x\n", |
1748 | vdev->host.domain, vdev->host.bus, vdev->host.slot, | |
1749 | vdev->host.function); | |
7076eabc AW |
1750 | } |
1751 | ||
4cb47d28 AW |
1752 | #define PCI_VENDOR_ID_REALTEK 0x10ec |
1753 | ||
1754 | /* | |
1755 | * RTL8168 devices have a backdoor that can access the MSI-X table. At BAR2 | |
1756 | * offset 0x70 there is a dword data register, offset 0x74 is a dword address | |
1757 | * register. According to the Linux r8169 driver, the MSI-X table is addressed | |
1758 | * when the "type" portion of the address register is set to 0x1. This appears | |
1759 | * to be bits 16:30. Bit 31 is both a write indicator and some sort of | |
1760 | * "address latched" indicator. Bits 12:15 are a mask field, which we can | |
1761 | * ignore because the MSI-X table should always be accessed as a dword (full | |
1762 | * mask). Bits 0:11 are the offset within the type. | |
1763 | * | |
1764 | * Example trace: | |
1765 | * | |
1766 | * Read from MSI-X table offset 0 | |
1767 | * vfio: vfio_bar_write(0000:05:00.0:BAR2+0x74, 0x1f000, 4) // store read addr | |
1768 | * vfio: vfio_bar_read(0000:05:00.0:BAR2+0x74, 4) = 0x8001f000 // latch | |
1769 | * vfio: vfio_bar_read(0000:05:00.0:BAR2+0x70, 4) = 0xfee00398 // read data | |
1770 | * | |
1771 | * Write 0xfee00000 to MSI-X table offset 0 | |
1772 | * vfio: vfio_bar_write(0000:05:00.0:BAR2+0x70, 0xfee00000, 4) // write data | |
1773 | * vfio: vfio_bar_write(0000:05:00.0:BAR2+0x74, 0x8001f000, 4) // do write | |
1774 | * vfio: vfio_bar_read(0000:05:00.0:BAR2+0x74, 4) = 0x1f000 // complete | |
1775 | */ | |
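/*
 * Illustrative sketch, not part of the driver: decode of the rtl8168
 * backdoor address register using the field layout described in the
 * comment above.  The struct and field names are ours.
 */
#include <stdbool.h>
#include <stdint.h>

struct rtl8168_backdoor_addr {
    bool latched;        /* bit 31: write indicator / "address latched" */
    uint16_t type;       /* bits 16:30; 0x1 selects the MSI-X table */
    uint8_t byte_mask;   /* bits 12:15; always full for dword accesses */
    uint16_t offset;     /* bits 0:11; offset within the selected type */
};

static struct rtl8168_backdoor_addr rtl8168_decode_addr(uint32_t reg)
{
    return (struct rtl8168_backdoor_addr) {
        .latched   = reg >> 31,
        .type      = (reg >> 16) & 0x7fff,
        .byte_mask = (reg >> 12) & 0xf,
        .offset    = reg & 0xfff,
    };
}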
1776 | ||
1777 | static uint64_t vfio_rtl8168_window_quirk_read(void *opaque, | |
1778 | hwaddr addr, unsigned size) | |
1779 | { | |
1780 | VFIOQuirk *quirk = opaque; | |
1781 | VFIODevice *vdev = quirk->vdev; | |
1782 | ||
1783 | switch (addr) { | |
1784 | case 4: /* address */ | |
1785 | if (quirk->data.flags) { | |
1786 | DPRINTF("%s fake read(%04x:%02x:%02x.%d)\n", | |
1787 | memory_region_name(&quirk->mem), vdev->host.domain, | |
1788 | vdev->host.bus, vdev->host.slot, vdev->host.function); | |
1789 | ||
1790 | return quirk->data.address_match ^ 0x10000000U; | |
1791 | } | |
1792 | break; | |
1793 | case 0: /* data */ | |
1794 | if (quirk->data.flags) { | |
1795 | uint64_t val; | |
1796 | ||
1797 | DPRINTF("%s MSI-X table read(%04x:%02x:%02x.%d)\n", | |
1798 | memory_region_name(&quirk->mem), vdev->host.domain, | |
1799 | vdev->host.bus, vdev->host.slot, vdev->host.function); | |
1800 | ||
1801 | if (!(vdev->pdev.cap_present & QEMU_PCI_CAP_MSIX)) { | |
1802 | return 0; | |
1803 | } | |
1804 | ||
1805 | io_mem_read(&vdev->pdev.msix_table_mmio, | |
1806 | (hwaddr)(quirk->data.address_match & 0xfff), | |
1807 | &val, size); | |
1808 | return val; | |
1809 | } | |
1810 | } | |
1811 | ||
1812 | DPRINTF("%s direct read(%04x:%02x:%02x.%d)\n", | |
1813 | memory_region_name(&quirk->mem), vdev->host.domain, | |
1814 | vdev->host.bus, vdev->host.slot, vdev->host.function); | |
1815 | ||
1816 | return vfio_bar_read(&vdev->bars[quirk->data.bar], addr + 0x70, size); | |
1817 | } | |
1818 | ||
1819 | static void vfio_rtl8168_window_quirk_write(void *opaque, hwaddr addr, | |
1820 | uint64_t data, unsigned size) | |
1821 | { | |
1822 | VFIOQuirk *quirk = opaque; | |
1823 | VFIODevice *vdev = quirk->vdev; | |
1824 | ||
1825 | switch (addr) { | |
1826 | case 4: /* address */ | |
1827 | if ((data & 0x7fff0000) == 0x10000) { | |
1828 | if (data & 0x10000000U && | |
1829 | vdev->pdev.cap_present & QEMU_PCI_CAP_MSIX) { | |
1830 | ||
1831 | DPRINTF("%s MSI-X table write(%04x:%02x:%02x.%d)\n", | |
1832 | memory_region_name(&quirk->mem), vdev->host.domain, | |
1833 | vdev->host.bus, vdev->host.slot, vdev->host.function); | |
1834 | ||
1835 | io_mem_write(&vdev->pdev.msix_table_mmio, | |
1836 | (hwaddr)(quirk->data.address_match & 0xfff), | |
1837 | data, size); | |
1838 | } | |
1839 | ||
1840 | quirk->data.flags = 1; | |
1841 | quirk->data.address_match = data; | |
1842 | ||
1843 | return; | |
1844 | } | |
1845 | quirk->data.flags = 0; | |
1846 | break; | |
1847 | case 0: /* data */ | |
1848 | quirk->data.address_mask = data; | |
1849 | break; | |
1850 | } | |
1851 | ||
1852 | DPRINTF("%s direct write(%04x:%02x:%02x.%d)\n", | |
1853 | memory_region_name(&quirk->mem), vdev->host.domain, | |
1854 | vdev->host.bus, vdev->host.slot, vdev->host.function); | |
1855 | ||
1856 | vfio_bar_write(&vdev->bars[quirk->data.bar], addr + 0x70, data, size); | |
1857 | } | |
1858 | ||
1859 | static const MemoryRegionOps vfio_rtl8168_window_quirk = { | |
1860 | .read = vfio_rtl8168_window_quirk_read, | |
1861 | .write = vfio_rtl8168_window_quirk_write, | |
1862 | .valid = { | |
1863 | .min_access_size = 4, | |
1864 | .max_access_size = 4, | |
1865 | .unaligned = false, | |
1866 | }, | |
1867 | .endianness = DEVICE_LITTLE_ENDIAN, | |
1868 | }; | |
1869 | ||
1870 | static void vfio_probe_rtl8168_bar2_window_quirk(VFIODevice *vdev, int nr) | |
1871 | { | |
1872 | PCIDevice *pdev = &vdev->pdev; | |
1873 | VFIOQuirk *quirk; | |
1874 | ||
1875 | if (pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_REALTEK || | |
1876 | pci_get_word(pdev->config + PCI_DEVICE_ID) != 0x8168 || nr != 2) { | |
1877 | return; | |
1878 | } | |
1879 | ||
1880 | quirk = g_malloc0(sizeof(*quirk)); | |
1881 | quirk->vdev = vdev; | |
1882 | quirk->data.bar = nr; | |
1883 | ||
1884 | memory_region_init_io(&quirk->mem, OBJECT(vdev), &vfio_rtl8168_window_quirk, | |
1885 | quirk, "vfio-rtl8168-window-quirk", 8); | |
1886 | memory_region_add_subregion_overlap(&vdev->bars[nr].mem, | |
1887 | 0x70, &quirk->mem, 1); | |
1888 | ||
1889 | QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next); | |
1890 | ||
1891 | DPRINTF("Enabled RTL8168 BAR2 window quirk for device %04x:%02x:%02x.%x\n", | |
1892 | vdev->host.domain, vdev->host.bus, vdev->host.slot, | |
1893 | vdev->host.function); | |
1894 | } | |
39360f0b AW |
1895 | /* |
1896 | * Trap the BAR2 MMIO window to config space as well. | |
1897 | */ | |
1898 | static void vfio_probe_ati_bar2_4000_quirk(VFIODevice *vdev, int nr) | |
7076eabc AW |
1899 | { |
1900 | PCIDevice *pdev = &vdev->pdev; | |
7076eabc AW |
1901 | VFIOQuirk *quirk; |
1902 | ||
39360f0b AW |
1903 | /* Only enable on newer devices where BAR2 is 64bit */ |
1904 | if (!vdev->has_vga || nr != 2 || !vdev->bars[2].mem64 || | |
7076eabc AW |
1905 | pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_ATI) { |
1906 | return; | |
1907 | } | |
1908 | ||
7076eabc AW |
1909 | quirk = g_malloc0(sizeof(*quirk)); |
1910 | quirk->vdev = vdev; | |
39360f0b AW |
1911 | quirk->data.flags = quirk->data.read_flags = quirk->data.write_flags = 1; |
1912 | quirk->data.address_match = 0x4000; | |
1913 | quirk->data.address_mask = PCIE_CONFIG_SPACE_SIZE - 1; | |
1914 | quirk->data.bar = nr; | |
1915 | ||
1916 | memory_region_init_io(&quirk->mem, OBJECT(vdev), &vfio_generic_quirk, quirk, | |
1917 | "vfio-ati-bar2-4000-quirk", | |
1918 | TARGET_PAGE_ALIGN(quirk->data.address_mask + 1)); | |
1919 | memory_region_add_subregion_overlap(&vdev->bars[nr].mem, | |
1920 | quirk->data.address_match & TARGET_PAGE_MASK, | |
1921 | &quirk->mem, 1); | |
7076eabc AW |
1922 | |
1923 | QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next); | |
1924 | ||
39360f0b | 1925 | DPRINTF("Enabled ATI/AMD BAR2 0x4000 quirk for device %04x:%02x:%02x.%x\n", |
7076eabc AW |
1926 | vdev->host.domain, vdev->host.bus, vdev->host.slot, |
1927 | vdev->host.function); | |
1928 | } | |
1929 | ||
39360f0b AW |
1930 | /* |
1931 | * Older ATI/AMD cards like the X550 have a similar window to that above. | |
1932 | * I/O port BAR1 provides a window to a mirror of PCI config space located | |
1933 | * in BAR2 at offset 0xf00. We don't care to support such older cards, but | |
1934 | * note it for future reference. | |
1935 | */ | |
1936 | ||
7076eabc AW |
1937 | #define PCI_VENDOR_ID_NVIDIA 0x10de |
1938 | ||
1939 | /* | |
1940 | * Nvidia has several different methods to get to config space; the | |
1941 | * nouveau project has several of these documented here: | |
1942 | * https://github.com/pathscale/envytools/tree/master/hwdocs | |
1943 | * | |
1944 | * The first quirk is actually not documented in envytools and is found | |
1945 | * on 10de:01d1 (NVIDIA Corporation G72 [GeForce 7300 LE]). This is an | |
1946 | * NV46 chipset. The backdoor uses the legacy VGA I/O ports to access | |
1947 | * the mirror of PCI config space found at BAR0 offset 0x1800. The access | |
1948 | * sequence first writes 0x338 to I/O port 0x3d4. The target offset is | |
1949 | * then written to 0x3d0. Finally 0x538 is written for a read and 0x738 | |
1950 | * is written for a write to 0x3d4. The BAR0 offset is then accessible | |
1951 | * through 0x3d0. This quirk doesn't seem to be necessary on newer cards | |
1952 | * that use the I/O port BAR5 window but it doesn't hurt to leave it. | |
1953 | */ | |
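/*
 * Illustrative sketch, not part of the driver: the 0x3d4/0x3d0 backdoor
 * sequence above from the guest's point of view.  port_write()/port_read()
 * are hypothetical stand-ins for the guest's VGA port accessors (the access
 * widths real drivers use are not spelled out here); offset is a byte
 * offset into standard config space (< 0x100).
 */
#include <stdint.h>

static uint32_t nv_3d0_config_read(void (*port_write)(uint16_t port, uint32_t val),
                                   uint32_t (*port_read)(uint16_t port),
                                   uint8_t offset)
{
    port_write(0x3d4, 0x338);            /* select the backdoor */
    port_write(0x3d0, 0x1800 + offset);  /* target the BAR0 config mirror */
    port_write(0x3d4, 0x538);            /* 0x538 arms a read, 0x738 a write */
    return port_read(0x3d0);             /* data comes back through 0x3d0 */
}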
1954 | enum { | |
39360f0b | 1955 | NV_3D0_NONE = 0, |
7076eabc AW |
1956 | NV_3D0_SELECT, |
1957 | NV_3D0_WINDOW, | |
1958 | NV_3D0_READ, | |
1959 | NV_3D0_WRITE, | |
1960 | }; | |
1961 | ||
1962 | static uint64_t vfio_nvidia_3d0_quirk_read(void *opaque, | |
1963 | hwaddr addr, unsigned size) | |
1964 | { | |
1965 | VFIOQuirk *quirk = opaque; | |
1966 | VFIODevice *vdev = quirk->vdev; | |
1967 | PCIDevice *pdev = &vdev->pdev; | |
1968 | uint64_t data = vfio_vga_read(&vdev->vga.region[QEMU_PCI_VGA_IO_HI], | |
39360f0b | 1969 | addr + quirk->data.base_offset, size); |
7076eabc | 1970 | |
39360f0b AW |
1971 | if (quirk->data.flags == NV_3D0_READ && addr == quirk->data.data_offset) { |
1972 | data = vfio_pci_read_config(pdev, quirk->data.address_val, size); | |
7076eabc AW |
1973 | DPRINTF("%s(0x3d0, %d) = 0x%"PRIx64"\n", __func__, size, data); |
1974 | } | |
1975 | ||
39360f0b | 1976 | quirk->data.flags = NV_3D0_NONE; |
7076eabc AW |
1977 | |
1978 | return data; | |
1979 | } | |
1980 | ||
1981 | static void vfio_nvidia_3d0_quirk_write(void *opaque, hwaddr addr, | |
1982 | uint64_t data, unsigned size) | |
1983 | { | |
1984 | VFIOQuirk *quirk = opaque; | |
1985 | VFIODevice *vdev = quirk->vdev; | |
1986 | PCIDevice *pdev = &vdev->pdev; | |
1987 | ||
39360f0b | 1988 | switch (quirk->data.flags) { |
7076eabc | 1989 | case NV_3D0_NONE: |
39360f0b AW |
1990 | if (addr == quirk->data.address_offset && data == 0x338) { |
1991 | quirk->data.flags = NV_3D0_SELECT; | |
7076eabc AW |
1992 | } |
1993 | break; | |
1994 | case NV_3D0_SELECT: | |
39360f0b AW |
1995 | quirk->data.flags = NV_3D0_NONE; |
1996 | if (addr == quirk->data.data_offset && | |
1997 | (data & ~quirk->data.address_mask) == quirk->data.address_match) { | |
1998 | quirk->data.flags = NV_3D0_WINDOW; | |
1999 | quirk->data.address_val = data & quirk->data.address_mask; | |
7076eabc AW |
2000 | } |
2001 | break; | |
2002 | case NV_3D0_WINDOW: | |
39360f0b AW |
2003 | quirk->data.flags = NV_3D0_NONE; |
2004 | if (addr == quirk->data.address_offset) { | |
7076eabc | 2005 | if (data == 0x538) { |
39360f0b | 2006 | quirk->data.flags = NV_3D0_READ; |
7076eabc | 2007 | } else if (data == 0x738) { |
39360f0b | 2008 | quirk->data.flags = NV_3D0_WRITE; |
7076eabc AW |
2009 | } |
2010 | } | |
2011 | break; | |
2012 | case NV_3D0_WRITE: | |
39360f0b AW |
2013 | quirk->data.flags = NV_3D0_NONE; |
2014 | if (addr == quirk->data.data_offset) { | |
2015 | vfio_pci_write_config(pdev, quirk->data.address_val, data, size); | |
7076eabc AW |
2016 | DPRINTF("%s(0x3d0, 0x%"PRIx64", %d)\n", __func__, data, size); |
2017 | return; | |
2018 | } | |
2019 | break; | |
7076eabc AW |
2020 | } |
2021 | ||
2022 | vfio_vga_write(&vdev->vga.region[QEMU_PCI_VGA_IO_HI], | |
39360f0b | 2023 | addr + quirk->data.base_offset, data, size); |
7076eabc AW |
2024 | } |
2025 | ||
2026 | static const MemoryRegionOps vfio_nvidia_3d0_quirk = { | |
2027 | .read = vfio_nvidia_3d0_quirk_read, | |
2028 | .write = vfio_nvidia_3d0_quirk_write, | |
2029 | .endianness = DEVICE_LITTLE_ENDIAN, | |
2030 | }; | |
2031 | ||
2032 | static void vfio_vga_probe_nvidia_3d0_quirk(VFIODevice *vdev) | |
2033 | { | |
2034 | PCIDevice *pdev = &vdev->pdev; | |
2035 | VFIOQuirk *quirk; | |
2036 | ||
2037 | if (pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_NVIDIA || | |
2038 | !vdev->bars[1].size) { | |
2039 | return; | |
2040 | } | |
2041 | ||
2042 | quirk = g_malloc0(sizeof(*quirk)); | |
2043 | quirk->vdev = vdev; | |
39360f0b AW |
2044 | quirk->data.base_offset = 0x10; |
2045 | quirk->data.address_offset = 4; | |
2046 | quirk->data.address_size = 2; | |
2047 | quirk->data.address_match = 0x1800; | |
2048 | quirk->data.address_mask = PCI_CONFIG_SPACE_SIZE - 1; | |
2049 | quirk->data.data_offset = 0; | |
2050 | quirk->data.data_size = 4; | |
2051 | ||
2052 | memory_region_init_io(&quirk->mem, OBJECT(vdev), &vfio_nvidia_3d0_quirk, | |
2053 | quirk, "vfio-nvidia-3d0-quirk", 6); | |
7076eabc | 2054 | memory_region_add_subregion(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem, |
39360f0b | 2055 | quirk->data.base_offset, &quirk->mem); |
7076eabc AW |
2056 | |
2057 | QLIST_INSERT_HEAD(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].quirks, | |
2058 | quirk, next); | |
2059 | ||
2060 | DPRINTF("Enabled NVIDIA VGA 0x3d0 quirk for device %04x:%02x:%02x.%x\n", | |
2061 | vdev->host.domain, vdev->host.bus, vdev->host.slot, | |
2062 | vdev->host.function); | |
2063 | } | |
2064 | ||
2065 | /* | |
2066 | * The second quirk is documented in envytools. The I/O port BAR5 is just | |
2067 | * a set of address/data ports to the MMIO BARs. The BAR we care about is | |
2068 | * again BAR0. This backdoor is apparently a bit newer than the one above | |
2069 | * so we need to not only trap 256 bytes @0x1800, but also all of PCI config | |
2070 | * space, including extended space, which is available in the 4k window @0x88000. | |
2071 | */ | |
2072 | enum { | |
2073 | NV_BAR5_ADDRESS = 0x1, | |
2074 | NV_BAR5_ENABLE = 0x2, | |
2075 | NV_BAR5_MASTER = 0x4, | |
2076 | NV_BAR5_VALID = 0x7, | |
2077 | }; | |
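/*
 * Illustrative sketch, not part of the driver: the BAR5 register layout as
 * used by the handlers below (offsets taken from the code, names are ours).
 */
enum {
    NV_BAR5_MASTER_REG  = 0x0,  /* bit 0 enables the backdoor master */
    NV_BAR5_ENABLE_REG  = 0x4,  /* bit 0 enables the window */
    NV_BAR5_ADDRESS_REG = 0x8,  /* BAR0 target: 0x88000 | off or 0x1800 | off */
    NV_BAR5_DATA_REG    = 0xc,  /* data for the currently selected target */
};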
2078 | ||
7076eabc AW |
2079 | static void vfio_nvidia_bar5_window_quirk_write(void *opaque, hwaddr addr, |
2080 | uint64_t data, unsigned size) | |
2081 | { | |
2082 | VFIOQuirk *quirk = opaque; | |
7076eabc | 2083 | |
7076eabc AW |
2084 | switch (addr) { |
2085 | case 0x0: | |
2086 | if (data & 0x1) { | |
39360f0b | 2087 | quirk->data.flags |= NV_BAR5_MASTER; |
7076eabc | 2088 | } else { |
39360f0b | 2089 | quirk->data.flags &= ~NV_BAR5_MASTER; |
7076eabc AW |
2090 | } |
2091 | break; | |
2092 | case 0x4: | |
2093 | if (data & 0x1) { | |
39360f0b | 2094 | quirk->data.flags |= NV_BAR5_ENABLE; |
7076eabc | 2095 | } else { |
39360f0b | 2096 | quirk->data.flags &= ~NV_BAR5_ENABLE; |
7076eabc AW |
2097 | } |
2098 | break; | |
2099 | case 0x8: | |
39360f0b | 2100 | if (quirk->data.flags & NV_BAR5_MASTER) { |
7076eabc | 2101 | if ((data & ~0xfff) == 0x88000) { |
39360f0b AW |
2102 | quirk->data.flags |= NV_BAR5_ADDRESS; |
2103 | quirk->data.address_val = data & 0xfff; | |
7076eabc | 2104 | } else if ((data & ~0xff) == 0x1800) { |
39360f0b AW |
2105 | quirk->data.flags |= NV_BAR5_ADDRESS; |
2106 | quirk->data.address_val = data & 0xff; | |
7076eabc | 2107 | } else { |
39360f0b | 2108 | quirk->data.flags &= ~NV_BAR5_ADDRESS; |
7076eabc AW |
2109 | } |
2110 | } | |
2111 | break; | |
7076eabc AW |
2112 | } |
2113 | ||
39360f0b | 2114 | vfio_generic_window_quirk_write(opaque, addr, data, size); |
7076eabc AW |
2115 | } |
2116 | ||
2117 | static const MemoryRegionOps vfio_nvidia_bar5_window_quirk = { | |
39360f0b | 2118 | .read = vfio_generic_window_quirk_read, |
7076eabc AW |
2119 | .write = vfio_nvidia_bar5_window_quirk_write, |
2120 | .valid.min_access_size = 4, | |
2121 | .endianness = DEVICE_LITTLE_ENDIAN, | |
2122 | }; | |
2123 | ||
2124 | static void vfio_probe_nvidia_bar5_window_quirk(VFIODevice *vdev, int nr) | |
2125 | { | |
2126 | PCIDevice *pdev = &vdev->pdev; | |
2127 | VFIOQuirk *quirk; | |
2128 | ||
2129 | if (!vdev->has_vga || nr != 5 || | |
2130 | pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_NVIDIA) { | |
2131 | return; | |
2132 | } | |
2133 | ||
2134 | quirk = g_malloc0(sizeof(*quirk)); | |
2135 | quirk->vdev = vdev; | |
39360f0b AW |
2136 | quirk->data.read_flags = quirk->data.write_flags = NV_BAR5_VALID; |
2137 | quirk->data.address_offset = 0x8; | |
2138 | quirk->data.address_size = 0; /* actually 4, but avoids generic code */ | |
2139 | quirk->data.data_offset = 0xc; | |
2140 | quirk->data.data_size = 4; | |
2141 | quirk->data.bar = nr; | |
2142 | ||
2143 | memory_region_init_io(&quirk->mem, OBJECT(vdev), | |
2144 | &vfio_nvidia_bar5_window_quirk, quirk, | |
7076eabc AW |
2145 | "vfio-nvidia-bar5-window-quirk", 16); |
2146 | memory_region_add_subregion_overlap(&vdev->bars[nr].mem, 0, &quirk->mem, 1); | |
2147 | ||
2148 | QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next); | |
2149 | ||
2150 | DPRINTF("Enabled NVIDIA BAR5 window quirk for device %04x:%02x:%02x.%x\n", | |
2151 | vdev->host.domain, vdev->host.bus, vdev->host.slot, | |
2152 | vdev->host.function); | |
2153 | } | |
2154 | ||
96eeeba0 AW |
2155 | static void vfio_nvidia_88000_quirk_write(void *opaque, hwaddr addr, |
2156 | uint64_t data, unsigned size) | |
2157 | { | |
2158 | VFIOQuirk *quirk = opaque; | |
2159 | VFIODevice *vdev = quirk->vdev; | |
2160 | PCIDevice *pdev = &vdev->pdev; | |
2161 | hwaddr base = quirk->data.address_match & TARGET_PAGE_MASK; | |
2162 | ||
2163 | vfio_generic_quirk_write(opaque, addr, data, size); | |
2164 | ||
2165 | /* | |
2166 | * Nvidia seems to acknowledge MSI interrupts by writing 0xff to the | |
2167 | * MSI capability ID register. Both the ID and next register are | |
2168 | * read-only, so we allow writes covering either of those to real hw. | |
2169 | * NB - only fixed for the 0x88000 MMIO window. | |
2170 | */ | |
2171 | if ((pdev->cap_present & QEMU_PCI_CAP_MSI) && | |
2172 | vfio_range_contained(addr, size, pdev->msi_cap, PCI_MSI_FLAGS)) { | |
2173 | vfio_bar_write(&vdev->bars[quirk->data.bar], addr + base, data, size); | |
2174 | } | |
2175 | } | |
2176 | ||
2177 | static const MemoryRegionOps vfio_nvidia_88000_quirk = { | |
2178 | .read = vfio_generic_quirk_read, | |
2179 | .write = vfio_nvidia_88000_quirk_write, | |
2180 | .endianness = DEVICE_LITTLE_ENDIAN, | |
2181 | }; | |
2182 | ||
7076eabc AW |
2183 | /* |
2184 | * Finally, BAR0 itself. We want to redirect any accesses to either | |
2185 | * 0x1800 or 0x88000 through the PCI config space access functions. | |
2186 | * | |
2187 | * NB - quirk at a page granularity or else they don't seem to work when | |
2188 | * BARs are mmap'd | |
2189 | * | |
2190 | * Here's offset 0x88000... | |
2191 | */ | |
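/*
 * Worked example (assuming 4KiB target pages): address_match = 0x88000 and
 * address_mask = PCIE_CONFIG_SPACE_SIZE - 1 = 0xfff give a subregion placed
 * at 0x88000 & TARGET_PAGE_MASK = 0x88000 and sized
 * TARGET_PAGE_ALIGN(0x1000) = 0x1000, i.e. exactly one page.
 */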
7076eabc AW |
2192 | static void vfio_probe_nvidia_bar0_88000_quirk(VFIODevice *vdev, int nr) |
2193 | { | |
2194 | PCIDevice *pdev = &vdev->pdev; | |
2195 | VFIOQuirk *quirk; | |
fe08275d | 2196 | uint16_t vendor, class; |
7076eabc | 2197 | |
fe08275d AW |
2198 | vendor = pci_get_word(pdev->config + PCI_VENDOR_ID); |
2199 | class = pci_get_word(pdev->config + PCI_CLASS_DEVICE); | |
2200 | ||
2201 | if (nr != 0 || vendor != PCI_VENDOR_ID_NVIDIA || | |
2202 | class != PCI_CLASS_DISPLAY_VGA) { | |
7076eabc AW |
2203 | return; |
2204 | } | |
2205 | ||
2206 | quirk = g_malloc0(sizeof(*quirk)); | |
2207 | quirk->vdev = vdev; | |
39360f0b AW |
2208 | quirk->data.flags = quirk->data.read_flags = quirk->data.write_flags = 1; |
2209 | quirk->data.address_match = 0x88000; | |
2210 | quirk->data.address_mask = PCIE_CONFIG_SPACE_SIZE - 1; | |
2211 | quirk->data.bar = nr; | |
2212 | ||
96eeeba0 | 2213 | memory_region_init_io(&quirk->mem, OBJECT(vdev), &vfio_nvidia_88000_quirk, |
39360f0b AW |
2214 | quirk, "vfio-nvidia-bar0-88000-quirk", |
2215 | TARGET_PAGE_ALIGN(quirk->data.address_mask + 1)); | |
7076eabc | 2216 | memory_region_add_subregion_overlap(&vdev->bars[nr].mem, |
39360f0b AW |
2217 | quirk->data.address_match & TARGET_PAGE_MASK, |
2218 | &quirk->mem, 1); | |
7076eabc AW |
2219 | |
2220 | QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next); | |
2221 | ||
2222 | DPRINTF("Enabled NVIDIA BAR0 0x88000 quirk for device %04x:%02x:%02x.%x\n", | |
2223 | vdev->host.domain, vdev->host.bus, vdev->host.slot, | |
2224 | vdev->host.function); | |
2225 | } | |
2226 | ||
2227 | /* | |
2228 | * And here's the same for BAR0 offset 0x1800... | |
2229 | */ | |
7076eabc AW |
2230 | static void vfio_probe_nvidia_bar0_1800_quirk(VFIODevice *vdev, int nr) |
2231 | { | |
2232 | PCIDevice *pdev = &vdev->pdev; | |
2233 | VFIOQuirk *quirk; | |
2234 | ||
2235 | if (!vdev->has_vga || nr != 0 || | |
2236 | pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_NVIDIA) { | |
2237 | return; | |
2238 | } | |
2239 | ||
2240 | /* Log the chipset ID */ | |
2241 | DPRINTF("Nvidia NV%02x\n", | |
2242 | (unsigned int)(vfio_bar_read(&vdev->bars[0], 0, 4) >> 20) & 0xff); | |
2243 | ||
2244 | quirk = g_malloc0(sizeof(*quirk)); | |
2245 | quirk->vdev = vdev; | |
39360f0b AW |
2246 | quirk->data.flags = quirk->data.read_flags = quirk->data.write_flags = 1; |
2247 | quirk->data.address_match = 0x1800; | |
2248 | quirk->data.address_mask = PCI_CONFIG_SPACE_SIZE - 1; | |
2249 | quirk->data.bar = nr; | |
7076eabc | 2250 | |
39360f0b | 2251 | memory_region_init_io(&quirk->mem, OBJECT(vdev), &vfio_generic_quirk, quirk, |
7076eabc | 2252 | "vfio-nvidia-bar0-1800-quirk", |
39360f0b | 2253 | TARGET_PAGE_ALIGN(quirk->data.address_mask + 1)); |
7076eabc | 2254 | memory_region_add_subregion_overlap(&vdev->bars[nr].mem, |
39360f0b AW |
2255 | quirk->data.address_match & TARGET_PAGE_MASK, |
2256 | &quirk->mem, 1); | |
7076eabc AW |
2257 | |
2258 | QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next); | |
2259 | ||
2260 | DPRINTF("Enabled NVIDIA BAR0 0x1800 quirk for device %04x:%02x:%02x.%x\n", | |
2261 | vdev->host.domain, vdev->host.bus, vdev->host.slot, | |
2262 | vdev->host.function); | |
2263 | } | |
2264 | ||
2265 | /* | |
2266 | * TODO - Some Nvidia devices provide config access to their companion HDA | |
2267 | * device and even to their parent bridge via these config space mirrors. | |
2268 | * Add quirks for those regions. | |
2269 | */ | |
2270 | ||
2271 | /* | |
2272 | * Common quirk probe entry points. | |
2273 | */ | |
2274 | static void vfio_vga_quirk_setup(VFIODevice *vdev) | |
2275 | { | |
2276 | vfio_vga_probe_ati_3c3_quirk(vdev); | |
2277 | vfio_vga_probe_nvidia_3d0_quirk(vdev); | |
2278 | } | |
2279 | ||
2280 | static void vfio_vga_quirk_teardown(VFIODevice *vdev) | |
2281 | { | |
2282 | int i; | |
2283 | ||
2284 | for (i = 0; i < ARRAY_SIZE(vdev->vga.region); i++) { | |
2285 | while (!QLIST_EMPTY(&vdev->vga.region[i].quirks)) { | |
2286 | VFIOQuirk *quirk = QLIST_FIRST(&vdev->vga.region[i].quirks); | |
2287 | memory_region_del_subregion(&vdev->vga.region[i].mem, &quirk->mem); | |
d8d95814 | 2288 | object_unparent(OBJECT(&quirk->mem)); |
7076eabc AW |
2289 | QLIST_REMOVE(quirk, next); |
2290 | g_free(quirk); | |
2291 | } | |
2292 | } | |
2293 | } | |
2294 | ||
2295 | static void vfio_bar_quirk_setup(VFIODevice *vdev, int nr) | |
2296 | { | |
39360f0b AW |
2297 | vfio_probe_ati_bar4_window_quirk(vdev, nr); |
2298 | vfio_probe_ati_bar2_4000_quirk(vdev, nr); | |
7076eabc AW |
2299 | vfio_probe_nvidia_bar5_window_quirk(vdev, nr); |
2300 | vfio_probe_nvidia_bar0_88000_quirk(vdev, nr); | |
2301 | vfio_probe_nvidia_bar0_1800_quirk(vdev, nr); | |
4cb47d28 | 2302 | vfio_probe_rtl8168_bar2_window_quirk(vdev, nr); |
7076eabc AW |
2303 | } |
2304 | ||
2305 | static void vfio_bar_quirk_teardown(VFIODevice *vdev, int nr) | |
2306 | { | |
2307 | VFIOBAR *bar = &vdev->bars[nr]; | |
2308 | ||
2309 | while (!QLIST_EMPTY(&bar->quirks)) { | |
2310 | VFIOQuirk *quirk = QLIST_FIRST(&bar->quirks); | |
2311 | memory_region_del_subregion(&bar->mem, &quirk->mem); | |
d8d95814 | 2312 | object_unparent(OBJECT(&quirk->mem)); |
7076eabc AW |
2313 | QLIST_REMOVE(quirk, next); |
2314 | g_free(quirk); | |
2315 | } | |
2316 | } | |
2317 | ||
65501a74 AW |
2318 | /* |
2319 | * PCI config space | |
2320 | */ | |
2321 | static uint32_t vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len) | |
2322 | { | |
2323 | VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev); | |
4b5d5e87 | 2324 | uint32_t emu_bits = 0, emu_val = 0, phys_val = 0, val; |
65501a74 | 2325 | |
4b5d5e87 AW |
2326 | memcpy(&emu_bits, vdev->emulated_config_bits + addr, len); |
2327 | emu_bits = le32_to_cpu(emu_bits); | |
65501a74 | 2328 | |
4b5d5e87 AW |
2329 | if (emu_bits) { |
2330 | emu_val = pci_default_read_config(pdev, addr, len); | |
2331 | } | |
2332 | ||
2333 | if (~emu_bits & (0xffffffffU >> (32 - len * 8))) { | |
2334 | ssize_t ret; | |
2335 | ||
2336 | ret = pread(vdev->fd, &phys_val, len, vdev->config_offset + addr); | |
2337 | if (ret != len) { | |
312fd5f2 | 2338 | error_report("%s(%04x:%02x:%02x.%x, 0x%x, 0x%x) failed: %m", |
65501a74 AW |
2339 | __func__, vdev->host.domain, vdev->host.bus, |
2340 | vdev->host.slot, vdev->host.function, addr, len); | |
2341 | return -errno; | |
2342 | } | |
4b5d5e87 | 2343 | phys_val = le32_to_cpu(phys_val); |
65501a74 AW |
2344 | } |
2345 | ||
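    /*
     * Bits set in emulated_config_bits are served from QEMU's emulated
     * config space; all other bits come straight from the device.
     */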
4b5d5e87 | 2346 | val = (emu_val & emu_bits) | (phys_val & ~emu_bits); |
65501a74 AW |
2347 | |
2348 | DPRINTF("%s(%04x:%02x:%02x.%x, @0x%x, len=0x%x) %x\n", __func__, | |
2349 | vdev->host.domain, vdev->host.bus, vdev->host.slot, | |
2350 | vdev->host.function, addr, len, val); | |
2351 | ||
2352 | return val; | |
2353 | } | |
2354 | ||
2355 | static void vfio_pci_write_config(PCIDevice *pdev, uint32_t addr, | |
2356 | uint32_t val, int len) | |
2357 | { | |
2358 | VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev); | |
2359 | uint32_t val_le = cpu_to_le32(val); | |
2360 | ||
2361 | DPRINTF("%s(%04x:%02x:%02x.%x, @0x%x, 0x%x, len=0x%x)\n", __func__, | |
2362 | vdev->host.domain, vdev->host.bus, vdev->host.slot, | |
2363 | vdev->host.function, addr, val, len); | |
2364 | ||
2365 | /* Write everything to VFIO, let it filter out what we can't write */ | |
2366 | if (pwrite(vdev->fd, &val_le, len, vdev->config_offset + addr) != len) { | |
312fd5f2 | 2367 | error_report("%s(%04x:%02x:%02x.%x, 0x%x, 0x%x, 0x%x) failed: %m", |
65501a74 AW |
2368 | __func__, vdev->host.domain, vdev->host.bus, |
2369 | vdev->host.slot, vdev->host.function, addr, val, len); | |
2370 | } | |
2371 | ||
65501a74 AW |
2372 | /* MSI/MSI-X Enabling/Disabling */ |
2373 | if (pdev->cap_present & QEMU_PCI_CAP_MSI && | |
2374 | ranges_overlap(addr, len, pdev->msi_cap, vdev->msi_cap_size)) { | |
2375 | int is_enabled, was_enabled = msi_enabled(pdev); | |
2376 | ||
2377 | pci_default_write_config(pdev, addr, val, len); | |
2378 | ||
2379 | is_enabled = msi_enabled(pdev); | |
2380 | ||
c7679d45 AW |
2381 | if (!was_enabled) { |
2382 | if (is_enabled) { | |
2383 | vfio_enable_msi(vdev); | |
2384 | } | |
2385 | } else { | |
2386 | if (!is_enabled) { | |
2387 | vfio_disable_msi(vdev); | |
2388 | } else { | |
2389 | vfio_update_msi(vdev); | |
2390 | } | |
65501a74 | 2391 | } |
4b5d5e87 | 2392 | } else if (pdev->cap_present & QEMU_PCI_CAP_MSIX && |
65501a74 AW |
2393 | ranges_overlap(addr, len, pdev->msix_cap, MSIX_CAP_LENGTH)) { |
2394 | int is_enabled, was_enabled = msix_enabled(pdev); | |
2395 | ||
2396 | pci_default_write_config(pdev, addr, val, len); | |
2397 | ||
2398 | is_enabled = msix_enabled(pdev); | |
2399 | ||
2400 | if (!was_enabled && is_enabled) { | |
fd704adc | 2401 | vfio_enable_msix(vdev); |
65501a74 | 2402 | } else if (was_enabled && !is_enabled) { |
fd704adc | 2403 | vfio_disable_msix(vdev); |
65501a74 | 2404 | } |
4b5d5e87 AW |
2405 | } else { |
2406 | /* Write everything to QEMU to keep emulated bits correct */ | |
2407 | pci_default_write_config(pdev, addr, val, len); | |
65501a74 AW |
2408 | } |
2409 | } | |
2410 | ||
2411 | /* | |
2412 | * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86 | |
2413 | */ | |
af6bc27e | 2414 | static int vfio_dma_unmap(VFIOContainer *container, |
a8170e5e | 2415 | hwaddr iova, ram_addr_t size) |
af6bc27e AW |
2416 | { |
2417 | struct vfio_iommu_type1_dma_unmap unmap = { | |
2418 | .argsz = sizeof(unmap), | |
2419 | .flags = 0, | |
2420 | .iova = iova, | |
2421 | .size = size, | |
2422 | }; | |
2423 | ||
2424 | if (ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) { | |
2425 | DPRINTF("VFIO_UNMAP_DMA: %d\n", -errno); | |
2426 | return -errno; | |
2427 | } | |
2428 | ||
2429 | return 0; | |
2430 | } | |
2431 | ||
a8170e5e | 2432 | static int vfio_dma_map(VFIOContainer *container, hwaddr iova, |
65501a74 AW |
2433 | ram_addr_t size, void *vaddr, bool readonly) |
2434 | { | |
2435 | struct vfio_iommu_type1_dma_map map = { | |
2436 | .argsz = sizeof(map), | |
2437 | .flags = VFIO_DMA_MAP_FLAG_READ, | |
5976cdd5 | 2438 | .vaddr = (__u64)(uintptr_t)vaddr, |
65501a74 AW |
2439 | .iova = iova, |
2440 | .size = size, | |
2441 | }; | |
2442 | ||
2443 | if (!readonly) { | |
2444 | map.flags |= VFIO_DMA_MAP_FLAG_WRITE; | |
2445 | } | |
2446 | ||
12af1344 AW |
2447 | /* |
2448 | * Try the mapping; if it fails with EBUSY, unmap the region and try | |
2449 | * again. This shouldn't be necessary, but we sometimes see it in | |
2450 | * the VGA ROM space. | |
2451 | */ | |
2452 | if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0 || | |
2453 | (errno == EBUSY && vfio_dma_unmap(container, iova, size) == 0 && | |
2454 | ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0)) { | |
2455 | return 0; | |
65501a74 AW |
2456 | } |
2457 | ||
12af1344 AW |
2458 | DPRINTF("VFIO_MAP_DMA: %d\n", -errno); |
2459 | return -errno; | |
65501a74 AW |
2460 | } |
2461 | ||
65501a74 AW |
2462 | static bool vfio_listener_skipped_section(MemoryRegionSection *section) |
2463 | { | |
5e70018b DG |
2464 | return (!memory_region_is_ram(section->mr) && |
2465 | !memory_region_is_iommu(section->mr)) || | |
d3a2fd9b AW |
2466 | /* |
2467 | * Sizing an enabled 64-bit BAR can cause spurious mappings to | |
2468 | * addresses in the upper part of the 64-bit address space. These | |
2469 | * are never accessed by the CPU and beyond the address width of | |
2470 | * some IOMMU hardware. TODO: VFIO should tell us the IOMMU width. | |
2471 | */ | |
2472 | section->offset_within_address_space & (1ULL << 63); | |
65501a74 AW |
2473 | } |
2474 | ||
5e70018b DG |
2475 | static void vfio_iommu_map_notify(Notifier *n, void *data) |
2476 | { | |
2477 | VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, n); | |
2478 | VFIOContainer *container = giommu->container; | |
2479 | IOMMUTLBEntry *iotlb = data; | |
2480 | MemoryRegion *mr; | |
2481 | hwaddr xlat; | |
2482 | hwaddr len = iotlb->addr_mask + 1; | |
2483 | void *vaddr; | |
2484 | int ret; | |
2485 | ||
2486 | DPRINTF("iommu map @ %"HWADDR_PRIx" - %"HWADDR_PRIx"\n", | |
2487 | iotlb->iova, iotlb->iova + iotlb->addr_mask); | |
2488 | ||
2489 | /* | |
2490 | * The IOMMU TLB entry we have just covers translation through | |
2491 | * this IOMMU to its immediate target. We need to translate | |
2492 | * it the rest of the way through to memory. | |
2493 | */ | |
2494 | mr = address_space_translate(&address_space_memory, | |
2495 | iotlb->translated_addr, | |
2496 | &xlat, &len, iotlb->perm & IOMMU_WO); | |
2497 | if (!memory_region_is_ram(mr)) { | |
2498 | DPRINTF("iommu map to non memory area %"HWADDR_PRIx"\n", | |
2499 | xlat); | |
2500 | return; | |
2501 | } | |
2502 | /* | |
2503 | * Translation truncates length to the IOMMU page size, | |
2504 | * check that it did not truncate too much. | |
2505 | */ | |
2506 | if (len & iotlb->addr_mask) { | |
2507 | DPRINTF("iommu has granularity incompatible with target AS\n"); | |
2508 | return; | |
2509 | } | |
2510 | ||
27e27782 | 2511 | if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) { |
5e70018b DG |
2512 | vaddr = memory_region_get_ram_ptr(mr) + xlat; |
2513 | ||
2514 | ret = vfio_dma_map(container, iotlb->iova, | |
2515 | iotlb->addr_mask + 1, vaddr, | |
2516 | !(iotlb->perm & IOMMU_WO) || mr->readonly); | |
2517 | if (ret) { | |
2518 | error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", " | |
2519 | "0x%"HWADDR_PRIx", %p) = %d (%m)", | |
2520 | container, iotlb->iova, | |
2521 | iotlb->addr_mask + 1, vaddr, ret); | |
2522 | } | |
2523 | } else { | |
2524 | ret = vfio_dma_unmap(container, iotlb->iova, iotlb->addr_mask + 1); | |
2525 | if (ret) { | |
2526 | error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", " | |
2527 | "0x%"HWADDR_PRIx") = %d (%m)", | |
2528 | container, iotlb->iova, | |
2529 | iotlb->addr_mask + 1, ret); | |
2530 | } | |
2531 | } | |
2532 | } | |
2533 | ||
65501a74 AW |
2534 | static void vfio_listener_region_add(MemoryListener *listener, |
2535 | MemoryRegionSection *section) | |
2536 | { | |
2537 | VFIOContainer *container = container_of(listener, VFIOContainer, | |
87ca1f77 | 2538 | iommu_data.type1.listener); |
a8170e5e | 2539 | hwaddr iova, end; |
7532d3cb | 2540 | Int128 llend; |
65501a74 AW |
2541 | void *vaddr; |
2542 | int ret; | |
2543 | ||
2544 | if (vfio_listener_skipped_section(section)) { | |
82ca8912 | 2545 | DPRINTF("SKIPPING region_add %"HWADDR_PRIx" - %"PRIx64"\n", |
65501a74 | 2546 | section->offset_within_address_space, |
1d5bf692 AK |
2547 | section->offset_within_address_space + |
2548 | int128_get64(int128_sub(section->size, int128_one()))); | |
65501a74 AW |
2549 | return; |
2550 | } | |
2551 | ||
2552 | if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) != | |
2553 | (section->offset_within_region & ~TARGET_PAGE_MASK))) { | |
312fd5f2 | 2554 | error_report("%s received unaligned region", __func__); |
65501a74 AW |
2555 | return; |
2556 | } | |
2557 | ||
2558 | iova = TARGET_PAGE_ALIGN(section->offset_within_address_space); | |
7532d3cb AK |
2559 | llend = int128_make64(section->offset_within_address_space); |
2560 | llend = int128_add(llend, section->size); | |
2561 | llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK)); | |
65501a74 | 2562 | |
7532d3cb | 2563 | if (int128_ge(int128_make64(iova), llend)) { |
65501a74 AW |
2564 | return; |
2565 | } | |
2566 | ||
5e70018b DG |
2567 | memory_region_ref(section->mr); |
2568 | ||
2569 | if (memory_region_is_iommu(section->mr)) { | |
2570 | VFIOGuestIOMMU *giommu; | |
2571 | ||
2572 | DPRINTF("region_add [iommu] %"HWADDR_PRIx" - %"HWADDR_PRIx"\n", | |
2573 | iova, int128_get64(int128_sub(llend, int128_one()))); | |
2574 | /* | |
2575 | * FIXME: We should do some checking to see if the | |
2576 | * capabilities of the host VFIO IOMMU are adequate to model | |
2577 | * the guest IOMMU | |
2578 | * | |
2579 | * FIXME: For VFIO iommu types which have KVM acceleration to | |
2580 | * avoid bouncing all map/unmaps through qemu this way, this | |
2581 | * would be the right place to wire that up (tell the KVM | |
2582 | * device emulation the VFIO iommu handles to use). | |
2583 | */ | |
2584 | /* | |
2585 | * This assumes that the guest IOMMU is empty of | |
2586 | * mappings at this point. | |
2587 | * | |
2588 | * One way of doing this is: | |
2589 | * 1. Avoid sharing IOMMUs between emulated devices or different | |
2590 | * IOMMU groups. | |
2591 | * 2. Implement VFIO_IOMMU_ENABLE in the host kernel to fail if | |
2592 | * there are some mappings in IOMMU. | |
2593 | * | |
2594 | * VFIO on SPAPR does that. Other IOMMU models may do it differently; | |
2595 | * they must either make sure there are no existing mappings or | |
2596 | * loop through existing mappings to map them into VFIO. | |
2597 | */ | |
2598 | giommu = g_malloc0(sizeof(*giommu)); | |
2599 | giommu->iommu = section->mr; | |
2600 | giommu->container = container; | |
2601 | giommu->n.notify = vfio_iommu_map_notify; | |
2602 | QLIST_INSERT_HEAD(&container->giommu_list, giommu, giommu_next); | |
2603 | memory_region_register_iommu_notifier(giommu->iommu, &giommu->n); | |
2604 | ||
2605 | return; | |
2606 | } | |
2607 | ||
2608 | /* Here we assume that memory_region_is_ram(section->mr)==true */ | |
2609 | ||
7532d3cb | 2610 | end = int128_get64(llend); |
65501a74 AW |
2611 | vaddr = memory_region_get_ram_ptr(section->mr) + |
2612 | section->offset_within_region + | |
2613 | (iova - section->offset_within_address_space); | |
2614 | ||
5e70018b | 2615 | DPRINTF("region_add [ram] %"HWADDR_PRIx" - %"HWADDR_PRIx" [%p]\n", |
65501a74 AW |
2616 | iova, end - 1, vaddr); |
2617 | ||
2618 | ret = vfio_dma_map(container, iova, end - iova, vaddr, section->readonly); | |
2619 | if (ret) { | |
a8170e5e | 2620 | error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", " |
312fd5f2 | 2621 | "0x%"HWADDR_PRIx", %p) = %d (%m)", |
65501a74 | 2622 | container, iova, end - iova, vaddr, ret); |
87ca1f77 AW |
2623 | |
2624 | /* | |
2625 | * On the initfn path, store the first error in the container so we | |
2626 | * can gracefully fail. Runtime, there's not much we can do other | |
2627 | * than throw a hardware error. | |
2628 | */ | |
2629 | if (!container->iommu_data.type1.initialized) { | |
2630 | if (!container->iommu_data.type1.error) { | |
2631 | container->iommu_data.type1.error = ret; | |
2632 | } | |
2633 | } else { | |
4e505ddd | 2634 | hw_error("vfio: DMA mapping failed, unable to continue"); |
87ca1f77 | 2635 | } |
65501a74 AW |
2636 | } |
2637 | } | |
2638 | ||
2639 | static void vfio_listener_region_del(MemoryListener *listener, | |
2640 | MemoryRegionSection *section) | |
2641 | { | |
2642 | VFIOContainer *container = container_of(listener, VFIOContainer, | |
87ca1f77 | 2643 | iommu_data.type1.listener); |
a8170e5e | 2644 | hwaddr iova, end; |
65501a74 AW |
2645 | int ret; |
2646 | ||
2647 | if (vfio_listener_skipped_section(section)) { | |
82ca8912 | 2648 | DPRINTF("SKIPPING region_del %"HWADDR_PRIx" - %"PRIx64"\n", |
65501a74 | 2649 | section->offset_within_address_space, |
1d5bf692 AK |
2650 | section->offset_within_address_space + |
2651 | int128_get64(int128_sub(section->size, int128_one()))); | |
65501a74 AW |
2652 | return; |
2653 | } | |
2654 | ||
2655 | if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) != | |
2656 | (section->offset_within_region & ~TARGET_PAGE_MASK))) { | |
312fd5f2 | 2657 | error_report("%s received unaligned region", __func__); |
65501a74 AW |
2658 | return; |
2659 | } | |
2660 | ||
5e70018b DG |
2661 | if (memory_region_is_iommu(section->mr)) { |
2662 | VFIOGuestIOMMU *giommu; | |
2663 | ||
2664 | QLIST_FOREACH(giommu, &container->giommu_list, giommu_next) { | |
2665 | if (giommu->iommu == section->mr) { | |
2666 | memory_region_unregister_iommu_notifier(&giommu->n); | |
2667 | QLIST_REMOVE(giommu, giommu_next); | |
2668 | g_free(giommu); | |
2669 | break; | |
2670 | } | |
2671 | } | |
2672 | ||
2673 | /* | |
2674 | * FIXME: We assume the one big unmap below is adequate to | |
2675 | * remove any individual page mappings in the IOMMU which | |
2676 | * might have been copied into VFIO. This works for a page table | |
2677 | * based IOMMU where a big unmap flattens a large range of IO-PTEs. | |
2678 | * That may not be true for all IOMMU types. | |
2679 | */ | |
2680 | } | |
2681 | ||
65501a74 | 2682 | iova = TARGET_PAGE_ALIGN(section->offset_within_address_space); |
052e87b0 | 2683 | end = (section->offset_within_address_space + int128_get64(section->size)) & |
65501a74 AW |
2684 | TARGET_PAGE_MASK; |
2685 | ||
2686 | if (iova >= end) { | |
2687 | return; | |
2688 | } | |
2689 | ||
82ca8912 | 2690 | DPRINTF("region_del %"HWADDR_PRIx" - %"HWADDR_PRIx"\n", |
65501a74 AW |
2691 | iova, end - 1); |
2692 | ||
2693 | ret = vfio_dma_unmap(container, iova, end - iova); | |
dfde4e6e | 2694 | memory_region_unref(section->mr); |
65501a74 | 2695 | if (ret) { |
a8170e5e | 2696 | error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", " |
312fd5f2 | 2697 | "0x%"HWADDR_PRIx") = %d (%m)", |
65501a74 AW |
2698 | container, iova, end - iova, ret); |
2699 | } | |
2700 | } | |
2701 | ||
2702 | static MemoryListener vfio_memory_listener = { | |
65501a74 AW |
2703 | .region_add = vfio_listener_region_add, |
2704 | .region_del = vfio_listener_region_del, | |
65501a74 AW |
2705 | }; |
2706 | ||
2707 | static void vfio_listener_release(VFIOContainer *container) | |
2708 | { | |
87ca1f77 | 2709 | memory_listener_unregister(&container->iommu_data.type1.listener); |
65501a74 AW |
2710 | } |
2711 | ||
2712 | /* | |
2713 | * Interrupt setup | |
2714 | */ | |
2715 | static void vfio_disable_interrupts(VFIODevice *vdev) | |
2716 | { | |
2717 | switch (vdev->interrupt) { | |
2718 | case VFIO_INT_INTx: | |
2719 | vfio_disable_intx(vdev); | |
2720 | break; | |
2721 | case VFIO_INT_MSI: | |
fd704adc | 2722 | vfio_disable_msi(vdev); |
65501a74 AW |
2723 | break; |
2724 | case VFIO_INT_MSIX: | |
fd704adc | 2725 | vfio_disable_msix(vdev); |
65501a74 AW |
2726 | break; |
2727 | } | |
2728 | } | |
2729 | ||
2730 | static int vfio_setup_msi(VFIODevice *vdev, int pos) | |
2731 | { | |
2732 | uint16_t ctrl; | |
2733 | bool msi_64bit, msi_maskbit; | |
2734 | int ret, entries; | |
2735 | ||
65501a74 AW |
2736 | if (pread(vdev->fd, &ctrl, sizeof(ctrl), |
2737 | vdev->config_offset + pos + PCI_CAP_FLAGS) != sizeof(ctrl)) { | |
2738 | return -errno; | |
2739 | } | |
2740 | ctrl = le16_to_cpu(ctrl); | |
2741 | ||
2742 | msi_64bit = !!(ctrl & PCI_MSI_FLAGS_64BIT); | |
2743 | msi_maskbit = !!(ctrl & PCI_MSI_FLAGS_MASKBIT); | |
2744 | entries = 1 << ((ctrl & PCI_MSI_FLAGS_QMASK) >> 1); | |
2745 | ||
2746 | DPRINTF("%04x:%02x:%02x.%x PCI MSI CAP @0x%x\n", vdev->host.domain, | |
2747 | vdev->host.bus, vdev->host.slot, vdev->host.function, pos); | |
2748 | ||
2749 | ret = msi_init(&vdev->pdev, pos, entries, msi_64bit, msi_maskbit); | |
2750 | if (ret < 0) { | |
e43b9a5a AW |
2751 | if (ret == -ENOTSUP) { |
2752 | return 0; | |
2753 | } | |
312fd5f2 | 2754 | error_report("vfio: msi_init failed"); |
65501a74 AW |
2755 | return ret; |
2756 | } | |
2757 | vdev->msi_cap_size = 0xa + (msi_maskbit ? 0xa : 0) + (msi_64bit ? 0x4 : 0); | |
2758 | ||
2759 | return 0; | |
2760 | } | |
2761 | ||
2762 | /* | |
2763 | * We don't have any control over how pci_add_capability() inserts | |
2764 | * capabilities into the chain. In order to set up MSI-X we need a | |
2765 | * MemoryRegion for the BAR. In order to set up the BAR and not | |
2766 | * attempt to mmap the MSI-X table area, which VFIO won't allow, we | |
2767 | * need to first look for where the MSI-X table lives. So we | |
2768 | * unfortunately split MSI-X setup across two functions. | |
2769 | */ | |
2770 | static int vfio_early_setup_msix(VFIODevice *vdev) | |
2771 | { | |
2772 | uint8_t pos; | |
2773 | uint16_t ctrl; | |
2774 | uint32_t table, pba; | |
2775 | ||
2776 | pos = pci_find_capability(&vdev->pdev, PCI_CAP_ID_MSIX); | |
2777 | if (!pos) { | |
2778 | return 0; | |
2779 | } | |
2780 | ||
2781 | if (pread(vdev->fd, &ctrl, sizeof(ctrl), | |
2782 | vdev->config_offset + pos + PCI_CAP_FLAGS) != sizeof(ctrl)) { | |
2783 | return -errno; | |
2784 | } | |
2785 | ||
2786 | if (pread(vdev->fd, &table, sizeof(table), | |
2787 | vdev->config_offset + pos + PCI_MSIX_TABLE) != sizeof(table)) { | |
2788 | return -errno; | |
2789 | } | |
2790 | ||
2791 | if (pread(vdev->fd, &pba, sizeof(pba), | |
2792 | vdev->config_offset + pos + PCI_MSIX_PBA) != sizeof(pba)) { | |
2793 | return -errno; | |
2794 | } | |
2795 | ||
2796 | ctrl = le16_to_cpu(ctrl); | |
2797 | table = le32_to_cpu(table); | |
2798 | pba = le32_to_cpu(pba); | |
2799 | ||
2800 | vdev->msix = g_malloc0(sizeof(*(vdev->msix))); | |
2801 | vdev->msix->table_bar = table & PCI_MSIX_FLAGS_BIRMASK; | |
2802 | vdev->msix->table_offset = table & ~PCI_MSIX_FLAGS_BIRMASK; | |
2803 | vdev->msix->pba_bar = pba & PCI_MSIX_FLAGS_BIRMASK; | |
2804 | vdev->msix->pba_offset = pba & ~PCI_MSIX_FLAGS_BIRMASK; | |
2805 | vdev->msix->entries = (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1; | |
2806 | ||
2807 | DPRINTF("%04x:%02x:%02x.%x " | |
2808 | "PCI MSI-X CAP @0x%x, BAR %d, offset 0x%x, entries %d\n", | |
2809 | vdev->host.domain, vdev->host.bus, vdev->host.slot, | |
2810 | vdev->host.function, pos, vdev->msix->table_bar, | |
2811 | vdev->msix->table_offset, vdev->msix->entries); | |
2812 | ||
2813 | return 0; | |
2814 | } | |
2815 | ||
2816 | static int vfio_setup_msix(VFIODevice *vdev, int pos) | |
2817 | { | |
2818 | int ret; | |
2819 | ||
65501a74 AW |
2820 | ret = msix_init(&vdev->pdev, vdev->msix->entries, |
2821 | &vdev->bars[vdev->msix->table_bar].mem, | |
2822 | vdev->msix->table_bar, vdev->msix->table_offset, | |
2823 | &vdev->bars[vdev->msix->pba_bar].mem, | |
2824 | vdev->msix->pba_bar, vdev->msix->pba_offset, pos); | |
2825 | if (ret < 0) { | |
e43b9a5a AW |
2826 | if (ret == -ENOTSUP) { |
2827 | return 0; | |
2828 | } | |
312fd5f2 | 2829 | error_report("vfio: msix_init failed"); |
65501a74 AW |
2830 | return ret; |
2831 | } | |
2832 | ||
65501a74 AW |
2833 | return 0; |
2834 | } | |
2835 | ||
2836 | static void vfio_teardown_msi(VFIODevice *vdev) | |
2837 | { | |
2838 | msi_uninit(&vdev->pdev); | |
2839 | ||
2840 | if (vdev->msix) { | |
65501a74 AW |
2841 | msix_uninit(&vdev->pdev, &vdev->bars[vdev->msix->table_bar].mem, |
2842 | &vdev->bars[vdev->msix->pba_bar].mem); | |
2843 | } | |
2844 | } | |
2845 | ||
2846 | /* | |
2847 | * Resource setup | |
2848 | */ | |
2849 | static void vfio_mmap_set_enabled(VFIODevice *vdev, bool enabled) | |
2850 | { | |
2851 | int i; | |
2852 | ||
2853 | for (i = 0; i < PCI_ROM_SLOT; i++) { | |
2854 | VFIOBAR *bar = &vdev->bars[i]; | |
2855 | ||
2856 | if (!bar->size) { | |
2857 | continue; | |
2858 | } | |
2859 | ||
2860 | memory_region_set_enabled(&bar->mmap_mem, enabled); | |
2861 | if (vdev->msix && vdev->msix->table_bar == i) { | |
2862 | memory_region_set_enabled(&vdev->msix->mmap_mem, enabled); | |
2863 | } | |
2864 | } | |
2865 | } | |
2866 | ||
2867 | static void vfio_unmap_bar(VFIODevice *vdev, int nr) | |
2868 | { | |
2869 | VFIOBAR *bar = &vdev->bars[nr]; | |
2870 | ||
2871 | if (!bar->size) { | |
2872 | return; | |
2873 | } | |
2874 | ||
7076eabc AW |
2875 | vfio_bar_quirk_teardown(vdev, nr); |
2876 | ||
65501a74 AW |
2877 | memory_region_del_subregion(&bar->mem, &bar->mmap_mem); |
2878 | munmap(bar->mmap, memory_region_size(&bar->mmap_mem)); | |
2879 | ||
2880 | if (vdev->msix && vdev->msix->table_bar == nr) { | |
2881 | memory_region_del_subregion(&bar->mem, &vdev->msix->mmap_mem); | |
2882 | munmap(vdev->msix->mmap, memory_region_size(&vdev->msix->mmap_mem)); | |
2883 | } | |
65501a74 AW |
2884 | } |
2885 | ||
5cb022a1 PB |
2886 | static int vfio_mmap_bar(VFIODevice *vdev, VFIOBAR *bar, |
2887 | MemoryRegion *mem, MemoryRegion *submem, | |
65501a74 AW |
2888 | void **map, size_t size, off_t offset, |
2889 | const char *name) | |
2890 | { | |
2891 | int ret = 0; | |
2892 | ||
82ca8912 | 2893 | if (VFIO_ALLOW_MMAP && size && bar->flags & VFIO_REGION_INFO_FLAG_MMAP) { |
65501a74 AW |
2894 | int prot = 0; |
2895 | ||
2896 | if (bar->flags & VFIO_REGION_INFO_FLAG_READ) { | |
2897 | prot |= PROT_READ; | |
2898 | } | |
2899 | ||
2900 | if (bar->flags & VFIO_REGION_INFO_FLAG_WRITE) { | |
2901 | prot |= PROT_WRITE; | |
2902 | } | |
2903 | ||
2904 | *map = mmap(NULL, size, prot, MAP_SHARED, | |
2905 | bar->fd, bar->fd_offset + offset); | |
2906 | if (*map == MAP_FAILED) { | |
2907 | *map = NULL; | |
2908 | ret = -errno; | |
2909 | goto empty_region; | |
2910 | } | |
2911 | ||
5cb022a1 | 2912 | memory_region_init_ram_ptr(submem, OBJECT(vdev), name, size, *map); |
e4dc3f59 | 2913 | memory_region_set_skip_dump(submem); |
65501a74 AW |
2914 | } else { |
2915 | empty_region: | |
2916 | /* Create a zero-sized sub-region to make cleanup easy. */ | |
5cb022a1 | 2917 | memory_region_init(submem, OBJECT(vdev), name, 0); |
65501a74 AW |
2918 | } |
2919 | ||
2920 | memory_region_add_subregion(mem, offset, submem); | |
2921 | ||
2922 | return ret; | |
2923 | } | |
2924 | ||
2925 | static void vfio_map_bar(VFIODevice *vdev, int nr) | |
2926 | { | |
2927 | VFIOBAR *bar = &vdev->bars[nr]; | |
2928 | unsigned size = bar->size; | |
2929 | char name[64]; | |
2930 | uint32_t pci_bar; | |
2931 | uint8_t type; | |
2932 | int ret; | |
2933 | ||
2934 | /* Skip both unimplemented BARs and the upper half of 64-bit BARs. */ | |
2935 | if (!size) { | |
2936 | return; | |
2937 | } | |
2938 | ||
2939 | snprintf(name, sizeof(name), "VFIO %04x:%02x:%02x.%x BAR %d", | |
2940 | vdev->host.domain, vdev->host.bus, vdev->host.slot, | |
2941 | vdev->host.function, nr); | |
2942 | ||
2943 | /* Determine what type of BAR this is for registration */ | |
2944 | ret = pread(vdev->fd, &pci_bar, sizeof(pci_bar), | |
2945 | vdev->config_offset + PCI_BASE_ADDRESS_0 + (4 * nr)); | |
2946 | if (ret != sizeof(pci_bar)) { | |
312fd5f2 | 2947 | error_report("vfio: Failed to read BAR %d (%m)", nr); |
65501a74 AW |
2948 | return; |
2949 | } | |
2950 | ||
2951 | pci_bar = le32_to_cpu(pci_bar); | |
39360f0b AW |
2952 | bar->ioport = (pci_bar & PCI_BASE_ADDRESS_SPACE_IO); |
2953 | bar->mem64 = bar->ioport ? 0 : (pci_bar & PCI_BASE_ADDRESS_MEM_TYPE_64); | |
2954 | type = pci_bar & (bar->ioport ? ~PCI_BASE_ADDRESS_IO_MASK : | |
2955 | ~PCI_BASE_ADDRESS_MEM_MASK); | |
65501a74 AW |
2956 | |
2957 | /* A "slow" read/write mapping underlies all BARs */ | |
39360f0b AW |
2958 | memory_region_init_io(&bar->mem, OBJECT(vdev), &vfio_bar_ops, |
2959 | bar, name, size); | |
65501a74 AW |
2960 | pci_register_bar(&vdev->pdev, nr, type, &bar->mem); |
2961 | ||
2962 | /* | |
2963 | * We can't mmap areas overlapping the MSI-X vector table, so we | |
2964 | * potentially insert a direct-mapped subregion before and after it. | |
2965 | */ | |
2966 | if (vdev->msix && vdev->msix->table_bar == nr) { | |
8d7b5a1d | 2967 | size = vdev->msix->table_offset & qemu_host_page_mask; |
65501a74 AW |
2968 | } |
2969 | ||
2970 | strncat(name, " mmap", sizeof(name) - strlen(name) - 1); | |
5cb022a1 | 2971 | if (vfio_mmap_bar(vdev, bar, &bar->mem, |
65501a74 | 2972 | &bar->mmap_mem, &bar->mmap, size, 0, name)) { |
312fd5f2 | 2973 | error_report("%s unsupported. Performance may be slow", name); |
65501a74 AW |
2974 | } |
2975 | ||
2976 | if (vdev->msix && vdev->msix->table_bar == nr) { | |
2977 | unsigned start; | |
2978 | ||
8d7b5a1d AK |
2979 | start = HOST_PAGE_ALIGN(vdev->msix->table_offset + |
2980 | (vdev->msix->entries * PCI_MSIX_ENTRY_SIZE)); | |
65501a74 AW |
2981 | |
2982 | size = start < bar->size ? bar->size - start : 0; | |
2983 | strncat(name, " msix-hi", sizeof(name) - strlen(name) - 1); | |
2984 | /* VFIOMSIXInfo contains another MemoryRegion for this mapping */ | |
5cb022a1 | 2985 | if (vfio_mmap_bar(vdev, bar, &bar->mem, &vdev->msix->mmap_mem, |
65501a74 | 2986 | &vdev->msix->mmap, size, start, name)) { |
312fd5f2 | 2987 | error_report("%s unsupported. Performance may be slow", name); |
65501a74 AW |
2988 | } |
2989 | } | |
7076eabc AW |
2990 | |
2991 | vfio_bar_quirk_setup(vdev, nr); | |
65501a74 AW |
2992 | } |
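/*
 * Sketch of the mmap layout computed in vfio_map_bar() when the MSI-X table
 * lives in the BAR being mapped.  The concrete numbers (4KiB host pages, a
 * 64KiB BAR, table at 0x2000 with 16 entries) are assumptions for
 * illustration; only the page holding the vector table stays on the slow
 * read/write path.
 */
static void __attribute__((unused)) vfio_msix_mmap_windows_example(void)
{
    size_t bar_size = 0x10000;
    uint32_t table_offset = 0x2000;
    int entries = 16;
    size_t lo_size, hi_start, hi_size;

    /* Direct-mapped window below the table: [0x0, 0x2000) */
    lo_size = table_offset & qemu_host_page_mask;

    /* Direct-mapped window above the table: [0x3000, 0x10000) */
    hi_start = HOST_PAGE_ALIGN(table_offset + entries * PCI_MSIX_ENTRY_SIZE);
    hi_size = hi_start < bar_size ? bar_size - hi_start : 0;

    DPRINTF("MSI-X mmap example: low 0x%zx bytes, high 0x%zx bytes @0x%zx\n",
            lo_size, hi_size, hi_start);
}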
2993 | ||
2994 | static void vfio_map_bars(VFIODevice *vdev) | |
2995 | { | |
2996 | int i; | |
2997 | ||
2998 | for (i = 0; i < PCI_ROM_SLOT; i++) { | |
2999 | vfio_map_bar(vdev, i); | |
3000 | } | |
f15689c7 AW |
3001 | |
3002 | if (vdev->has_vga) { | |
3c161542 PB |
3003 | memory_region_init_io(&vdev->vga.region[QEMU_PCI_VGA_MEM].mem, |
3004 | OBJECT(vdev), &vfio_vga_ops, | |
f15689c7 AW |
3005 | &vdev->vga.region[QEMU_PCI_VGA_MEM], |
3006 | "vfio-vga-mmio@0xa0000", | |
3007 | QEMU_PCI_VGA_MEM_SIZE); | |
3c161542 PB |
3008 | memory_region_init_io(&vdev->vga.region[QEMU_PCI_VGA_IO_LO].mem, |
3009 | OBJECT(vdev), &vfio_vga_ops, | |
f15689c7 AW |
3010 | &vdev->vga.region[QEMU_PCI_VGA_IO_LO], |
3011 | "vfio-vga-io@0x3b0", | |
3012 | QEMU_PCI_VGA_IO_LO_SIZE); | |
3c161542 PB |
3013 | memory_region_init_io(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem, |
3014 | OBJECT(vdev), &vfio_vga_ops, | |
f15689c7 AW |
3015 | &vdev->vga.region[QEMU_PCI_VGA_IO_HI], |
3016 | "vfio-vga-io@0x3c0", | |
3017 | QEMU_PCI_VGA_IO_HI_SIZE); | |
3018 | ||
3019 | pci_register_vga(&vdev->pdev, &vdev->vga.region[QEMU_PCI_VGA_MEM].mem, | |
3020 | &vdev->vga.region[QEMU_PCI_VGA_IO_LO].mem, | |
3021 | &vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem); | |
7076eabc | 3022 | vfio_vga_quirk_setup(vdev); |
f15689c7 | 3023 | } |
65501a74 AW |
3024 | } |
3025 | ||
3026 | static void vfio_unmap_bars(VFIODevice *vdev) | |
3027 | { | |
3028 | int i; | |
3029 | ||
3030 | for (i = 0; i < PCI_ROM_SLOT; i++) { | |
3031 | vfio_unmap_bar(vdev, i); | |
3032 | } | |
f15689c7 AW |
3033 | |
3034 | if (vdev->has_vga) { | |
7076eabc | 3035 | vfio_vga_quirk_teardown(vdev); |
f15689c7 | 3036 | pci_unregister_vga(&vdev->pdev); |
f15689c7 | 3037 | } |
65501a74 AW |
3038 | } |
3039 | ||
3040 | /* | |
3041 | * General setup | |
3042 | */ | |
3043 | static uint8_t vfio_std_cap_max_size(PCIDevice *pdev, uint8_t pos) | |
3044 | { | |
3045 | uint8_t tmp, next = 0xff; | |
3046 | ||
3047 | for (tmp = pdev->config[PCI_CAPABILITY_LIST]; tmp; | |
3048 | tmp = pdev->config[tmp + 1]) { | |
3049 | if (tmp > pos && tmp < next) { | |
3050 | next = tmp; | |
3051 | } | |
3052 | } | |
3053 | ||
3054 | return next - pos; | |
3055 | } | |
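/*
 * Worked example of the bound computed by vfio_std_cap_max_size() above,
 * using an assumed capability list at config offsets 0x40, 0x50 and 0x60.
 * The size of a capability is limited by the next-highest capability
 * offset, or by the end of standard config space (0xff) for the last one.
 */
static uint8_t __attribute__((unused)) vfio_std_cap_size_example(uint8_t pos)
{
    const uint8_t caps[] = { 0x40, 0x50, 0x60 };
    uint8_t next = 0xff;
    size_t i;

    for (i = 0; i < ARRAY_SIZE(caps); i++) {
        if (caps[i] > pos && caps[i] < next) {
            next = caps[i];
        }
    }

    /* pos == 0x50 returns 0x10; pos == 0x60 returns 0x9f (0xff - 0x60) */
    return next - pos;
}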
3056 | ||
96adc5c7 AW |
3057 | static void vfio_set_word_bits(uint8_t *buf, uint16_t val, uint16_t mask) |
3058 | { | |
3059 | pci_set_word(buf, (pci_get_word(buf) & ~mask) | val); | |
3060 | } | |
3061 | ||
3062 | static void vfio_add_emulated_word(VFIODevice *vdev, int pos, | |
3063 | uint16_t val, uint16_t mask) | |
3064 | { | |
3065 | vfio_set_word_bits(vdev->pdev.config + pos, val, mask); | |
3066 | vfio_set_word_bits(vdev->pdev.wmask + pos, ~mask, mask); | |
3067 | vfio_set_word_bits(vdev->emulated_config_bits + pos, mask, mask); | |
3068 | } | |
3069 | ||
3070 | static void vfio_set_long_bits(uint8_t *buf, uint32_t val, uint32_t mask) | |
3071 | { | |
3072 | pci_set_long(buf, (pci_get_long(buf) & ~mask) | val); | |
3073 | } | |
3074 | ||
3075 | static void vfio_add_emulated_long(VFIODevice *vdev, int pos, | |
3076 | uint32_t val, uint32_t mask) | |
3077 | { | |
3078 | vfio_set_long_bits(vdev->pdev.config + pos, val, mask); | |
3079 | vfio_set_long_bits(vdev->pdev.wmask + pos, ~mask, mask); | |
3080 | vfio_set_long_bits(vdev->emulated_config_bits + pos, mask, mask); | |
3081 | } | |
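/*
 * Minimal usage sketch for the emulation helpers above.  The offset and
 * value are assumptions for illustration; the real callers are
 * vfio_setup_pcie_cap() and vfio_add_std_cap().  After the call, guest
 * reads of the masked bits are served from QEMU's copy of config space
 * (vdev->pdev.config), guest writes to them are discarded (wmask cleared),
 * and emulated_config_bits records that the bits are QEMU-owned rather
 * than passed through to the physical device.
 */
static void __attribute__((unused)) vfio_emulated_bits_example(VFIODevice *vdev)
{
    /* Present a fully emulated 16-bit register at config offset 0x62 */
    vfio_add_emulated_word(vdev, 0x62, 0x0042, 0xffff);
}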
3082 | ||
3083 | static int vfio_setup_pcie_cap(VFIODevice *vdev, int pos, uint8_t size) | |
3084 | { | |
3085 | uint16_t flags; | |
3086 | uint8_t type; | |
3087 | ||
3088 | flags = pci_get_word(vdev->pdev.config + pos + PCI_CAP_FLAGS); | |
3089 | type = (flags & PCI_EXP_FLAGS_TYPE) >> 4; | |
3090 | ||
3091 | if (type != PCI_EXP_TYPE_ENDPOINT && | |
3092 | type != PCI_EXP_TYPE_LEG_END && | |
3093 | type != PCI_EXP_TYPE_RC_END) { | |
3094 | ||
3095 | error_report("vfio: Assignment of PCIe type 0x%x " | |
3096 | "devices is not currently supported", type); | |
3097 | return -EINVAL; | |
3098 | } | |
3099 | ||
3100 | if (!pci_bus_is_express(vdev->pdev.bus)) { | |
3101 | /* | |
3102 | * Use express capability as-is on PCI bus. It doesn't make much | |
3103 | * sense to even expose it, but some drivers (e.g. tg3) depend on it | |
3104 | * and guests don't seem to be particular about it. We'll need | |
3105 | * to revisit this or force express devices to express buses if we | |
3106 | * ever expose an IOMMU to the guest. | |
3107 | */ | |
3108 | } else if (pci_bus_is_root(vdev->pdev.bus)) { | |
3109 | /* | |
3110 | * On a Root Complex bus Endpoints become Root Complex Integrated | |
3111 | * Endpoints, which changes the type and clears the LNK & LNK2 fields. | |
3112 | */ | |
3113 | if (type == PCI_EXP_TYPE_ENDPOINT) { | |
3114 | vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS, | |
3115 | PCI_EXP_TYPE_RC_END << 4, | |
3116 | PCI_EXP_FLAGS_TYPE); | |
3117 | ||
3118 | /* Link Capabilities, Status, and Control go away */ | |
3119 | if (size > PCI_EXP_LNKCTL) { | |
3120 | vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP, 0, ~0); | |
3121 | vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0); | |
3122 | vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA, 0, ~0); | |
3123 | ||
3124 | #ifndef PCI_EXP_LNKCAP2 | |
3125 | #define PCI_EXP_LNKCAP2 44 | |
3126 | #endif | |
3127 | #ifndef PCI_EXP_LNKSTA2 | |
3128 | #define PCI_EXP_LNKSTA2 50 | |
3129 | #endif | |
3130 | /* Link 2 Capabilities, Status, and Control go away */ | |
3131 | if (size > PCI_EXP_LNKCAP2) { | |
3132 | vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP2, 0, ~0); | |
3133 | vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL2, 0, ~0); | |
3134 | vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA2, 0, ~0); | |
3135 | } | |
3136 | } | |
3137 | ||
3138 | } else if (type == PCI_EXP_TYPE_LEG_END) { | |
3139 | /* | |
3140 | * Legacy endpoints don't belong on the root complex. Windows | |
3141 | * seems to be happier with devices if we skip the capability. | |
3142 | */ | |
3143 | return 0; | |
3144 | } | |
3145 | ||
3146 | } else { | |
3147 | /* | |
3148 | * Convert Root Complex Integrated Endpoints to regular endpoints. | |
3149 | * These devices don't support LNK/LNK2 capabilities, so make them up. | |
3150 | */ | |
3151 | if (type == PCI_EXP_TYPE_RC_END) { | |
3152 | vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS, | |
3153 | PCI_EXP_TYPE_ENDPOINT << 4, | |
3154 | PCI_EXP_FLAGS_TYPE); | |
3155 | vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP, | |
3156 | PCI_EXP_LNK_MLW_1 | PCI_EXP_LNK_LS_25, ~0); | |
3157 | vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0); | |
3158 | } | |
3159 | ||
3160 | /* Mark the Link Status bits as emulated to allow virtual negotiation */ | |
3161 | vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA, | |
3162 | pci_get_word(vdev->pdev.config + pos + | |
3163 | PCI_EXP_LNKSTA), | |
3164 | PCI_EXP_LNKCAP_MLW | PCI_EXP_LNKCAP_SLS); | |
3165 | } | |
3166 | ||
3167 | pos = pci_add_capability(&vdev->pdev, PCI_CAP_ID_EXP, pos, size); | |
3168 | if (pos >= 0) { | |
3169 | vdev->pdev.exp.exp_cap = pos; | |
3170 | } | |
3171 | ||
3172 | return pos; | |
3173 | } | |
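/*
 * Worked example of the express type rewrite performed above, with assumed
 * register contents: a version 2 PCIe capability for a regular Endpoint has
 * PCI_EXP_FLAGS = 0x0002.  Presenting it as a Root Complex Integrated
 * Endpoint only rewrites the device/port type field (bits 7:4), giving
 * 0x0092; the capability version in bits 3:0 is preserved.
 */
static uint16_t __attribute__((unused)) vfio_pcie_rc_end_flags_example(void)
{
    uint16_t flags = 0x0002;    /* cap version 2, type = Endpoint (0x0) */

    flags = (flags & ~PCI_EXP_FLAGS_TYPE) | (PCI_EXP_TYPE_RC_END << 4);

    return flags;               /* 0x0092 */
}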
3174 | ||
befe5176 AW |
3175 | static void vfio_check_pcie_flr(VFIODevice *vdev, uint8_t pos) |
3176 | { | |
3177 | uint32_t cap = pci_get_long(vdev->pdev.config + pos + PCI_EXP_DEVCAP); | |
3178 | ||
3179 | if (cap & PCI_EXP_DEVCAP_FLR) { | |
3180 | DPRINTF("%04x:%02x:%02x.%x Supports FLR via PCIe cap\n", | |
3181 | vdev->host.domain, vdev->host.bus, vdev->host.slot, | |
3182 | vdev->host.function); | |
3183 | vdev->has_flr = true; | |
3184 | } | |
3185 | } | |
3186 | ||
3187 | static void vfio_check_pm_reset(VFIODevice *vdev, uint8_t pos) | |
3188 | { | |
3189 | uint16_t csr = pci_get_word(vdev->pdev.config + pos + PCI_PM_CTRL); | |
3190 | ||
3191 | if (!(csr & PCI_PM_CTRL_NO_SOFT_RESET)) { | |
3192 | DPRINTF("%04x:%02x:%02x.%x Supports PM reset\n", | |
3193 | vdev->host.domain, vdev->host.bus, vdev->host.slot, | |
3194 | vdev->host.function); | |
3195 | vdev->has_pm_reset = true; | |
3196 | } | |
3197 | } | |
3198 | ||
3199 | static void vfio_check_af_flr(VFIODevice *vdev, uint8_t pos) | |
3200 | { | |
3201 | uint8_t cap = pci_get_byte(vdev->pdev.config + pos + PCI_AF_CAP); | |
3202 | ||
3203 | if ((cap & PCI_AF_CAP_TP) && (cap & PCI_AF_CAP_FLR)) { | |
3204 | DPRINTF("%04x:%02x:%02x.%x Supports FLR via AF cap\n", | |
3205 | vdev->host.domain, vdev->host.bus, vdev->host.slot, | |
3206 | vdev->host.function); | |
3207 | vdev->has_flr = true; | |
3208 | } | |
3209 | } | |
3210 | ||
65501a74 AW |
3211 | static int vfio_add_std_cap(VFIODevice *vdev, uint8_t pos) |
3212 | { | |
3213 | PCIDevice *pdev = &vdev->pdev; | |
3214 | uint8_t cap_id, next, size; | |
3215 | int ret; | |
3216 | ||
3217 | cap_id = pdev->config[pos]; | |
3218 | next = pdev->config[pos + 1]; | |
3219 | ||
3220 | /* | |
3221 | * If it becomes important to configure capabilities to their actual | |
3222 | * size, use this as the default when it's something we don't recognize. | |
3223 | * Since QEMU doesn't actually handle many of the config accesses, | |
3224 | * getting the exact size doesn't seem worthwhile. | |
3225 | */ | |
3226 | size = vfio_std_cap_max_size(pdev, pos); | |
3227 | ||
3228 | /* | |
3229 | * pci_add_capability always inserts the new capability at the head | |
3230 | * of the chain. Therefore to end up with a chain that matches the | |
3231 | * physical device, we insert from the end by making this recursive. | |
3232 | * This is also why we pre-calculate size above, as the cached config space | |
3233 | * will be changed as we unwind the stack. | |
3234 | */ | |
3235 | if (next) { | |
3236 | ret = vfio_add_std_cap(vdev, next); | |
3237 | if (ret) { | |
3238 | return ret; | |
3239 | } | |
3240 | } else { | |
96adc5c7 AW |
3241 | /* Begin the rebuild, use QEMU emulated list bits */ |
3242 | pdev->config[PCI_CAPABILITY_LIST] = 0; | |
3243 | vdev->emulated_config_bits[PCI_CAPABILITY_LIST] = 0xff; | |
3244 | vdev->emulated_config_bits[PCI_STATUS] |= PCI_STATUS_CAP_LIST; | |
65501a74 AW |
3245 | } |
3246 | ||
96adc5c7 AW |
3247 | /* Use emulated next pointer to allow dropping caps */ |
3248 | pci_set_byte(vdev->emulated_config_bits + pos + 1, 0xff); | |
3249 | ||
65501a74 AW |
3250 | switch (cap_id) { |
3251 | case PCI_CAP_ID_MSI: | |
3252 | ret = vfio_setup_msi(vdev, pos); | |
3253 | break; | |
96adc5c7 | 3254 | case PCI_CAP_ID_EXP: |
befe5176 | 3255 | vfio_check_pcie_flr(vdev, pos); |
96adc5c7 AW |
3256 | ret = vfio_setup_pcie_cap(vdev, pos, size); |
3257 | break; | |
65501a74 AW |
3258 | case PCI_CAP_ID_MSIX: |
3259 | ret = vfio_setup_msix(vdev, pos); | |
3260 | break; | |
ba661818 | 3261 | case PCI_CAP_ID_PM: |
befe5176 | 3262 | vfio_check_pm_reset(vdev, pos); |
ba661818 | 3263 | vdev->pm_cap = pos; |
befe5176 AW |
3264 | ret = pci_add_capability(pdev, cap_id, pos, size); |
3265 | break; | |
3266 | case PCI_CAP_ID_AF: | |
3267 | vfio_check_af_flr(vdev, pos); | |
3268 | ret = pci_add_capability(pdev, cap_id, pos, size); | |
3269 | break; | |
65501a74 AW |
3270 | default: |
3271 | ret = pci_add_capability(pdev, cap_id, pos, size); | |
3272 | break; | |
3273 | } | |
3274 | ||
3275 | if (ret < 0) { | |
3276 | error_report("vfio: %04x:%02x:%02x.%x Error adding PCI capability " | |
312fd5f2 | 3277 | "0x%x[0x%x]@0x%x: %d", vdev->host.domain, |
65501a74 AW |
3278 | vdev->host.bus, vdev->host.slot, vdev->host.function, |
3279 | cap_id, size, pos, ret); | |
3280 | return ret; | |
3281 | } | |
3282 | ||
3283 | return 0; | |
3284 | } | |
3285 | ||
3286 | static int vfio_add_capabilities(VFIODevice *vdev) | |
3287 | { | |
3288 | PCIDevice *pdev = &vdev->pdev; | |
3289 | ||
3290 | if (!(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST) || | |
3291 | !pdev->config[PCI_CAPABILITY_LIST]) { | |
3292 | return 0; /* Nothing to add */ | |
3293 | } | |
3294 | ||
3295 | return vfio_add_std_cap(vdev, pdev->config[PCI_CAPABILITY_LIST]); | |
3296 | } | |
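/*
 * Sketch of the ordering argument in vfio_add_std_cap(): pci_add_capability()
 * inserts at the head of the list, so inserting capabilities in reverse
 * (deepest recursion first) reproduces the physical chain order.  The three
 * offsets are assumptions for illustration only.
 */
static void __attribute__((unused)) vfio_cap_chain_order_example(void)
{
    uint8_t phys_chain[] = { 0x40, 0x50, 0x60 };    /* physical order */
    uint8_t rebuilt[ARRAY_SIZE(phys_chain)];
    int i, n = 0;

    /* Head-insert in reverse order, as the recursion unwinds... */
    for (i = ARRAY_SIZE(phys_chain) - 1; i >= 0; i--) {
        memmove(&rebuilt[1], &rebuilt[0], n * sizeof(rebuilt[0]));
        rebuilt[0] = phys_chain[i];
        n++;
    }

    /* ...and the rebuilt chain matches the physical one: 0x40, 0x50, 0x60 */
    assert(rebuilt[0] == 0x40 && rebuilt[1] == 0x50 && rebuilt[2] == 0x60);
}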
3297 | ||
f16f39c3 AW |
3298 | static void vfio_pci_pre_reset(VFIODevice *vdev) |
3299 | { | |
3300 | PCIDevice *pdev = &vdev->pdev; | |
3301 | uint16_t cmd; | |
3302 | ||
3303 | vfio_disable_interrupts(vdev); | |
3304 | ||
3305 | /* Make sure the device is in D0 */ | |
3306 | if (vdev->pm_cap) { | |
3307 | uint16_t pmcsr; | |
3308 | uint8_t state; | |
3309 | ||
3310 | pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2); | |
3311 | state = pmcsr & PCI_PM_CTRL_STATE_MASK; | |
3312 | if (state) { | |
3313 | pmcsr &= ~PCI_PM_CTRL_STATE_MASK; | |
3314 | vfio_pci_write_config(pdev, vdev->pm_cap + PCI_PM_CTRL, pmcsr, 2); | |
3315 | /* vfio handles the necessary delay here */ | |
3316 | pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2); | |
3317 | state = pmcsr & PCI_PM_CTRL_STATE_MASK; | |
3318 | if (state) { | |
4e505ddd | 3319 | error_report("vfio: Unable to power on device, stuck in D%d", |
f16f39c3 AW |
3320 | state); |
3321 | } | |
3322 | } | |
3323 | } | |
3324 | ||
3325 | /* | |
3326 | * Stop any ongoing DMA by disconnecting I/O, MMIO, and bus master. | |
3327 | * Also put INTx Disable in known state. | |
3328 | */ | |
3329 | cmd = vfio_pci_read_config(pdev, PCI_COMMAND, 2); | |
3330 | cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | | |
3331 | PCI_COMMAND_INTX_DISABLE); | |
3332 | vfio_pci_write_config(pdev, PCI_COMMAND, cmd, 2); | |
3333 | } | |
3334 | ||
3335 | static void vfio_pci_post_reset(VFIODevice *vdev) | |
3336 | { | |
3337 | vfio_enable_intx(vdev); | |
3338 | } | |
3339 | ||
3340 | static bool vfio_pci_host_match(PCIHostDeviceAddress *host1, | |
3341 | PCIHostDeviceAddress *host2) | |
3342 | { | |
3343 | return (host1->domain == host2->domain && host1->bus == host2->bus && | |
3344 | host1->slot == host2->slot && host1->function == host2->function); | |
3345 | } | |
3346 | ||
3347 | static int vfio_pci_hot_reset(VFIODevice *vdev, bool single) | |
3348 | { | |
3349 | VFIOGroup *group; | |
3350 | struct vfio_pci_hot_reset_info *info; | |
3351 | struct vfio_pci_dependent_device *devices; | |
3352 | struct vfio_pci_hot_reset *reset; | |
3353 | int32_t *fds; | |
3354 | int ret, i, count; | |
3355 | bool multi = false; | |
3356 | ||
3357 | DPRINTF("%s(%04x:%02x:%02x.%x) %s\n", __func__, vdev->host.domain, | |
3358 | vdev->host.bus, vdev->host.slot, vdev->host.function, | |
3359 | single ? "one" : "multi"); | |
3360 | ||
3361 | vfio_pci_pre_reset(vdev); | |
3362 | vdev->needs_reset = false; | |
3363 | ||
3364 | info = g_malloc0(sizeof(*info)); | |
3365 | info->argsz = sizeof(*info); | |
3366 | ||
3367 | ret = ioctl(vdev->fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info); | |
3368 | if (ret && errno != ENOSPC) { | |
3369 | ret = -errno; | |
3370 | if (!vdev->has_pm_reset) { | |
3371 | error_report("vfio: Cannot reset device %04x:%02x:%02x.%x, " | |
3372 | "no available reset mechanism.", vdev->host.domain, | |
3373 | vdev->host.bus, vdev->host.slot, vdev->host.function); | |
3374 | } | |
3375 | goto out_single; | |
3376 | } | |
3377 | ||
3378 | count = info->count; | |
3379 | info = g_realloc(info, sizeof(*info) + (count * sizeof(*devices))); | |
3380 | info->argsz = sizeof(*info) + (count * sizeof(*devices)); | |
3381 | devices = &info->devices[0]; | |
3382 | ||
3383 | ret = ioctl(vdev->fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info); | |
3384 | if (ret) { | |
3385 | ret = -errno; | |
3386 | error_report("vfio: hot reset info failed: %m"); | |
3387 | goto out_single; | |
3388 | } | |
3389 | ||
3390 | DPRINTF("%04x:%02x:%02x.%x: hot reset dependent devices:\n", | |
3391 | vdev->host.domain, vdev->host.bus, vdev->host.slot, | |
3392 | vdev->host.function); | |
3393 | ||
3394 | /* Verify that we have all the groups required */ | |
3395 | for (i = 0; i < info->count; i++) { | |
3396 | PCIHostDeviceAddress host; | |
3397 | VFIODevice *tmp; | |
3398 | ||
3399 | host.domain = devices[i].segment; | |
3400 | host.bus = devices[i].bus; | |
3401 | host.slot = PCI_SLOT(devices[i].devfn); | |
3402 | host.function = PCI_FUNC(devices[i].devfn); | |
3403 | ||
3404 | DPRINTF("\t%04x:%02x:%02x.%x group %d\n", host.domain, | |
3405 | host.bus, host.slot, host.function, devices[i].group_id); | |
3406 | ||
3407 | if (vfio_pci_host_match(&host, &vdev->host)) { | |
3408 | continue; | |
3409 | } | |
3410 | ||
3411 | QLIST_FOREACH(group, &group_list, next) { | |
3412 | if (group->groupid == devices[i].group_id) { | |
3413 | break; | |
3414 | } | |
3415 | } | |
3416 | ||
3417 | if (!group) { | |
3418 | if (!vdev->has_pm_reset) { | |
3419 | error_report("vfio: Cannot reset device %04x:%02x:%02x.%x, " | |
3420 | "depends on group %d which is not owned.", | |
3421 | vdev->host.domain, vdev->host.bus, vdev->host.slot, | |
3422 | vdev->host.function, devices[i].group_id); | |
3423 | } | |
3424 | ret = -EPERM; | |
3425 | goto out; | |
3426 | } | |
3427 | ||
3428 | /* Prep dependent devices for reset and clear our marker. */ | |
3429 | QLIST_FOREACH(tmp, &group->device_list, next) { | |
3430 | if (vfio_pci_host_match(&host, &tmp->host)) { | |
3431 | if (single) { | |
3432 | DPRINTF("vfio: found another in-use device " | |
3433 | "%04x:%02x:%02x.%x\n", host.domain, host.bus, | |
3434 | host.slot, host.function); | |
3435 | ret = -EINVAL; | |
3436 | goto out_single; | |
3437 | } | |
3438 | vfio_pci_pre_reset(tmp); | |
3439 | tmp->needs_reset = false; | |
3440 | multi = true; | |
3441 | break; | |
3442 | } | |
3443 | } | |
3444 | } | |
3445 | ||
3446 | if (!single && !multi) { | |
3447 | DPRINTF("vfio: No other in-use devices for multi hot reset\n"); | |
3448 | ret = -EINVAL; | |
3449 | goto out_single; | |
3450 | } | |
3451 | ||
3452 | /* Determine how many group fds need to be passed */ | |
3453 | count = 0; | |
3454 | QLIST_FOREACH(group, &group_list, next) { | |
3455 | for (i = 0; i < info->count; i++) { | |
3456 | if (group->groupid == devices[i].group_id) { | |
3457 | count++; | |
3458 | break; | |
3459 | } | |
3460 | } | |
3461 | } | |
3462 | ||
3463 | reset = g_malloc0(sizeof(*reset) + (count * sizeof(*fds))); | |
3464 | reset->argsz = sizeof(*reset) + (count * sizeof(*fds)); | |
3465 | fds = &reset->group_fds[0]; | |
3466 | ||
3467 | /* Fill in group fds */ | |
3468 | QLIST_FOREACH(group, &group_list, next) { | |
3469 | for (i = 0; i < info->count; i++) { | |
3470 | if (group->groupid == devices[i].group_id) { | |
3471 | fds[reset->count++] = group->fd; | |
3472 | break; | |
3473 | } | |
3474 | } | |
3475 | } | |
3476 | ||
3477 | /* Bus reset! */ | |
3478 | ret = ioctl(vdev->fd, VFIO_DEVICE_PCI_HOT_RESET, reset); | |
3479 | g_free(reset); | |
3480 | ||
3481 | DPRINTF("%04x:%02x:%02x.%x hot reset: %s\n", vdev->host.domain, | |
3482 | vdev->host.bus, vdev->host.slot, vdev->host.function, | |
3483 | ret ? "%m" : "Success"); | |
3484 | ||
3485 | out: | |
3486 | /* Re-enable INTx on affected devices */ | |
3487 | for (i = 0; i < info->count; i++) { | |
3488 | PCIHostDeviceAddress host; | |
3489 | VFIODevice *tmp; | |
3490 | ||
3491 | host.domain = devices[i].segment; | |
3492 | host.bus = devices[i].bus; | |
3493 | host.slot = PCI_SLOT(devices[i].devfn); | |
3494 | host.function = PCI_FUNC(devices[i].devfn); | |
3495 | ||
3496 | if (vfio_pci_host_match(&host, &vdev->host)) { | |
3497 | continue; | |
3498 | } | |
3499 | ||
3500 | QLIST_FOREACH(group, &group_list, next) { | |
3501 | if (group->groupid == devices[i].group_id) { | |
3502 | break; | |
3503 | } | |
3504 | } | |
3505 | ||
3506 | if (!group) { | |
3507 | break; | |
3508 | } | |
3509 | ||
3510 | QLIST_FOREACH(tmp, &group->device_list, next) { | |
3511 | if (vfio_pci_host_match(&host, &tmp->host)) { | |
3512 | vfio_pci_post_reset(tmp); | |
3513 | break; | |
3514 | } | |
3515 | } | |
3516 | } | |
3517 | out_single: | |
3518 | vfio_pci_post_reset(vdev); | |
3519 | g_free(info); | |
3520 | ||
3521 | return ret; | |
3522 | } | |
3523 | ||
3524 | /* | |
3525 | * We want to differentiate hot reset of multiple in-use devices vs hot reset | |
3526 | * of a single in-use device. VFIO_DEVICE_RESET will already handle the case | |
3527 | * of doing hot resets when there is only a single device per bus. The in-use | |
3528 | * here refers to how many VFIODevices are affected. A hot reset that affects | |
3529 | * multiple devices, but only a single in-use device, means that we can call | |
3530 | * it from our bus ->reset() callback since the extent is effectively a single | |
3531 | * device. This allows us to make use of it in the hotplug path. When there | |
3532 | * are multiple in-use devices, we can only trigger the hot reset during a | |
3533 | * system reset and thus from our reset handler. We separate _one vs _multi | |
3534 | * here so that we don't overlap and do a double reset on the system reset | |
3535 | * path where both our reset handler and ->reset() callback are used. Calling | |
3536 | * _one() will only do a hot reset for the single in-use device case; calling | |
3537 | * _multi() will do nothing if a _one() would have been sufficient. | |
3538 | */ | |
3539 | static int vfio_pci_hot_reset_one(VFIODevice *vdev) | |
3540 | { | |
3541 | return vfio_pci_hot_reset(vdev, true); | |
3542 | } | |
3543 | ||
3544 | static int vfio_pci_hot_reset_multi(VFIODevice *vdev) | |
3545 | { | |
3546 | return vfio_pci_hot_reset(vdev, false); | |
3547 | } | |
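/*
 * Illustration of the pairing described above, assuming a device whose bus
 * reset would also touch other QEMU-owned devices.  This helper is not part
 * of the driver flow; the real callers are vfio_pci_reset() (bus reset path,
 * uses _one) and vfio_pci_reset_handler() (system reset path, uses _multi).
 */
static void __attribute__((unused)) vfio_hot_reset_pairing_example(VFIODevice *vdev)
{
    /*
     * The single-device form fails (e.g. -EINVAL) when other in-use devices
     * share the reset extent; in that case nothing is reset here and the
     * system reset handler performs one multi-device hot reset instead.
     */
    if (vfio_pci_hot_reset_one(vdev) < 0) {
        DPRINTF("deferring to vfio_pci_hot_reset_multi() at system reset\n");
    }
}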
3548 | ||
3549 | static void vfio_pci_reset_handler(void *opaque) | |
3550 | { | |
3551 | VFIOGroup *group; | |
3552 | VFIODevice *vdev; | |
3553 | ||
3554 | QLIST_FOREACH(group, &group_list, next) { | |
3555 | QLIST_FOREACH(vdev, &group->device_list, next) { | |
3556 | if (!vdev->reset_works || (!vdev->has_flr && vdev->has_pm_reset)) { | |
3557 | vdev->needs_reset = true; | |
3558 | } | |
3559 | } | |
3560 | } | |
3561 | ||
3562 | QLIST_FOREACH(group, &group_list, next) { | |
3563 | QLIST_FOREACH(vdev, &group->device_list, next) { | |
3564 | if (vdev->needs_reset) { | |
3565 | vfio_pci_hot_reset_multi(vdev); | |
3566 | } | |
3567 | } | |
3568 | } | |
3569 | } | |
3570 | ||
5b49ab18 AW |
3571 | static void vfio_kvm_device_add_group(VFIOGroup *group) |
3572 | { | |
3573 | #ifdef CONFIG_KVM | |
3574 | struct kvm_device_attr attr = { | |
3575 | .group = KVM_DEV_VFIO_GROUP, | |
3576 | .attr = KVM_DEV_VFIO_GROUP_ADD, | |
3577 | .addr = (uint64_t)(unsigned long)&group->fd, | |
3578 | }; | |
3579 | ||
3580 | if (!kvm_enabled()) { | |
3581 | return; | |
3582 | } | |
3583 | ||
3584 | if (vfio_kvm_device_fd < 0) { | |
3585 | struct kvm_create_device cd = { | |
3586 | .type = KVM_DEV_TYPE_VFIO, | |
3587 | }; | |
3588 | ||
3589 | if (kvm_vm_ioctl(kvm_state, KVM_CREATE_DEVICE, &cd)) { | |
3590 | DPRINTF("KVM_CREATE_DEVICE: %m\n"); | |
3591 | return; | |
3592 | } | |
3593 | ||
3594 | vfio_kvm_device_fd = cd.fd; | |
3595 | } | |
3596 | ||
3597 | if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) { | |
3598 | error_report("Failed to add group %d to KVM VFIO device: %m", | |
3599 | group->groupid); | |
3600 | } | |
3601 | #endif | |
3602 | } | |
3603 | ||
3604 | static void vfio_kvm_device_del_group(VFIOGroup *group) | |
3605 | { | |
3606 | #ifdef CONFIG_KVM | |
3607 | struct kvm_device_attr attr = { | |
3608 | .group = KVM_DEV_VFIO_GROUP, | |
3609 | .attr = KVM_DEV_VFIO_GROUP_DEL, | |
3610 | .addr = (uint64_t)(unsigned long)&group->fd, | |
3611 | }; | |
3612 | ||
3613 | if (vfio_kvm_device_fd < 0) { | |
3614 | return; | |
3615 | } | |
3616 | ||
3617 | if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) { | |
4e505ddd | 3618 | error_report("Failed to remove group %d from KVM VFIO device: %m", |
5b49ab18 AW |
3619 | group->groupid); |
3620 | } | |
3621 | #endif | |
3622 | } | |
3623 | ||
0688448b DG |
3624 | static VFIOAddressSpace *vfio_get_address_space(AddressSpace *as) |
3625 | { | |
3626 | VFIOAddressSpace *space; | |
3627 | ||
3628 | QLIST_FOREACH(space, &vfio_address_spaces, list) { | |
3629 | if (space->as == as) { | |
3630 | return space; | |
3631 | } | |
3632 | } | |
3633 | ||
3634 | /* No suitable VFIOAddressSpace, create a new one */ | |
3635 | space = g_malloc0(sizeof(*space)); | |
3636 | space->as = as; | |
3637 | QLIST_INIT(&space->containers); | |
3638 | ||
3639 | QLIST_INSERT_HEAD(&vfio_address_spaces, space, list); | |
3640 | ||
3641 | return space; | |
3642 | } | |
3643 | ||
3644 | static void vfio_put_address_space(VFIOAddressSpace *space) | |
3645 | { | |
3646 | if (QLIST_EMPTY(&space->containers)) { | |
3647 | QLIST_REMOVE(space, list); | |
3648 | g_free(space); | |
3649 | } | |
3650 | } | |
3651 | ||
3df3e0a5 | 3652 | static int vfio_connect_container(VFIOGroup *group, AddressSpace *as) |
65501a74 AW |
3653 | { |
3654 | VFIOContainer *container; | |
3655 | int ret, fd; | |
3df3e0a5 | 3656 | VFIOAddressSpace *space; |
65501a74 | 3657 | |
0688448b | 3658 | space = vfio_get_address_space(as); |
65501a74 | 3659 | |
3df3e0a5 | 3660 | QLIST_FOREACH(container, &space->containers, next) { |
65501a74 AW |
3661 | if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) { |
3662 | group->container = container; | |
3663 | QLIST_INSERT_HEAD(&container->group_list, group, container_next); | |
3664 | return 0; | |
3665 | } | |
3666 | } | |
3667 | ||
3668 | fd = qemu_open("/dev/vfio/vfio", O_RDWR); | |
3669 | if (fd < 0) { | |
312fd5f2 | 3670 | error_report("vfio: failed to open /dev/vfio/vfio: %m"); |
0688448b DG |
3671 | ret = -errno; |
3672 | goto put_space_exit; | |
65501a74 AW |
3673 | } |
3674 | ||
3675 | ret = ioctl(fd, VFIO_GET_API_VERSION); | |
3676 | if (ret != VFIO_API_VERSION) { | |
3677 | error_report("vfio: supported vfio version: %d, " | |
312fd5f2 | 3678 | "reported version: %d", VFIO_API_VERSION, ret); |
279a35ab AK |
3679 | ret = -EINVAL; |
3680 | goto close_fd_exit; | |
65501a74 AW |
3681 | } |
3682 | ||
3683 | container = g_malloc0(sizeof(*container)); | |
3df3e0a5 | 3684 | container->space = space; |
65501a74 AW |
3685 | container->fd = fd; |
3686 | ||
3687 | if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU)) { | |
3688 | ret = ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &fd); | |
3689 | if (ret) { | |
312fd5f2 | 3690 | error_report("vfio: failed to set group container: %m"); |
279a35ab AK |
3691 | ret = -errno; |
3692 | goto free_container_exit; | |
65501a74 AW |
3693 | } |
3694 | ||
3695 | ret = ioctl(fd, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU); | |
3696 | if (ret) { | |
312fd5f2 | 3697 | error_report("vfio: failed to set iommu for container: %m"); |
279a35ab AK |
3698 | ret = -errno; |
3699 | goto free_container_exit; | |
65501a74 AW |
3700 | } |
3701 | ||
87ca1f77 | 3702 | container->iommu_data.type1.listener = vfio_memory_listener; |
65501a74 AW |
3703 | container->iommu_data.release = vfio_listener_release; |
3704 | ||
87ca1f77 AW |
3705 | memory_listener_register(&container->iommu_data.type1.listener, |
3706 | &address_space_memory); | |
3707 | ||
3708 | if (container->iommu_data.type1.error) { | |
3709 | ret = container->iommu_data.type1.error; | |
4e505ddd | 3710 | error_report("vfio: memory listener initialization failed for container"); |
279a35ab | 3711 | goto listener_release_exit; |
87ca1f77 AW |
3712 | } |
3713 | ||
3714 | container->iommu_data.type1.initialized = true; | |
3715 | ||
59181263 AK |
3716 | } else if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_SPAPR_TCE_IOMMU)) { |
3717 | ret = ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &fd); | |
3718 | if (ret) { | |
3719 | error_report("vfio: failed to set group container: %m"); | |
3720 | ret = -errno; | |
3721 | goto free_container_exit; | |
3722 | } | |
3723 | ||
3724 | ret = ioctl(fd, VFIO_SET_IOMMU, VFIO_SPAPR_TCE_IOMMU); | |
3725 | if (ret) { | |
3726 | error_report("vfio: failed to set iommu for container: %m"); | |
3727 | ret = -errno; | |
3728 | goto free_container_exit; | |
3729 | } | |
3730 | ||
3731 | /* | |
3732 | * The host kernel code implementing VFIO_IOMMU_DISABLE is called | |
3733 | * when container fd is closed so we do not call it explicitly | |
3734 | * in this file. | |
3735 | */ | |
3736 | ret = ioctl(fd, VFIO_IOMMU_ENABLE); | |
3737 | if (ret) { | |
3738 | error_report("vfio: failed to enable container: %m"); | |
3739 | ret = -errno; | |
3740 | goto free_container_exit; | |
3741 | } | |
3742 | ||
3743 | container->iommu_data.type1.listener = vfio_memory_listener; | |
3744 | container->iommu_data.release = vfio_listener_release; | |
3745 | ||
3746 | memory_listener_register(&container->iommu_data.type1.listener, | |
3747 | container->space->as); | |
3748 | ||
65501a74 | 3749 | } else { |
312fd5f2 | 3750 | error_report("vfio: No available IOMMU models"); |
279a35ab AK |
3751 | ret = -EINVAL; |
3752 | goto free_container_exit; | |
65501a74 AW |
3753 | } |
3754 | ||
3755 | QLIST_INIT(&container->group_list); | |
3df3e0a5 | 3756 | QLIST_INSERT_HEAD(&space->containers, container, next); |
65501a74 AW |
3757 | |
3758 | group->container = container; | |
3759 | QLIST_INSERT_HEAD(&container->group_list, group, container_next); | |
3760 | ||
3761 | return 0; | |
279a35ab AK |
3762 | |
3763 | listener_release_exit: | |
3764 | vfio_listener_release(container); | |
3765 | ||
3766 | free_container_exit: | |
3767 | g_free(container); | |
3768 | ||
3769 | close_fd_exit: | |
3770 | close(fd); | |
3771 | ||
0688448b DG |
3772 | put_space_exit: |
3773 | vfio_put_address_space(space); | |
3774 | ||
279a35ab | 3775 | return ret; |
65501a74 AW |
3776 | } |
3777 | ||
3778 | static void vfio_disconnect_container(VFIOGroup *group) | |
3779 | { | |
3780 | VFIOContainer *container = group->container; | |
3781 | ||
3782 | if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, &container->fd)) { | |
312fd5f2 | 3783 | error_report("vfio: error disconnecting group %d from container", |
65501a74 AW |
3784 | group->groupid); |
3785 | } | |
3786 | ||
3787 | QLIST_REMOVE(group, container_next); | |
3788 | group->container = NULL; | |
3789 | ||
3790 | if (QLIST_EMPTY(&container->group_list)) { | |
0688448b DG |
3791 | VFIOAddressSpace *space = container->space; |
3792 | ||
65501a74 AW |
3793 | if (container->iommu_data.release) { |
3794 | container->iommu_data.release(container); | |
3795 | } | |
3796 | QLIST_REMOVE(container, next); | |
3797 | DPRINTF("vfio_disconnect_container: close container->fd\n"); | |
3798 | close(container->fd); | |
3799 | g_free(container); | |
0688448b DG |
3800 | |
3801 | vfio_put_address_space(space); | |
65501a74 AW |
3802 | } |
3803 | } | |
3804 | ||
3df3e0a5 | 3805 | static VFIOGroup *vfio_get_group(int groupid, AddressSpace *as) |
65501a74 AW |
3806 | { |
3807 | VFIOGroup *group; | |
3808 | char path[32]; | |
3809 | struct vfio_group_status status = { .argsz = sizeof(status) }; | |
3810 | ||
3811 | QLIST_FOREACH(group, &group_list, next) { | |
3812 | if (group->groupid == groupid) { | |
3df3e0a5 DG |
3813 | /* Found it. Now is it already in the right context? */ |
3814 | if (group->container->space->as == as) { | |
3815 | return group; | |
3816 | } else { | |
3817 | error_report("vfio: group %d used in multiple address spaces", | |
3818 | group->groupid); | |
3819 | return NULL; | |
3820 | } | |
65501a74 AW |
3821 | } |
3822 | } | |
3823 | ||
3824 | group = g_malloc0(sizeof(*group)); | |
3825 | ||
3826 | snprintf(path, sizeof(path), "/dev/vfio/%d", groupid); | |
3827 | group->fd = qemu_open(path, O_RDWR); | |
3828 | if (group->fd < 0) { | |
312fd5f2 | 3829 | error_report("vfio: error opening %s: %m", path); |
279a35ab | 3830 | goto free_group_exit; |
65501a74 AW |
3831 | } |
3832 | ||
3833 | if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status)) { | |
312fd5f2 | 3834 | error_report("vfio: error getting group status: %m"); |
279a35ab | 3835 | goto close_fd_exit; |
65501a74 AW |
3836 | } |
3837 | ||
3838 | if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) { | |
3839 | error_report("vfio: error, group %d is not viable, please ensure " | |
3840 | "all devices within the iommu_group are bound to their " | |
312fd5f2 | 3841 | "vfio bus driver.", groupid); |
279a35ab | 3842 | goto close_fd_exit; |
65501a74 AW |
3843 | } |
3844 | ||
3845 | group->groupid = groupid; | |
3846 | QLIST_INIT(&group->device_list); | |
3847 | ||
3df3e0a5 | 3848 | if (vfio_connect_container(group, as)) { |
312fd5f2 | 3849 | error_report("vfio: failed to setup container for group %d", groupid); |
279a35ab | 3850 | goto close_fd_exit; |
65501a74 AW |
3851 | } |
3852 | ||
f16f39c3 AW |
3853 | if (QLIST_EMPTY(&group_list)) { |
3854 | qemu_register_reset(vfio_pci_reset_handler, NULL); | |
3855 | } | |
3856 | ||
65501a74 AW |
3857 | QLIST_INSERT_HEAD(&group_list, group, next); |
3858 | ||
5b49ab18 AW |
3859 | vfio_kvm_device_add_group(group); |
3860 | ||
65501a74 | 3861 | return group; |
279a35ab AK |
3862 | |
3863 | close_fd_exit: | |
3864 | close(group->fd); | |
3865 | ||
3866 | free_group_exit: | |
3867 | g_free(group); | |
3868 | ||
3869 | return NULL; | |
65501a74 AW |
3870 | } |
3871 | ||
3872 | static void vfio_put_group(VFIOGroup *group) | |
3873 | { | |
3874 | if (!QLIST_EMPTY(&group->device_list)) { | |
3875 | return; | |
3876 | } | |
3877 | ||
5b49ab18 | 3878 | vfio_kvm_device_del_group(group); |
65501a74 AW |
3879 | vfio_disconnect_container(group); |
3880 | QLIST_REMOVE(group, next); | |
3881 | DPRINTF("vfio_put_group: close group->fd\n"); | |
3882 | close(group->fd); | |
3883 | g_free(group); | |
f16f39c3 AW |
3884 | |
3885 | if (QLIST_EMPTY(&group_list)) { | |
3886 | qemu_unregister_reset(vfio_pci_reset_handler, NULL); | |
3887 | } | |
65501a74 AW |
3888 | } |
3889 | ||
3890 | static int vfio_get_device(VFIOGroup *group, const char *name, VFIODevice *vdev) | |
3891 | { | |
3892 | struct vfio_device_info dev_info = { .argsz = sizeof(dev_info) }; | |
3893 | struct vfio_region_info reg_info = { .argsz = sizeof(reg_info) }; | |
7b4b0e9e | 3894 | struct vfio_irq_info irq_info = { .argsz = sizeof(irq_info) }; |
65501a74 AW |
3895 | int ret, i; |
3896 | ||
3897 | ret = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, name); | |
3898 | if (ret < 0) { | |
1a9522cc | 3899 | error_report("vfio: error getting device %s from group %d: %m", |
65501a74 | 3900 | name, group->groupid); |
1a9522cc | 3901 | error_printf("Verify all devices in group %d are bound to vfio-pci " |
65501a74 AW |
3902 | "or pci-stub and not already in use\n", group->groupid); |
3903 | return ret; | |
3904 | } | |
3905 | ||
3906 | vdev->fd = ret; | |
3907 | vdev->group = group; | |
3908 | QLIST_INSERT_HEAD(&group->device_list, vdev, next); | |
3909 | ||
3910 | /* Sanity check device */ | |
3911 | ret = ioctl(vdev->fd, VFIO_DEVICE_GET_INFO, &dev_info); | |
3912 | if (ret) { | |
312fd5f2 | 3913 | error_report("vfio: error getting device info: %m"); |
65501a74 AW |
3914 | goto error; |
3915 | } | |
3916 | ||
3917 | DPRINTF("Device %s flags: %u, regions: %u, irqs: %u\n", name, | |
3918 | dev_info.flags, dev_info.num_regions, dev_info.num_irqs); | |
3919 | ||
3920 | if (!(dev_info.flags & VFIO_DEVICE_FLAGS_PCI)) { | |
312fd5f2 | 3921 | error_report("vfio: this isn't a PCI device"); |
65501a74 AW |
3922 | goto error; |
3923 | } | |
3924 | ||
3925 | vdev->reset_works = !!(dev_info.flags & VFIO_DEVICE_FLAGS_RESET); | |
65501a74 | 3926 | |
8fc94e5a | 3927 | if (dev_info.num_regions < VFIO_PCI_CONFIG_REGION_INDEX + 1) { |
312fd5f2 | 3928 | error_report("vfio: unexpected number of io regions %u", |
65501a74 AW |
3929 | dev_info.num_regions); |
3930 | goto error; | |
3931 | } | |
3932 | ||
8fc94e5a | 3933 | if (dev_info.num_irqs < VFIO_PCI_MSIX_IRQ_INDEX + 1) { |
312fd5f2 | 3934 | error_report("vfio: unexpected number of irqs %u", dev_info.num_irqs); |
65501a74 AW |
3935 | goto error; |
3936 | } | |
3937 | ||
3938 | for (i = VFIO_PCI_BAR0_REGION_INDEX; i < VFIO_PCI_ROM_REGION_INDEX; i++) { | |
3939 | reg_info.index = i; | |
3940 | ||
3941 | ret = ioctl(vdev->fd, VFIO_DEVICE_GET_REGION_INFO, ®_info); | |
3942 | if (ret) { | |
312fd5f2 | 3943 | error_report("vfio: Error getting region %d info: %m", i); |
65501a74 AW |
3944 | goto error; |
3945 | } | |
3946 | ||
3947 | DPRINTF("Device %s region %d:\n", name, i); | |
3948 | DPRINTF(" size: 0x%lx, offset: 0x%lx, flags: 0x%lx\n", | |
3949 | (unsigned long)reg_info.size, (unsigned long)reg_info.offset, | |
3950 | (unsigned long)reg_info.flags); | |
3951 | ||
3952 | vdev->bars[i].flags = reg_info.flags; | |
3953 | vdev->bars[i].size = reg_info.size; | |
3954 | vdev->bars[i].fd_offset = reg_info.offset; | |
3955 | vdev->bars[i].fd = vdev->fd; | |
3956 | vdev->bars[i].nr = i; | |
7076eabc | 3957 | QLIST_INIT(&vdev->bars[i].quirks); |
65501a74 AW |
3958 | } |
3959 | ||
65501a74 AW |
3960 | reg_info.index = VFIO_PCI_CONFIG_REGION_INDEX; |
3961 | ||
3962 | ret = ioctl(vdev->fd, VFIO_DEVICE_GET_REGION_INFO, ®_info); | |
3963 | if (ret) { | |
312fd5f2 | 3964 | error_report("vfio: Error getting config info: %m"); |
65501a74 AW |
3965 | goto error; |
3966 | } | |
3967 | ||
3968 | DPRINTF("Device %s config:\n", name); | |
3969 | DPRINTF(" size: 0x%lx, offset: 0x%lx, flags: 0x%lx\n", | |
3970 | (unsigned long)reg_info.size, (unsigned long)reg_info.offset, | |
3971 | (unsigned long)reg_info.flags); | |
3972 | ||
3973 | vdev->config_size = reg_info.size; | |
6a659bbf AW |
3974 | if (vdev->config_size == PCI_CONFIG_SPACE_SIZE) { |
3975 | vdev->pdev.cap_present &= ~QEMU_PCI_CAP_EXPRESS; | |
3976 | } | |
65501a74 AW |
3977 | vdev->config_offset = reg_info.offset; |
3978 | ||
f15689c7 AW |
3979 | if ((vdev->features & VFIO_FEATURE_ENABLE_VGA) && |
3980 | dev_info.num_regions > VFIO_PCI_VGA_REGION_INDEX) { | |
3981 | struct vfio_region_info vga_info = { | |
3982 | .argsz = sizeof(vga_info), | |
3983 | .index = VFIO_PCI_VGA_REGION_INDEX, | |
3984 | }; | |
3985 | ||
3986 | ret = ioctl(vdev->fd, VFIO_DEVICE_GET_REGION_INFO, &vga_info); | |
3987 | if (ret) { | |
3988 | error_report( | |
3989 | "vfio: Device does not support requested feature x-vga"); | |
3990 | goto error; | |
3991 | } | |
3992 | ||
3993 | if (!(vga_info.flags & VFIO_REGION_INFO_FLAG_READ) || | |
3994 | !(vga_info.flags & VFIO_REGION_INFO_FLAG_WRITE) || | |
3995 | vga_info.size < 0xbffff + 1) { | |
3996 | error_report("vfio: Unexpected VGA info, flags 0x%lx, size 0x%lx", | |
3997 | (unsigned long)vga_info.flags, | |
3998 | (unsigned long)vga_info.size); | |
3999 | goto error; | |
4000 | } | |
4001 | ||
4002 | vdev->vga.fd_offset = vga_info.offset; | |
4003 | vdev->vga.fd = vdev->fd; | |
4004 | ||
4005 | vdev->vga.region[QEMU_PCI_VGA_MEM].offset = QEMU_PCI_VGA_MEM_BASE; | |
4006 | vdev->vga.region[QEMU_PCI_VGA_MEM].nr = QEMU_PCI_VGA_MEM; | |
7076eabc | 4007 | QLIST_INIT(&vdev->vga.region[QEMU_PCI_VGA_MEM].quirks); |
f15689c7 AW |
4008 | |
4009 | vdev->vga.region[QEMU_PCI_VGA_IO_LO].offset = QEMU_PCI_VGA_IO_LO_BASE; | |
4010 | vdev->vga.region[QEMU_PCI_VGA_IO_LO].nr = QEMU_PCI_VGA_IO_LO; | |
7076eabc | 4011 | QLIST_INIT(&vdev->vga.region[QEMU_PCI_VGA_IO_LO].quirks); |
f15689c7 AW |
4012 | |
4013 | vdev->vga.region[QEMU_PCI_VGA_IO_HI].offset = QEMU_PCI_VGA_IO_HI_BASE; | |
4014 | vdev->vga.region[QEMU_PCI_VGA_IO_HI].nr = QEMU_PCI_VGA_IO_HI; | |
7076eabc | 4015 | QLIST_INIT(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].quirks); |
f15689c7 AW |
4016 | |
4017 | vdev->has_vga = true; | |
4018 | } | |
7b4b0e9e VMP |
4019 | irq_info.index = VFIO_PCI_ERR_IRQ_INDEX; |
4020 | ||
4021 | ret = ioctl(vdev->fd, VFIO_DEVICE_GET_IRQ_INFO, &irq_info); | |
4022 | if (ret) { | |
4023 | /* This can fail for an old kernel or legacy PCI dev */ | |
8fbf47c3 | 4024 | DPRINTF("VFIO_DEVICE_GET_IRQ_INFO failure: %m\n"); |
7b4b0e9e VMP |
4025 | ret = 0; |
4026 | } else if (irq_info.count == 1) { | |
4027 | vdev->pci_aer = true; | |
4028 | } else { | |
8fbf47c3 AW |
4029 | error_report("vfio: %04x:%02x:%02x.%x " |
4030 | "Could not enable error recovery for the device", | |
4031 | vdev->host.domain, vdev->host.bus, vdev->host.slot, | |
4032 | vdev->host.function); | |
7b4b0e9e | 4033 | } |
f15689c7 | 4034 | |
65501a74 AW |
4035 | error: |
4036 | if (ret) { | |
4037 | QLIST_REMOVE(vdev, next); | |
4038 | vdev->group = NULL; | |
4039 | close(vdev->fd); | |
4040 | } | |
4041 | return ret; | |
4042 | } | |
4043 | ||
4044 | static void vfio_put_device(VFIODevice *vdev) | |
4045 | { | |
4046 | QLIST_REMOVE(vdev, next); | |
4047 | vdev->group = NULL; | |
4048 | DPRINTF("vfio_put_device: close vdev->fd\n"); | |
4049 | close(vdev->fd); | |
4050 | if (vdev->msix) { | |
4051 | g_free(vdev->msix); | |
4052 | vdev->msix = NULL; | |
4053 | } | |
4054 | } | |
4055 | ||
7b4b0e9e VMP |
4056 | static void vfio_err_notifier_handler(void *opaque) |
4057 | { | |
4058 | VFIODevice *vdev = opaque; | |
4059 | ||
4060 | if (!event_notifier_test_and_clear(&vdev->err_notifier)) { | |
4061 | return; | |
4062 | } | |
4063 | ||
4064 | /* | |
4065 | * TBD. Retrieve the error details and decide what action | |
4066 | * needs to be taken. One of the actions could be to pass | |
4067 | * the error to the guest and have the guest driver recover | |
4068 | * from the error. This requires that PCIe capabilities be | |
4069 | * exposed to the guest. For now, we just terminate the | |
4070 | * guest to contain the error. | |
4071 | */ | |
4072 | ||
8fbf47c3 AW |
4073 | error_report("%s(%04x:%02x:%02x.%x) Unrecoverable error detected. " |
4074 | "Please collect any data possible and then kill the guest", | |
4075 | __func__, vdev->host.domain, vdev->host.bus, | |
4076 | vdev->host.slot, vdev->host.function); | |
7b4b0e9e | 4077 | |
ba29776f | 4078 | vm_stop(RUN_STATE_INTERNAL_ERROR); |
7b4b0e9e VMP |
4079 | } |
4080 | ||
4081 | /* | |
4082 | * Registers error notifier for devices supporting error recovery. | |
4083 | * If we encounter a failure in this function, we report an error | |
4084 | * and continue after disabling error recovery support for the | |
4085 | * device. | |
4086 | */ | |
4087 | static void vfio_register_err_notifier(VFIODevice *vdev) | |
4088 | { | |
4089 | int ret; | |
4090 | int argsz; | |
4091 | struct vfio_irq_set *irq_set; | |
4092 | int32_t *pfd; | |
4093 | ||
4094 | if (!vdev->pci_aer) { | |
4095 | return; | |
4096 | } | |
4097 | ||
4098 | if (event_notifier_init(&vdev->err_notifier, 0)) { | |
8fbf47c3 | 4099 | error_report("vfio: Unable to init event notifier for error detection"); |
7b4b0e9e VMP |
4100 | vdev->pci_aer = false; |
4101 | return; | |
4102 | } | |
4103 | ||
4104 | argsz = sizeof(*irq_set) + sizeof(*pfd); | |
4105 | ||
4106 | irq_set = g_malloc0(argsz); | |
4107 | irq_set->argsz = argsz; | |
4108 | irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | | |
4109 | VFIO_IRQ_SET_ACTION_TRIGGER; | |
4110 | irq_set->index = VFIO_PCI_ERR_IRQ_INDEX; | |
4111 | irq_set->start = 0; | |
4112 | irq_set->count = 1; | |
4113 | pfd = (int32_t *)&irq_set->data; | |
4114 | ||
4115 | *pfd = event_notifier_get_fd(&vdev->err_notifier); | |
4116 | qemu_set_fd_handler(*pfd, vfio_err_notifier_handler, NULL, vdev); | |
4117 | ||
4118 | ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set); | |
4119 | if (ret) { | |
8fbf47c3 | 4120 | error_report("vfio: Failed to set up error notification"); |
7b4b0e9e VMP |
4121 | qemu_set_fd_handler(*pfd, NULL, NULL, vdev); |
4122 | event_notifier_cleanup(&vdev->err_notifier); | |
4123 | vdev->pci_aer = false; | |
4124 | } | |
4125 | g_free(irq_set); | |
4126 | } | |
4127 | ||
4128 | static void vfio_unregister_err_notifier(VFIODevice *vdev) | |
4129 | { | |
4130 | int argsz; | |
4131 | struct vfio_irq_set *irq_set; | |
4132 | int32_t *pfd; | |
4133 | int ret; | |
4134 | ||
4135 | if (!vdev->pci_aer) { | |
4136 | return; | |
4137 | } | |
4138 | ||
4139 | argsz = sizeof(*irq_set) + sizeof(*pfd); | |
4140 | ||
4141 | irq_set = g_malloc0(argsz); | |
4142 | irq_set->argsz = argsz; | |
4143 | irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | | |
4144 | VFIO_IRQ_SET_ACTION_TRIGGER; | |
4145 | irq_set->index = VFIO_PCI_ERR_IRQ_INDEX; | |
4146 | irq_set->start = 0; | |
4147 | irq_set->count = 1; | |
4148 | pfd = (int32_t *)&irq_set->data; | |
4149 | *pfd = -1; | |
4150 | ||
4151 | ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set); | |
4152 | if (ret) { | |
8fbf47c3 | 4153 | error_report("vfio: Failed to de-assign error fd: %m"); |
7b4b0e9e VMP |
4154 | } |
4155 | g_free(irq_set); | |
4156 | qemu_set_fd_handler(event_notifier_get_fd(&vdev->err_notifier), | |
4157 | NULL, NULL, vdev); | |
4158 | event_notifier_cleanup(&vdev->err_notifier); | |
4159 | } | |
4160 | ||
65501a74 AW |
4161 | static int vfio_initfn(PCIDevice *pdev) |
4162 | { | |
4163 | VFIODevice *pvdev, *vdev = DO_UPCAST(VFIODevice, pdev, pdev); | |
4164 | VFIOGroup *group; | |
4165 | char path[PATH_MAX], iommu_group_path[PATH_MAX], *group_name; | |
4166 | ssize_t len; | |
4167 | struct stat st; | |
4168 | int groupid; | |
4169 | int ret; | |
4170 | ||
4171 | /* Check that the host device exists */ | |
4172 | snprintf(path, sizeof(path), | |
4173 | "/sys/bus/pci/devices/%04x:%02x:%02x.%01x/", | |
4174 | vdev->host.domain, vdev->host.bus, vdev->host.slot, | |
4175 | vdev->host.function); | |
4176 | if (stat(path, &st) < 0) { | |
312fd5f2 | 4177 | error_report("vfio: error: no such host device: %s", path); |
65501a74 AW |
4178 | return -errno; |
4179 | } | |
4180 | ||
4181 | strncat(path, "iommu_group", sizeof(path) - strlen(path) - 1); | |
4182 | ||
13665a2d MA |
4183 | len = readlink(path, iommu_group_path, sizeof(path)); |
4184 | if (len <= 0 || len >= sizeof(path)) { | |
312fd5f2 | 4185 | error_report("vfio: error: no iommu_group for device"); |
13665a2d | 4186 | return len < 0 ? -errno : -ENAMETOOLONG; | |
65501a74 AW |
4187 | } |
4188 | ||
4189 | iommu_group_path[len] = 0; | |
4190 | group_name = basename(iommu_group_path); | |
4191 | ||
4192 | if (sscanf(group_name, "%d", &groupid) != 1) { | |
312fd5f2 | 4193 | error_report("vfio: error reading %s: %m", path); |
65501a74 AW |
4194 | return -errno; |
4195 | } | |
4196 | ||
4197 | DPRINTF("%s(%04x:%02x:%02x.%x) group %d\n", __func__, vdev->host.domain, | |
4198 | vdev->host.bus, vdev->host.slot, vdev->host.function, groupid); | |
4199 | ||
0688448b | 4200 | group = vfio_get_group(groupid, pci_device_iommu_address_space(pdev)); |
65501a74 | 4201 | if (!group) { |
312fd5f2 | 4202 | error_report("vfio: failed to get group %d", groupid); |
65501a74 AW |
4203 | return -ENOENT; |
4204 | } | |
4205 | ||
4206 | snprintf(path, sizeof(path), "%04x:%02x:%02x.%01x", | |
4207 | vdev->host.domain, vdev->host.bus, vdev->host.slot, | |
4208 | vdev->host.function); | |
4209 | ||
4210 | QLIST_FOREACH(pvdev, &group->device_list, next) { | |
4211 | if (pvdev->host.domain == vdev->host.domain && | |
4212 | pvdev->host.bus == vdev->host.bus && | |
4213 | pvdev->host.slot == vdev->host.slot && | |
4214 | pvdev->host.function == vdev->host.function) { | |
4215 | ||
312fd5f2 | 4216 | error_report("vfio: error: device %s is already attached", path); |
65501a74 AW |
4217 | vfio_put_group(group); |
4218 | return -EBUSY; | |
4219 | } | |
4220 | } | |
4221 | ||
4222 | ret = vfio_get_device(group, path, vdev); | |
4223 | if (ret) { | |
312fd5f2 | 4224 | error_report("vfio: failed to get device %s", path); |
65501a74 AW |
4225 | vfio_put_group(group); |
4226 | return ret; | |
4227 | } | |
4228 | ||
4229 | /* Get a copy of config space */ | |
4230 | ret = pread(vdev->fd, vdev->pdev.config, | |
4231 | MIN(pci_config_size(&vdev->pdev), vdev->config_size), | |
4232 | vdev->config_offset); | |
4233 | if (ret < (int)MIN(pci_config_size(&vdev->pdev), vdev->config_size)) { | |
4234 | ret = ret < 0 ? -errno : -EFAULT; | |
312fd5f2 | 4235 | error_report("vfio: Failed to read device config space"); |
65501a74 AW |
4236 | goto out_put; |
4237 | } | |
4238 | ||
4b5d5e87 AW |
4239 | /* vfio emulates a lot for us, but some bits need extra love */ |
4240 | vdev->emulated_config_bits = g_malloc0(vdev->config_size); | |
4241 | ||
4242 | /* QEMU can choose to expose the ROM or not */ | |
4243 | memset(vdev->emulated_config_bits + PCI_ROM_ADDRESS, 0xff, 4); | |
4244 | ||
4245 | /* QEMU can change multi-function devices to single function, or reverse */ | |
4246 | vdev->emulated_config_bits[PCI_HEADER_TYPE] = | |
4247 | PCI_HEADER_TYPE_MULTI_FUNCTION; | |
4248 | ||
187d6232 AW |
4249 | /* Restore or clear multifunction, this is always controlled by QEMU */ |
4250 | if (vdev->pdev.cap_present & QEMU_PCI_CAP_MULTIFUNCTION) { | |
4251 | vdev->pdev.config[PCI_HEADER_TYPE] |= PCI_HEADER_TYPE_MULTI_FUNCTION; | |
4252 | } else { | |
4253 | vdev->pdev.config[PCI_HEADER_TYPE] &= ~PCI_HEADER_TYPE_MULTI_FUNCTION; | |
4254 | } | |
4255 | ||
65501a74 AW |
4256 | /* |
4257 | * Clear host resource mapping info. If we choose not to register a | |
4258 | * BAR, such as might be the case with the option ROM, we can get | |
4259 | * confusing, unwritable, residual addresses from the host here. | |
4260 | */ | |
4261 | memset(&vdev->pdev.config[PCI_BASE_ADDRESS_0], 0, 24); | |
4262 | memset(&vdev->pdev.config[PCI_ROM_ADDRESS], 0, 4); | |
4263 | ||
6f864e6e | 4264 | vfio_pci_size_rom(vdev); |
65501a74 AW |
4265 | |
4266 | ret = vfio_early_setup_msix(vdev); | |
4267 | if (ret) { | |
4268 | goto out_put; | |
4269 | } | |
4270 | ||
4271 | vfio_map_bars(vdev); | |
4272 | ||
4273 | ret = vfio_add_capabilities(vdev); | |
4274 | if (ret) { | |
4275 | goto out_teardown; | |
4276 | } | |
4277 | ||
4b5d5e87 AW |
4278 | /* QEMU emulates all of MSI & MSIX */ |
4279 | if (pdev->cap_present & QEMU_PCI_CAP_MSIX) { | |
4280 | memset(vdev->emulated_config_bits + pdev->msix_cap, 0xff, | |
4281 | MSIX_CAP_LENGTH); | |
4282 | } | |
4283 | ||
4284 | if (pdev->cap_present & QEMU_PCI_CAP_MSI) { | |
4285 | memset(vdev->emulated_config_bits + pdev->msi_cap, 0xff, | |
4286 | vdev->msi_cap_size); | |
4287 | } | |
4288 | ||
65501a74 | 4289 | if (vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1)) { |
bc72ad67 | 4290 | vdev->intx.mmap_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL, |
ea486926 | 4291 | vfio_intx_mmap_enable, vdev); |
e1d1e586 | 4292 | pci_device_set_intx_routing_notifier(&vdev->pdev, vfio_update_irq); |
65501a74 AW |
4293 | ret = vfio_enable_intx(vdev); |
4294 | if (ret) { | |
4295 | goto out_teardown; | |
4296 | } | |
4297 | } | |
4298 | ||
7b4b0e9e | 4299 | vfio_register_err_notifier(vdev); |
c29029dd | 4300 | |
65501a74 AW |
4301 | return 0; |
4302 | ||
4303 | out_teardown: | |
4304 | pci_device_set_intx_routing_notifier(&vdev->pdev, NULL); | |
4305 | vfio_teardown_msi(vdev); | |
4306 | vfio_unmap_bars(vdev); | |
4307 | out_put: | |
4b5d5e87 | 4308 | g_free(vdev->emulated_config_bits); |
65501a74 AW |
4309 | vfio_put_device(vdev); |
4310 | vfio_put_group(group); | |
4311 | return ret; | |
4312 | } | |
4313 | ||
4314 | static void vfio_exitfn(PCIDevice *pdev) | |
4315 | { | |
4316 | VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev); | |
4317 | VFIOGroup *group = vdev->group; | |
4318 | ||
7b4b0e9e | 4319 | vfio_unregister_err_notifier(vdev); |
65501a74 AW |
4320 | pci_device_set_intx_routing_notifier(&vdev->pdev, NULL); |
4321 | vfio_disable_interrupts(vdev); | |
ea486926 | 4322 | if (vdev->intx.mmap_timer) { |
bc72ad67 | 4323 | timer_free(vdev->intx.mmap_timer); |
ea486926 | 4324 | } |
65501a74 AW |
4325 | vfio_teardown_msi(vdev); |
4326 | vfio_unmap_bars(vdev); | |
4b5d5e87 | 4327 | g_free(vdev->emulated_config_bits); |
6f864e6e | 4328 | g_free(vdev->rom); |
65501a74 AW |
4329 | vfio_put_device(vdev); |
4330 | vfio_put_group(group); | |
4331 | } | |
4332 | ||
4333 | static void vfio_pci_reset(DeviceState *dev) | |
4334 | { | |
4335 | PCIDevice *pdev = DO_UPCAST(PCIDevice, qdev, dev); | |
4336 | VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev); | |
4337 | ||
5834a83f AW |
4338 | DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain, |
4339 | vdev->host.bus, vdev->host.slot, vdev->host.function); | |
4340 | ||
f16f39c3 | 4341 | vfio_pci_pre_reset(vdev); |
ba661818 | 4342 | |
f16f39c3 AW |
4343 | if (vdev->reset_works && (vdev->has_flr || !vdev->has_pm_reset) && |
4344 | !ioctl(vdev->fd, VFIO_DEVICE_RESET)) { | |
4345 | DPRINTF("%04x:%02x:%02x.%x FLR/VFIO_DEVICE_RESET\n", vdev->host.domain, | |
4346 | vdev->host.bus, vdev->host.slot, vdev->host.function); | |
4347 | goto post_reset; | |
ba661818 AW |
4348 | } |
4349 | ||
f16f39c3 AW |
4350 | /* See if we can do our own bus reset */ |
4351 | if (!vfio_pci_hot_reset_one(vdev)) { | |
4352 | goto post_reset; | |
4353 | } | |
5834a83f | 4354 | |
f16f39c3 AW |
4355 | /* If nothing else works and the device supports PM reset, use it */ |
4356 | if (vdev->reset_works && vdev->has_pm_reset && | |
4357 | !ioctl(vdev->fd, VFIO_DEVICE_RESET)) { | |
4358 | DPRINTF("%04x:%02x:%02x.%x PCI PM Reset\n", vdev->host.domain, | |
4359 | vdev->host.bus, vdev->host.slot, vdev->host.function); | |
4360 | goto post_reset; | |
65501a74 | 4361 | } |
5834a83f | 4362 | |
f16f39c3 AW |
4363 | post_reset: |
4364 | vfio_pci_post_reset(vdev); | |
65501a74 AW |
4365 | } |
4366 | ||
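/*
 * Per-instance init: register a writable "bootindex" property on the
 * object, backed by vdev->bootindex.
 */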
abc5b3bf GA |
4367 | static void vfio_instance_init(Object *obj) |
4368 | { | |
4369 | PCIDevice *pci_dev = PCI_DEVICE(obj); | |
4370 | VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, PCI_DEVICE(obj)); | |
4371 | ||
4372 | device_add_bootindex_property(obj, &vdev->bootindex, | |
4373 | "bootindex", NULL, | |
4374 | &pci_dev->qdev, NULL); | |
4375 | } | |
4376 | ||
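/*
 * User-visible properties.  "host" selects the physical device by
 * domain:bus:slot.function; the experimental "x-" options tune how long
 * mmap acceleration stays disabled after an INTx and opt into legacy
 * VGA range support.
 */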
65501a74 AW |
4377 | static Property vfio_pci_dev_properties[] = { |
4378 | DEFINE_PROP_PCI_HOST_DEVADDR("host", VFIODevice, host), | |
ea486926 AW |
4379 | DEFINE_PROP_UINT32("x-intx-mmap-timeout-ms", VFIODevice, |
4380 | intx.mmap_timeout, 1100), | |
f15689c7 AW |
4381 | DEFINE_PROP_BIT("x-vga", VFIODevice, features, |
4382 | VFIO_FEATURE_ENABLE_VGA_BIT, false), | |
65501a74 AW |
4383 | /* |
4384 | * TODO - support passed fds... is this necessary? | |
4385 | * DEFINE_PROP_STRING("vfiofd", VFIODevice, vfiofd_name), | |
4386 | * DEFINE_PROP_STRING("vfiogroupfd", VFIODevice, vfiogroupfd_name), | 
4387 | */ | |
4388 | DEFINE_PROP_END_OF_LIST(), | |
4389 | }; | |
4390 | ||
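/* Migration of assigned devices is not supported; mark the state
 * unmigratable so a migration attempt fails cleanly. */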
d9f0e638 AW |
4391 | static const VMStateDescription vfio_pci_vmstate = { |
4392 | .name = "vfio-pci", | |
4393 | .unmigratable = 1, | |
4394 | }; | |
65501a74 AW |
4395 | |
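/*
 * Class init: wire up the qdev reset handler, properties and vmstate,
 * plus the PCI-level init/exit and config space read/write hooks that
 * route guest config accesses through the VFIO emulation layer.
 */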
4396 | static void vfio_pci_dev_class_init(ObjectClass *klass, void *data) | |
4397 | { | |
4398 | DeviceClass *dc = DEVICE_CLASS(klass); | |
4399 | PCIDeviceClass *pdc = PCI_DEVICE_CLASS(klass); | |
4400 | ||
4401 | dc->reset = vfio_pci_reset; | |
4402 | dc->props = vfio_pci_dev_properties; | |
d9f0e638 AW |
4403 | dc->vmsd = &vfio_pci_vmstate; |
4404 | dc->desc = "VFIO-based PCI device assignment"; | |
125ee0ed | 4405 | set_bit(DEVICE_CATEGORY_MISC, dc->categories); |
65501a74 AW |
4406 | pdc->init = vfio_initfn; |
4407 | pdc->exit = vfio_exitfn; | |
4408 | pdc->config_read = vfio_pci_read_config; | |
4409 | pdc->config_write = vfio_pci_write_config; | |
6a659bbf | 4410 | pdc->is_express = 1; /* We might be */ |
65501a74 AW |
4411 | } |
4412 | ||
4413 | static const TypeInfo vfio_pci_dev_info = { | |
4414 | .name = "vfio-pci", | |
4415 | .parent = TYPE_PCI_DEVICE, | |
4416 | .instance_size = sizeof(VFIODevice), | |
4417 | .class_init = vfio_pci_dev_class_init, | |
abc5b3bf | 4418 | .instance_init = vfio_instance_init, |
65501a74 AW |
4419 | }; |
4420 | ||
4421 | static void register_vfio_pci_dev_type(void) | |
4422 | { | |
4423 | type_register_static(&vfio_pci_dev_info); | |
4424 | } | |
4425 | ||
4426 | type_init(register_vfio_pci_dev_type) | |
6d8be4c3 AK |
4427 | |
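/*
 * Resolve the IOMMU group, forward the ioctl to the container fd backing
 * it, then drop the group reference again.  Returns the ioctl result, or
 * -1 if the group is unknown or has no container.
 */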
4428 | static int vfio_container_do_ioctl(AddressSpace *as, int32_t groupid, | |
4429 | int req, void *param) | |
4430 | { | |
4431 | VFIOGroup *group; | |
4432 | VFIOContainer *container; | |
4433 | int ret = -1; | |
4434 | ||
4435 | group = vfio_get_group(groupid, as); | |
4436 | if (!group) { | |
4437 | error_report("vfio: group %d not registered", groupid); | |
4438 | return ret; | |
4439 | } | |
4440 | ||
4441 | container = group->container; | 
4442 | if (container) { | 
4443 | ret = ioctl(container->fd, req, param); | |
4444 | if (ret < 0) { | |
4445 | error_report("vfio: failed to ioctl container: ret=%d, %s", | |
4446 | ret, strerror(errno)); | |
4447 | } | |
4448 | } | |
4449 | ||
4450 | vfio_put_group(group); | |
4451 | ||
4452 | return ret; | |
4453 | } | |
4454 | ||
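/*
 * External entry point (declared in hw/misc/vfio.h).  Only whitelisted
 * requests (VFIO_CHECK_EXTENSION, VFIO_IOMMU_SPAPR_TCE_GET_INFO) are
 * forwarded to the container; anything else is rejected with an error.
 */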
4455 | int vfio_container_ioctl(AddressSpace *as, int32_t groupid, | |
4456 | int req, void *param) | |
4457 | { | |
4458 | /* We allow only certain ioctls to the container */ | |
4459 | switch (req) { | |
59181263 AK |
4460 | case VFIO_CHECK_EXTENSION: |
4461 | case VFIO_IOMMU_SPAPR_TCE_GET_INFO: | |
4462 | break; | |
6d8be4c3 AK |
4463 | default: |
4464 | /* Return an error on unknown requests */ | |
4465 | error_report("vfio: unsupported ioctl %X", req); | |
4466 | return -1; | |
4467 | } | |
4468 | ||
4469 | return vfio_container_do_ioctl(as, groupid, req, param); | |
4470 | } |
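
For reference, a minimal sketch of how an external user of the whitelist above might call vfio_container_ioctl().  The helper name and the use of address_space_memory are assumptions for illustration only; the request, the parameter struct and its fields come from linux/vfio.h, and the function signature from the code above.

/* Hypothetical caller sketch - not part of this file. */
#include <linux/vfio.h>
#include "exec/address-spaces.h"
#include "hw/misc/vfio.h"

/* Query the 32-bit DMA window size of the sPAPR TCE IOMMU backing the
 * container of the given group; returns 0 if the query fails. */
static uint64_t vfio_spapr_dma32_window_size(int32_t groupid)
{
    struct vfio_iommu_spapr_tce_info info = { .argsz = sizeof(info) };

    if (vfio_container_ioctl(&address_space_memory, groupid,
                             VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info) < 0) {
        return 0;
    }
    return info.dma32_window_size;
}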