/*
 * generic functions used by VFIO devices
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Alex Williamson <alex.williamson@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Based on qemu-kvm device-assignment:
 *  Adapted for KVM by Qumranet.
 *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
 *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
 *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
 *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
 *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>
#ifdef CONFIG_KVM
#include <linux/kvm.h>
#endif
#include <linux/vfio.h>

#include "hw/vfio/vfio-common.h"
#include "hw/vfio/vfio.h"
#include "exec/address-spaces.h"
#include "exec/memory.h"
#include "hw/hw.h"
#include "qemu/error-report.h"
#include "qemu/range.h"
#include "sysemu/balloon.h"
#include "sysemu/kvm.h"
#include "trace.h"
#include "qapi/error.h"

struct vfio_group_head vfio_group_list =
    QLIST_HEAD_INITIALIZER(vfio_group_list);
struct vfio_as_head vfio_address_spaces =
    QLIST_HEAD_INITIALIZER(vfio_address_spaces);

#ifdef CONFIG_KVM
/*
 * We have a single VFIO pseudo device per KVM VM.  Once created it lives
 * for the life of the VM.  Closing the file descriptor only drops our
 * reference to it and the device's reference to kvm.  Therefore once
 * initialized, this file descriptor is only released on QEMU exit and
 * we'll re-use it should another vfio device be attached before then.
 */
static int vfio_kvm_device_fd = -1;
#endif

/*
 * Common VFIO interrupt disable
 */
void vfio_disable_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER,
        .index = index,
        .start = 0,
        .count = 0,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

void vfio_unmask_single_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK,
        .index = index,
        .start = 0,
        .count = 1,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

void vfio_mask_single_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK,
        .index = index,
        .start = 0,
        .count = 1,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}
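
/*
 * Note on the three helpers above: VFIO_DEVICE_SET_IRQS combines a
 * VFIO_IRQ_SET_DATA_* flag describing the payload (DATA_NONE here, i.e.
 * no eventfds are passed) with a VFIO_IRQ_SET_ACTION_* flag selecting
 * what to do, plus an index identifying the interrupt type (e.g.
 * INTx/MSI/MSI-X for PCI) and a start/count pair selecting vectors
 * within that index.  ACTION_TRIGGER with DATA_NONE and count == 0
 * tears down the whole index; see linux/vfio.h for the full protocol.
 */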

/*
 * IO Port/MMIO - Beware of the endians, VFIO is always little endian
 */
void vfio_region_write(void *opaque, hwaddr addr,
                       uint64_t data, unsigned size)
{
    VFIORegion *region = opaque;
    VFIODevice *vbasedev = region->vbasedev;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;

    switch (size) {
    case 1:
        buf.byte = data;
        break;
    case 2:
        buf.word = cpu_to_le16(data);
        break;
    case 4:
        buf.dword = cpu_to_le32(data);
        break;
    case 8:
        buf.qword = cpu_to_le64(data);
        break;
    default:
        hw_error("vfio: unsupported write size, %d bytes", size);
        break;
    }

    if (pwrite(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) {
        error_report("%s(%s:region%d+0x%"HWADDR_PRIx", 0x%"PRIx64
                     ",%d) failed: %m",
                     __func__, vbasedev->name, region->nr,
                     addr, data, size);
    }

    trace_vfio_region_write(vbasedev->name, region->nr, addr, data, size);

    /*
     * A read or write to a BAR always signals an INTx EOI.  This will
     * do nothing if not pending (including not in INTx mode).  We assume
     * that a BAR access is in response to an interrupt and that BAR
     * accesses will service the interrupt.  Unfortunately, we don't know
     * which access will service the interrupt, so we're potentially
     * getting quite a few host interrupts per guest interrupt.
     */
    vbasedev->ops->vfio_eoi(vbasedev);
}

uint64_t vfio_region_read(void *opaque,
                          hwaddr addr, unsigned size)
{
    VFIORegion *region = opaque;
    VFIODevice *vbasedev = region->vbasedev;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    uint64_t data = 0;

    if (pread(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) {
        error_report("%s(%s:region%d+0x%"HWADDR_PRIx", %d) failed: %m",
                     __func__, vbasedev->name, region->nr,
                     addr, size);
        return (uint64_t)-1;
    }
    switch (size) {
    case 1:
        data = buf.byte;
        break;
    case 2:
        data = le16_to_cpu(buf.word);
        break;
    case 4:
        data = le32_to_cpu(buf.dword);
        break;
    case 8:
        data = le64_to_cpu(buf.qword);
        break;
    default:
        hw_error("vfio: unsupported read size, %d bytes", size);
        break;
    }

    trace_vfio_region_read(vbasedev->name, region->nr, addr, size, data);

    /* Same as write above */
    vbasedev->ops->vfio_eoi(vbasedev);

    return data;
}

const MemoryRegionOps vfio_region_ops = {
    .read = vfio_region_read,
    .write = vfio_region_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};
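
/*
 * Note: .valid above describes which guest access sizes the region
 * accepts, while .impl describes which sizes the read/write callbacks
 * themselves implement.  With both spanning 1-8 bytes, the memory core
 * passes accesses through unchanged instead of splitting or combining
 * them.
 */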

/*
 * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86
 */
static int vfio_dma_unmap(VFIOContainer *container,
                          hwaddr iova, ram_addr_t size)
{
    struct vfio_iommu_type1_dma_unmap unmap = {
        .argsz = sizeof(unmap),
        .flags = 0,
        .iova = iova,
        .size = size,
    };

    if (ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) {
        error_report("VFIO_UNMAP_DMA: %d", -errno);
        return -errno;
    }

    return 0;
}

static int vfio_dma_map(VFIOContainer *container, hwaddr iova,
                        ram_addr_t size, void *vaddr, bool readonly)
{
    struct vfio_iommu_type1_dma_map map = {
        .argsz = sizeof(map),
        .flags = VFIO_DMA_MAP_FLAG_READ,
        .vaddr = (__u64)(uintptr_t)vaddr,
        .iova = iova,
        .size = size,
    };

    if (!readonly) {
        map.flags |= VFIO_DMA_MAP_FLAG_WRITE;
    }

    /*
     * Try the mapping, if it fails with EBUSY, unmap the region and try
     * again.  This shouldn't be necessary, but we sometimes see it in
     * the VGA ROM space.
     */
    if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0 ||
        (errno == EBUSY && vfio_dma_unmap(container, iova, size) == 0 &&
         ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0)) {
        return 0;
    }

    error_report("VFIO_MAP_DMA: %d", -errno);
    return -errno;
}
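
/*
 * Illustrative only: mapping a single 4 KiB page of guest memory
 * read-write at IOVA 0x100000 would be
 *
 *     vfio_dma_map(container, 0x100000, 0x1000, host_ptr, false);
 *
 * with host_ptr a (hypothetical) QEMU virtual address backing that
 * guest page.  The kernel pins the pages and programs the host IOMMU
 * so the device can DMA to that IOVA.
 */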

static void vfio_host_win_add(VFIOContainer *container,
                              hwaddr min_iova, hwaddr max_iova,
                              uint64_t iova_pgsizes)
{
    VFIOHostDMAWindow *hostwin;

    QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
        if (ranges_overlap(hostwin->min_iova,
                           hostwin->max_iova - hostwin->min_iova + 1,
                           min_iova,
                           max_iova - min_iova + 1)) {
            hw_error("%s: Overlapping IOMMU windows are not supported",
                     __func__);
        }
    }

    hostwin = g_malloc0(sizeof(*hostwin));

    hostwin->min_iova = min_iova;
    hostwin->max_iova = max_iova;
    hostwin->iova_pgsizes = iova_pgsizes;
    QLIST_INSERT_HEAD(&container->hostwin_list, hostwin, hostwin_next);
}

static int vfio_host_win_del(VFIOContainer *container, hwaddr min_iova,
                             hwaddr max_iova)
{
    VFIOHostDMAWindow *hostwin;

    QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
        if (hostwin->min_iova == min_iova && hostwin->max_iova == max_iova) {
            QLIST_REMOVE(hostwin, hostwin_next);
            return 0;
        }
    }

    return -1;
}

static bool vfio_listener_skipped_section(MemoryRegionSection *section)
{
    return (!memory_region_is_ram(section->mr) &&
            !memory_region_is_iommu(section->mr)) ||
           /*
            * Sizing an enabled 64-bit BAR can cause spurious mappings to
            * addresses in the upper part of the 64-bit address space.  These
            * are never accessed by the CPU and beyond the address width of
            * some IOMMU hardware.  TODO: VFIO should tell us the IOMMU width.
            */
           section->offset_within_address_space & (1ULL << 63);
}

/* Called with rcu_read_lock held.  */
static bool vfio_get_vaddr(IOMMUTLBEntry *iotlb, void **vaddr,
                           bool *read_only)
{
    MemoryRegion *mr;
    hwaddr xlat;
    hwaddr len = iotlb->addr_mask + 1;
    bool writable = iotlb->perm & IOMMU_WO;

    /*
     * The IOMMU TLB entry we have just covers translation through
     * this IOMMU to its immediate target.  We need to translate
     * it the rest of the way through to memory.
     */
    mr = address_space_translate(&address_space_memory,
                                 iotlb->translated_addr,
                                 &xlat, &len, writable,
                                 MEMTXATTRS_UNSPECIFIED);
    if (!memory_region_is_ram(mr)) {
        error_report("iommu map to non-memory area %"HWADDR_PRIx"",
                     xlat);
        return false;
    }

    /*
     * Translation truncates length to the IOMMU page size,
     * check that it did not truncate too much.
     */
    if (len & iotlb->addr_mask) {
        error_report("iommu has granularity incompatible with target AS");
        return false;
    }

    *vaddr = memory_region_get_ram_ptr(mr) + xlat;
    *read_only = !writable || mr->readonly;

    return true;
}

static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
{
    VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, n);
    VFIOContainer *container = giommu->container;
    hwaddr iova = iotlb->iova + giommu->iommu_offset;
    bool read_only;
    void *vaddr;
    int ret;

    trace_vfio_iommu_map_notify(iotlb->perm == IOMMU_NONE ? "UNMAP" : "MAP",
                                iova, iova + iotlb->addr_mask);

    if (iotlb->target_as != &address_space_memory) {
        error_report("Wrong target AS \"%s\", only system memory is allowed",
                     iotlb->target_as->name ? iotlb->target_as->name : "none");
        return;
    }

    rcu_read_lock();

    if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) {
        if (!vfio_get_vaddr(iotlb, &vaddr, &read_only)) {
            goto out;
        }
        /*
         * vaddr is only valid until rcu_read_unlock().  But after
         * vfio_dma_map has set up the mapping the pages will be
         * pinned by the kernel.  This makes sure that the RAM backend
         * of vaddr will always be there, even if the memory object is
         * destroyed and its backing memory munmap-ed.
         */
        ret = vfio_dma_map(container, iova,
                           iotlb->addr_mask + 1, vaddr,
                           read_only);
        if (ret) {
            error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx", %p) = %d (%m)",
                         container, iova,
                         iotlb->addr_mask + 1, vaddr, ret);
        }
    } else {
        ret = vfio_dma_unmap(container, iova, iotlb->addr_mask + 1);
        if (ret) {
            error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx") = %d (%m)",
                         container, iova,
                         iotlb->addr_mask + 1, ret);
        }
    }
out:
    rcu_read_unlock();
}

static void vfio_listener_region_add(MemoryListener *listener,
                                     MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer, listener);
    hwaddr iova, end;
    Int128 llend, llsize;
    void *vaddr;
    int ret;
    VFIOHostDMAWindow *hostwin;
    bool hostwin_found;

    if (vfio_listener_skipped_section(section)) {
        trace_vfio_listener_region_add_skip(
                section->offset_within_address_space,
                section->offset_within_address_space +
                int128_get64(int128_sub(section->size, int128_one())));
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));

    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }
    end = int128_get64(int128_sub(llend, int128_one()));

    if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
        hwaddr pgsize = 0;

        /* For now intersections are not allowed, we may relax this later */
        QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
            if (ranges_overlap(hostwin->min_iova,
                               hostwin->max_iova - hostwin->min_iova + 1,
                               section->offset_within_address_space,
                               int128_get64(section->size))) {
                ret = -1;
                goto fail;
            }
        }

        ret = vfio_spapr_create_window(container, section, &pgsize);
        if (ret) {
            goto fail;
        }

        vfio_host_win_add(container, section->offset_within_address_space,
                          section->offset_within_address_space +
                          int128_get64(section->size) - 1, pgsize);
#ifdef CONFIG_KVM
        if (kvm_enabled()) {
            VFIOGroup *group;
            IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(section->mr);
            struct kvm_vfio_spapr_tce param;
            struct kvm_device_attr attr = {
                .group = KVM_DEV_VFIO_GROUP,
                .attr = KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE,
                .addr = (uint64_t)(unsigned long)&param,
            };

            if (!memory_region_iommu_get_attr(iommu_mr, IOMMU_ATTR_SPAPR_TCE_FD,
                                              &param.tablefd)) {
                QLIST_FOREACH(group, &container->group_list, container_next) {
                    param.groupfd = group->fd;
                    if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
                        error_report("vfio: failed to setup fd %d "
                                     "for a group with fd %d: %s",
                                     param.tablefd, param.groupfd,
                                     strerror(errno));
                        return;
                    }
                    trace_vfio_spapr_group_attach(param.groupfd, param.tablefd);
                }
            }
        }
#endif
    }

    hostwin_found = false;
    QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
        if (hostwin->min_iova <= iova && end <= hostwin->max_iova) {
            hostwin_found = true;
            break;
        }
    }

    if (!hostwin_found) {
        error_report("vfio: IOMMU container %p can't map guest IOVA region"
                     " 0x%"HWADDR_PRIx"..0x%"HWADDR_PRIx,
                     container, iova, end);
        ret = -EFAULT;
        goto fail;
    }

    memory_region_ref(section->mr);

    if (memory_region_is_iommu(section->mr)) {
        VFIOGuestIOMMU *giommu;
        IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(section->mr);
        int iommu_idx;

        trace_vfio_listener_region_add_iommu(iova, end);
        /*
         * FIXME: For VFIO iommu types which have KVM acceleration to
         * avoid bouncing all map/unmaps through qemu this way, this
         * would be the right place to wire that up (tell the KVM
         * device emulation the VFIO iommu handles to use).
         */
        giommu = g_malloc0(sizeof(*giommu));
        giommu->iommu = iommu_mr;
        giommu->iommu_offset = section->offset_within_address_space -
                               section->offset_within_region;
        giommu->container = container;
        llend = int128_add(int128_make64(section->offset_within_region),
                           section->size);
        llend = int128_sub(llend, int128_one());
        iommu_idx = memory_region_iommu_attrs_to_index(iommu_mr,
                                                       MEMTXATTRS_UNSPECIFIED);
        iommu_notifier_init(&giommu->n, vfio_iommu_map_notify,
                            IOMMU_NOTIFIER_ALL,
                            section->offset_within_region,
                            int128_get64(llend),
                            iommu_idx);
        QLIST_INSERT_HEAD(&container->giommu_list, giommu, giommu_next);

        memory_region_register_iommu_notifier(section->mr, &giommu->n);
        memory_region_iommu_replay(giommu->iommu, &giommu->n);

        return;
    }

    /* Here we assume that memory_region_is_ram(section->mr) == true */

    vaddr = memory_region_get_ram_ptr(section->mr) +
            section->offset_within_region +
            (iova - section->offset_within_address_space);

    trace_vfio_listener_region_add_ram(iova, end, vaddr);

    llsize = int128_sub(llend, int128_make64(iova));

    if (memory_region_is_ram_device(section->mr)) {
        hwaddr pgmask = (1ULL << ctz64(hostwin->iova_pgsizes)) - 1;

        if ((iova & pgmask) || (int128_get64(llsize) & pgmask)) {
            trace_vfio_listener_region_add_no_dma_map(
                memory_region_name(section->mr),
                section->offset_within_address_space,
                int128_getlo(section->size),
                pgmask + 1);
            return;
        }
    }

    ret = vfio_dma_map(container, iova, int128_get64(llsize),
                       vaddr, section->readonly);
    if (ret) {
        error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
                     "0x%"HWADDR_PRIx", %p) = %d (%m)",
                     container, iova, int128_get64(llsize), vaddr, ret);
        if (memory_region_is_ram_device(section->mr)) {
            /* Allow unexpected mappings not to be fatal for RAM devices */
            return;
        }
        goto fail;
    }

    return;

fail:
    if (memory_region_is_ram_device(section->mr)) {
        error_report("vfio_dma_map failed; PCI peer-to-peer may not work");
        return;
    }
    /*
     * On the initfn path, store the first error in the container so we
     * can gracefully fail.  Runtime, there's not much we can do other
     * than throw a hardware error.
     */
    if (!container->initialized) {
        if (!container->error) {
            container->error = ret;
        }
    } else {
        hw_error("vfio: DMA mapping failed, unable to continue");
    }
}

static void vfio_listener_region_del(MemoryListener *listener,
                                     MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer, listener);
    hwaddr iova, end;
    Int128 llend, llsize;
    int ret;
    bool try_unmap = true;

    if (vfio_listener_skipped_section(section)) {
        trace_vfio_listener_region_del_skip(
                section->offset_within_address_space,
                section->offset_within_address_space +
                int128_get64(int128_sub(section->size, int128_one())));
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    if (memory_region_is_iommu(section->mr)) {
        VFIOGuestIOMMU *giommu;

        QLIST_FOREACH(giommu, &container->giommu_list, giommu_next) {
            if (MEMORY_REGION(giommu->iommu) == section->mr &&
                giommu->n.start == section->offset_within_region) {
                memory_region_unregister_iommu_notifier(section->mr,
                                                        &giommu->n);
                QLIST_REMOVE(giommu, giommu_next);
                g_free(giommu);
                break;
            }
        }

        /*
         * FIXME: We assume the one big unmap below is adequate to
         * remove any individual page mappings in the IOMMU which
         * might have been copied into VFIO.  This works for a page table
         * based IOMMU where a big unmap flattens a large range of IO-PTEs.
         * That may not be true for all IOMMU types.
         */
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));

    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }
    end = int128_get64(int128_sub(llend, int128_one()));

    llsize = int128_sub(llend, int128_make64(iova));

    trace_vfio_listener_region_del(iova, end);

    if (memory_region_is_ram_device(section->mr)) {
        hwaddr pgmask;
        VFIOHostDMAWindow *hostwin;
        bool hostwin_found = false;

        QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
            if (hostwin->min_iova <= iova && end <= hostwin->max_iova) {
                hostwin_found = true;
                break;
            }
        }
        assert(hostwin_found); /* or region_add() would have failed */

        pgmask = (1ULL << ctz64(hostwin->iova_pgsizes)) - 1;
        try_unmap = !((iova & pgmask) || (int128_get64(llsize) & pgmask));
    }

    if (try_unmap) {
        ret = vfio_dma_unmap(container, iova, int128_get64(llsize));
        if (ret) {
            error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx") = %d (%m)",
                         container, iova, int128_get64(llsize), ret);
        }
    }

    memory_region_unref(section->mr);

    if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
        vfio_spapr_remove_window(container,
                                 section->offset_within_address_space);
        if (vfio_host_win_del(container,
                              section->offset_within_address_space,
                              section->offset_within_address_space +
                              int128_get64(section->size) - 1) < 0) {
            hw_error("%s: Cannot delete missing window at %"HWADDR_PRIx,
                     __func__, section->offset_within_address_space);
        }
    }
}

static const MemoryListener vfio_memory_listener = {
    .region_add = vfio_listener_region_add,
    .region_del = vfio_listener_region_del,
};

static void vfio_listener_release(VFIOContainer *container)
{
    memory_listener_unregister(&container->listener);
    if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
        memory_listener_unregister(&container->prereg_listener);
    }
}

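/*
 * Walk a vfio_info_cap capability chain.  Each header's 'next' field is
 * an offset from the start of the info structure, so a 'next' of zero
 * makes 'ptr + hdr->next' equal to 'ptr' again, which is what terminates
 * the loop below.
 */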
static struct vfio_info_cap_header *
vfio_get_region_info_cap(struct vfio_region_info *info, uint16_t id)
{
    struct vfio_info_cap_header *hdr;
    void *ptr = info;

    if (!(info->flags & VFIO_REGION_INFO_FLAG_CAPS)) {
        return NULL;
    }

    for (hdr = ptr + info->cap_offset; hdr != ptr; hdr = ptr + hdr->next) {
        if (hdr->id == id) {
            return hdr;
        }
    }

    return NULL;
}

static int vfio_setup_region_sparse_mmaps(VFIORegion *region,
                                          struct vfio_region_info *info)
{
    struct vfio_info_cap_header *hdr;
    struct vfio_region_info_cap_sparse_mmap *sparse;
    int i, j;

    hdr = vfio_get_region_info_cap(info, VFIO_REGION_INFO_CAP_SPARSE_MMAP);
    if (!hdr) {
        return -ENODEV;
    }

    sparse = container_of(hdr, struct vfio_region_info_cap_sparse_mmap, header);

    trace_vfio_region_sparse_mmap_header(region->vbasedev->name,
                                         region->nr, sparse->nr_areas);

    region->mmaps = g_new0(VFIOMmap, sparse->nr_areas);

    for (i = 0, j = 0; i < sparse->nr_areas; i++) {
        trace_vfio_region_sparse_mmap_entry(i, sparse->areas[i].offset,
                                            sparse->areas[i].offset +
                                            sparse->areas[i].size);

        if (sparse->areas[i].size) {
            region->mmaps[j].offset = sparse->areas[i].offset;
            region->mmaps[j].size = sparse->areas[i].size;
            j++;
        }
    }

    region->nr_mmaps = j;
    region->mmaps = g_realloc(region->mmaps, j * sizeof(VFIOMmap));

    return 0;
}
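
/*
 * For example (typical for vfio-pci): a BAR containing the MSI-X table
 * may be advertised with a sparse mmap capability listing the mmap-able
 * ranges on either side of the table, so the table itself is never
 * mapped and accesses to it always trap to QEMU.  Zero-sized areas are
 * dropped by the loop above.
 */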

int vfio_region_setup(Object *obj, VFIODevice *vbasedev, VFIORegion *region,
                      int index, const char *name)
{
    struct vfio_region_info *info;
    int ret;

    ret = vfio_get_region_info(vbasedev, index, &info);
    if (ret) {
        return ret;
    }

    region->vbasedev = vbasedev;
    region->flags = info->flags;
    region->size = info->size;
    region->fd_offset = info->offset;
    region->nr = index;

    if (region->size) {
        region->mem = g_new0(MemoryRegion, 1);
        memory_region_init_io(region->mem, obj, &vfio_region_ops,
                              region, name, region->size);

        if (!vbasedev->no_mmap &&
            region->flags & VFIO_REGION_INFO_FLAG_MMAP) {

            ret = vfio_setup_region_sparse_mmaps(region, info);

            if (ret) {
                region->nr_mmaps = 1;
                region->mmaps = g_new0(VFIOMmap, region->nr_mmaps);
                region->mmaps[0].offset = 0;
                region->mmaps[0].size = region->size;
            }
        }
    }

    g_free(info);

    trace_vfio_region_setup(vbasedev->name, index, name,
                            region->flags, region->fd_offset, region->size);
    return 0;
}

int vfio_region_mmap(VFIORegion *region)
{
    int i, prot = 0;
    char *name;

    if (!region->mem) {
        return 0;
    }

    prot |= region->flags & VFIO_REGION_INFO_FLAG_READ ? PROT_READ : 0;
    prot |= region->flags & VFIO_REGION_INFO_FLAG_WRITE ? PROT_WRITE : 0;

    for (i = 0; i < region->nr_mmaps; i++) {
        region->mmaps[i].mmap = mmap(NULL, region->mmaps[i].size, prot,
                                     MAP_SHARED, region->vbasedev->fd,
                                     region->fd_offset +
                                     region->mmaps[i].offset);
        if (region->mmaps[i].mmap == MAP_FAILED) {
            int ret = -errno;

            trace_vfio_region_mmap_fault(memory_region_name(region->mem), i,
                                         region->fd_offset +
                                         region->mmaps[i].offset,
                                         region->fd_offset +
                                         region->mmaps[i].offset +
                                         region->mmaps[i].size - 1, ret);

            region->mmaps[i].mmap = NULL;

            for (i--; i >= 0; i--) {
                memory_region_del_subregion(region->mem, &region->mmaps[i].mem);
                munmap(region->mmaps[i].mmap, region->mmaps[i].size);
                object_unparent(OBJECT(&region->mmaps[i].mem));
                region->mmaps[i].mmap = NULL;
            }

            return ret;
        }

        name = g_strdup_printf("%s mmaps[%d]",
                               memory_region_name(region->mem), i);
        memory_region_init_ram_device_ptr(&region->mmaps[i].mem,
                                          memory_region_owner(region->mem),
                                          name, region->mmaps[i].size,
                                          region->mmaps[i].mmap);
        g_free(name);
        memory_region_add_subregion(region->mem, region->mmaps[i].offset,
                                    &region->mmaps[i].mem);

        trace_vfio_region_mmap(memory_region_name(&region->mmaps[i].mem),
                               region->mmaps[i].offset,
                               region->mmaps[i].offset +
                               region->mmaps[i].size - 1);
    }

    return 0;
}

void vfio_region_exit(VFIORegion *region)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            memory_region_del_subregion(region->mem, &region->mmaps[i].mem);
        }
    }

    trace_vfio_region_exit(region->vbasedev->name, region->nr);
}

void vfio_region_finalize(VFIORegion *region)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            munmap(region->mmaps[i].mmap, region->mmaps[i].size);
            object_unparent(OBJECT(&region->mmaps[i].mem));
        }
    }

    object_unparent(OBJECT(region->mem));

    g_free(region->mem);
    g_free(region->mmaps);

    trace_vfio_region_finalize(region->vbasedev->name, region->nr);

    region->mem = NULL;
    region->mmaps = NULL;
    region->nr_mmaps = 0;
    region->size = 0;
    region->flags = 0;
    region->nr = 0;
}

void vfio_region_mmaps_set_enabled(VFIORegion *region, bool enabled)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            memory_region_set_enabled(&region->mmaps[i].mem, enabled);
        }
    }

    trace_vfio_region_mmaps_set_enabled(memory_region_name(region->mem),
                                        enabled);
}

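/*
 * The handler below makes two passes: first every realized device gets a
 * chance to mark itself as needing a reset, then the (possibly
 * multi-device) hot resets are performed, so that a single bus reset can
 * serve several affected devices.
 */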
void vfio_reset_handler(void *opaque)
{
    VFIOGroup *group;
    VFIODevice *vbasedev;

    QLIST_FOREACH(group, &vfio_group_list, next) {
        QLIST_FOREACH(vbasedev, &group->device_list, next) {
            if (vbasedev->dev->realized) {
                vbasedev->ops->vfio_compute_needs_reset(vbasedev);
            }
        }
    }

    QLIST_FOREACH(group, &vfio_group_list, next) {
        QLIST_FOREACH(vbasedev, &group->device_list, next) {
            if (vbasedev->dev->realized && vbasedev->needs_reset) {
                vbasedev->ops->vfio_hot_reset_multi(vbasedev);
            }
        }
    }
}

static void vfio_kvm_device_add_group(VFIOGroup *group)
{
#ifdef CONFIG_KVM
    struct kvm_device_attr attr = {
        .group = KVM_DEV_VFIO_GROUP,
        .attr = KVM_DEV_VFIO_GROUP_ADD,
        .addr = (uint64_t)(unsigned long)&group->fd,
    };

    if (!kvm_enabled()) {
        return;
    }

    if (vfio_kvm_device_fd < 0) {
        struct kvm_create_device cd = {
            .type = KVM_DEV_TYPE_VFIO,
        };

        if (kvm_vm_ioctl(kvm_state, KVM_CREATE_DEVICE, &cd)) {
            error_report("Failed to create KVM VFIO device: %m");
            return;
        }

        vfio_kvm_device_fd = cd.fd;
    }

    if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
        error_report("Failed to add group %d to KVM VFIO device: %m",
                     group->groupid);
    }
#endif
}

static void vfio_kvm_device_del_group(VFIOGroup *group)
{
#ifdef CONFIG_KVM
    struct kvm_device_attr attr = {
        .group = KVM_DEV_VFIO_GROUP,
        .attr = KVM_DEV_VFIO_GROUP_DEL,
        .addr = (uint64_t)(unsigned long)&group->fd,
    };

    if (vfio_kvm_device_fd < 0) {
        return;
    }

    if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
        error_report("Failed to remove group %d from KVM VFIO device: %m",
                     group->groupid);
    }
#endif
}

static VFIOAddressSpace *vfio_get_address_space(AddressSpace *as)
{
    VFIOAddressSpace *space;

    QLIST_FOREACH(space, &vfio_address_spaces, list) {
        if (space->as == as) {
            return space;
        }
    }

    /* No suitable VFIOAddressSpace, create a new one */
    space = g_malloc0(sizeof(*space));
    space->as = as;
    QLIST_INIT(&space->containers);

    QLIST_INSERT_HEAD(&vfio_address_spaces, space, list);

    return space;
}

static void vfio_put_address_space(VFIOAddressSpace *space)
{
    if (QLIST_EMPTY(&space->containers)) {
        QLIST_REMOVE(space, list);
        g_free(space);
    }
}

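/*
 * Attach a group to a container in the given address space: reuse an
 * existing container in that space if the kernel accepts the group into
 * it, otherwise open a new /dev/vfio/vfio container, negotiate an IOMMU
 * type (Type1/Type1v2 or sPAPR TCE) and register the memory listener
 * that mirrors guest memory into the IOMMU.
 */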
static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
                                  Error **errp)
{
    VFIOContainer *container;
    int ret, fd;
    VFIOAddressSpace *space;

    space = vfio_get_address_space(as);

    /*
     * VFIO is currently incompatible with memory ballooning insofar as the
     * madvise to purge (zap) the page from QEMU's address space does not
     * interact with the memory API and therefore leaves stale virtual to
     * physical mappings in the IOMMU if the page was previously pinned.  We
     * therefore add a balloon inhibit for each group added to a container,
     * whether the container is used individually or shared.  This provides
     * us with options to allow devices within a group to opt-in and allow
     * ballooning, so long as it is done consistently for a group (for
     * instance if the device is an mdev device where it is known that the
     * host vendor driver will never pin pages outside of the working set of
     * the guest driver, which would thus not be ballooning candidates).
     *
     * The first opportunity to induce pinning occurs here where we attempt
     * to attach the group to existing containers within the AddressSpace.
     * If any pages are already zapped from the virtual address space, such
     * as from a previous ballooning opt-in, new pinning will cause valid
     * mappings to be re-established.  Likewise, when the overall
     * MemoryListener for a new container is registered, a replay of mappings
     * within the AddressSpace will occur, re-establishing any previously
     * zapped pages as well.
     *
     * NB. Balloon inhibiting does not currently block operation of the
     * balloon driver or revoke previously pinned pages, it only prevents
     * calling madvise to modify the virtual mapping of ballooned pages.
     */
    qemu_balloon_inhibit(true);

    QLIST_FOREACH(container, &space->containers, next) {
        if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) {
            group->container = container;
            QLIST_INSERT_HEAD(&container->group_list, group, container_next);
            vfio_kvm_device_add_group(group);
            return 0;
        }
    }

    fd = qemu_open("/dev/vfio/vfio", O_RDWR);
    if (fd < 0) {
        error_setg_errno(errp, errno, "failed to open /dev/vfio/vfio");
        ret = -errno;
        goto put_space_exit;
    }

    ret = ioctl(fd, VFIO_GET_API_VERSION);
    if (ret != VFIO_API_VERSION) {
        error_setg(errp, "supported vfio version: %d, "
                   "reported version: %d", VFIO_API_VERSION, ret);
        ret = -EINVAL;
        goto close_fd_exit;
    }

    container = g_malloc0(sizeof(*container));
    container->space = space;
    container->fd = fd;
    QLIST_INIT(&container->giommu_list);
    QLIST_INIT(&container->hostwin_list);
    if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU) ||
        ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1v2_IOMMU)) {
        bool v2 = !!ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1v2_IOMMU);
        struct vfio_iommu_type1_info info;

        ret = ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &fd);
        if (ret) {
            error_setg_errno(errp, errno, "failed to set group container");
            ret = -errno;
            goto free_container_exit;
        }

        container->iommu_type = v2 ? VFIO_TYPE1v2_IOMMU : VFIO_TYPE1_IOMMU;
        ret = ioctl(fd, VFIO_SET_IOMMU, container->iommu_type);
        if (ret) {
            error_setg_errno(errp, errno, "failed to set iommu for container");
            ret = -errno;
            goto free_container_exit;
        }

        /*
         * FIXME: This assumes that a Type1 IOMMU can map any 64-bit
         * IOVA whatsoever.  That's not actually true, but the current
         * kernel interface doesn't tell us what it can map, and the
         * existing Type1 IOMMUs generally support any IOVA we're
         * going to actually try in practice.
         */
        info.argsz = sizeof(info);
        ret = ioctl(fd, VFIO_IOMMU_GET_INFO, &info);
        /* Ignore errors */
        if (ret || !(info.flags & VFIO_IOMMU_INFO_PGSIZES)) {
            /* Assume 4k IOVA page size */
            info.iova_pgsizes = 4096;
        }
        vfio_host_win_add(container, 0, (hwaddr)-1, info.iova_pgsizes);
    } else if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_SPAPR_TCE_IOMMU) ||
               ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_SPAPR_TCE_v2_IOMMU)) {
        struct vfio_iommu_spapr_tce_info info;
        bool v2 = !!ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_SPAPR_TCE_v2_IOMMU);

        ret = ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &fd);
        if (ret) {
            error_setg_errno(errp, errno, "failed to set group container");
            ret = -errno;
            goto free_container_exit;
        }
        container->iommu_type =
            v2 ? VFIO_SPAPR_TCE_v2_IOMMU : VFIO_SPAPR_TCE_IOMMU;
        ret = ioctl(fd, VFIO_SET_IOMMU, container->iommu_type);
        if (ret) {
            container->iommu_type = VFIO_SPAPR_TCE_IOMMU;
            v2 = false;
            ret = ioctl(fd, VFIO_SET_IOMMU, container->iommu_type);
        }
        if (ret) {
            error_setg_errno(errp, errno, "failed to set iommu for container");
            ret = -errno;
            goto free_container_exit;
        }

        /*
         * The host kernel code implementing VFIO_IOMMU_DISABLE is called
         * when container fd is closed so we do not call it explicitly
         * in this file.
         */
        if (!v2) {
            ret = ioctl(fd, VFIO_IOMMU_ENABLE);
            if (ret) {
                error_setg_errno(errp, errno, "failed to enable container");
                ret = -errno;
                goto free_container_exit;
            }
        } else {
            container->prereg_listener = vfio_prereg_listener;

            memory_listener_register(&container->prereg_listener,
                                     &address_space_memory);
            if (container->error) {
                memory_listener_unregister(&container->prereg_listener);
                ret = container->error;
                error_setg(errp,
                    "RAM memory listener initialization failed for container");
                goto free_container_exit;
            }
        }

        info.argsz = sizeof(info);
        ret = ioctl(fd, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);
        if (ret) {
            error_setg_errno(errp, errno,
                             "VFIO_IOMMU_SPAPR_TCE_GET_INFO failed");
            ret = -errno;
            if (v2) {
                memory_listener_unregister(&container->prereg_listener);
            }
            goto free_container_exit;
        }

        if (v2) {
            /*
             * There is a default window in just created container.
             * To make region_add/del simpler, we better remove this
             * window now and let those iommu_listener callbacks
             * create/remove them when needed.
             */
            ret = vfio_spapr_remove_window(container, info.dma32_window_start);
            if (ret) {
                error_setg_errno(errp, -ret,
                                 "failed to remove existing window");
                goto free_container_exit;
            }
        } else {
            /* The default table uses 4K pages */
            vfio_host_win_add(container, info.dma32_window_start,
                              info.dma32_window_start +
                              info.dma32_window_size - 1,
                              0x1000);
        }
    } else {
        error_setg(errp, "No available IOMMU models");
        ret = -EINVAL;
        goto free_container_exit;
    }

    vfio_kvm_device_add_group(group);

    QLIST_INIT(&container->group_list);
    QLIST_INSERT_HEAD(&space->containers, container, next);

    group->container = container;
    QLIST_INSERT_HEAD(&container->group_list, group, container_next);

    container->listener = vfio_memory_listener;

    memory_listener_register(&container->listener, container->space->as);

    if (container->error) {
        ret = container->error;
        error_setg_errno(errp, -ret,
                         "memory listener initialization failed for container");
        goto listener_release_exit;
    }

    container->initialized = true;

    return 0;
listener_release_exit:
    QLIST_REMOVE(group, container_next);
    QLIST_REMOVE(container, next);
    vfio_kvm_device_del_group(group);
    vfio_listener_release(container);

free_container_exit:
    g_free(container);

close_fd_exit:
    close(fd);

put_space_exit:
    qemu_balloon_inhibit(false);
    vfio_put_address_space(space);

    return ret;
}

static void vfio_disconnect_container(VFIOGroup *group)
{
    VFIOContainer *container = group->container;

    QLIST_REMOVE(group, container_next);
    group->container = NULL;

    /*
     * Explicitly release the listener first before unset container,
     * since unset may destroy the backend container if it's the last
     * group.
     */
    if (QLIST_EMPTY(&container->group_list)) {
        vfio_listener_release(container);
    }

    if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, &container->fd)) {
        error_report("vfio: error disconnecting group %d from container",
                     group->groupid);
    }

    if (QLIST_EMPTY(&container->group_list)) {
        VFIOAddressSpace *space = container->space;
        VFIOGuestIOMMU *giommu, *tmp;

        QLIST_REMOVE(container, next);

        QLIST_FOREACH_SAFE(giommu, &container->giommu_list, giommu_next, tmp) {
            memory_region_unregister_iommu_notifier(
                    MEMORY_REGION(giommu->iommu), &giommu->n);
            QLIST_REMOVE(giommu, giommu_next);
            g_free(giommu);
        }

        trace_vfio_disconnect_container(container->fd);
        close(container->fd);
        g_free(container);

        vfio_put_address_space(space);
    }
}

VFIOGroup *vfio_get_group(int groupid, AddressSpace *as, Error **errp)
{
    VFIOGroup *group;
    char path[32];
    struct vfio_group_status status = { .argsz = sizeof(status) };

    QLIST_FOREACH(group, &vfio_group_list, next) {
        if (group->groupid == groupid) {
            /* Found it.  Now is it already in the right context? */
            if (group->container->space->as == as) {
                return group;
            } else {
                error_setg(errp, "group %d used in multiple address spaces",
                           group->groupid);
                return NULL;
            }
        }
    }

    group = g_malloc0(sizeof(*group));

    snprintf(path, sizeof(path), "/dev/vfio/%d", groupid);
    group->fd = qemu_open(path, O_RDWR);
    if (group->fd < 0) {
        error_setg_errno(errp, errno, "failed to open %s", path);
        goto free_group_exit;
    }

    if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status)) {
        error_setg_errno(errp, errno, "failed to get group %d status", groupid);
        goto close_fd_exit;
    }

    if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
        error_setg(errp, "group %d is not viable", groupid);
        error_append_hint(errp,
                          "Please ensure all devices within the iommu_group "
                          "are bound to their vfio bus driver.\n");
        goto close_fd_exit;
    }

    group->groupid = groupid;
    QLIST_INIT(&group->device_list);

    if (vfio_connect_container(group, as, errp)) {
        error_prepend(errp, "failed to setup container for group %d: ",
                      groupid);
        goto close_fd_exit;
    }

    if (QLIST_EMPTY(&vfio_group_list)) {
        qemu_register_reset(vfio_reset_handler, NULL);
    }

    QLIST_INSERT_HEAD(&vfio_group_list, group, next);

    return group;

close_fd_exit:
    close(group->fd);

free_group_exit:
    g_free(group);

    return NULL;
}

void vfio_put_group(VFIOGroup *group)
{
    if (!group || !QLIST_EMPTY(&group->device_list)) {
        return;
    }

    if (!group->balloon_allowed) {
        qemu_balloon_inhibit(false);
    }
    vfio_kvm_device_del_group(group);
    vfio_disconnect_container(group);
    QLIST_REMOVE(group, next);
    trace_vfio_put_group(group->fd);
    close(group->fd);
    g_free(group);

    if (QLIST_EMPTY(&vfio_group_list)) {
        qemu_unregister_reset(vfio_reset_handler, NULL);
    }
}

int vfio_get_device(VFIOGroup *group, const char *name,
                    VFIODevice *vbasedev, Error **errp)
{
    struct vfio_device_info dev_info = { .argsz = sizeof(dev_info) };
    int ret, fd;

    fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, name);
    if (fd < 0) {
        error_setg_errno(errp, errno, "error getting device from group %d",
                         group->groupid);
        error_append_hint(errp,
                      "Verify all devices in group %d are bound to vfio-<bus> "
                      "or pci-stub and not already in use\n", group->groupid);
        return fd;
    }

    ret = ioctl(fd, VFIO_DEVICE_GET_INFO, &dev_info);
    if (ret) {
        error_setg_errno(errp, errno, "error getting device info");
        close(fd);
        return ret;
    }

    /*
     * Clear the balloon inhibitor for this group if the driver knows the
     * device operates compatibly with ballooning.  Setting must be consistent
     * per group, but since compatibility is really only possible with mdev
     * currently, we expect singleton groups.
     */
    if (vbasedev->balloon_allowed != group->balloon_allowed) {
        if (!QLIST_EMPTY(&group->device_list)) {
            error_setg(errp,
                       "Inconsistent device balloon setting within group");
            close(fd); /* don't leak the device fd on this error path */
            return -1;
        }

        if (!group->balloon_allowed) {
            group->balloon_allowed = true;
            qemu_balloon_inhibit(false);
        }
    }

    vbasedev->fd = fd;
    vbasedev->group = group;
    QLIST_INSERT_HEAD(&group->device_list, vbasedev, next);

    vbasedev->num_irqs = dev_info.num_irqs;
    vbasedev->num_regions = dev_info.num_regions;
    vbasedev->flags = dev_info.flags;

    trace_vfio_get_device(name, dev_info.flags, dev_info.num_regions,
                          dev_info.num_irqs);

    vbasedev->reset_works = !!(dev_info.flags & VFIO_DEVICE_FLAGS_RESET);
    return 0;
}

void vfio_put_base_device(VFIODevice *vbasedev)
{
    if (!vbasedev->group) {
        return;
    }
    QLIST_REMOVE(vbasedev, next);
    vbasedev->group = NULL;
    trace_vfio_put_base_device(vbasedev->fd);
    close(vbasedev->fd);
}

int vfio_get_region_info(VFIODevice *vbasedev, int index,
                         struct vfio_region_info **info)
{
    size_t argsz = sizeof(struct vfio_region_info);

    *info = g_malloc0(argsz);

    (*info)->index = index;
retry:
    (*info)->argsz = argsz;

    if (ioctl(vbasedev->fd, VFIO_DEVICE_GET_REGION_INFO, *info)) {
        g_free(*info);
        *info = NULL;
        return -errno;
    }

    if ((*info)->argsz > argsz) {
        argsz = (*info)->argsz;
        *info = g_realloc(*info, argsz);

        goto retry;
    }

    return 0;
}
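
/*
 * The retry loop above follows the usual VFIO argsz protocol: the kernel
 * writes the size it actually needs back into argsz, and a reported size
 * larger than what we supplied means capability chain data would have
 * been truncated, so we grow the buffer and ask again.
 */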

int vfio_get_dev_region_info(VFIODevice *vbasedev, uint32_t type,
                             uint32_t subtype, struct vfio_region_info **info)
{
    int i;

    for (i = 0; i < vbasedev->num_regions; i++) {
        struct vfio_info_cap_header *hdr;
        struct vfio_region_info_cap_type *cap_type;

        if (vfio_get_region_info(vbasedev, i, info)) {
            continue;
        }

        hdr = vfio_get_region_info_cap(*info, VFIO_REGION_INFO_CAP_TYPE);
        if (!hdr) {
            g_free(*info);
            continue;
        }

        cap_type = container_of(hdr, struct vfio_region_info_cap_type, header);

        trace_vfio_get_dev_region(vbasedev->name, i,
                                  cap_type->type, cap_type->subtype);

        if (cap_type->type == type && cap_type->subtype == subtype) {
            return 0;
        }

        g_free(*info);
    }

    *info = NULL;
    return -ENODEV;
}

bool vfio_has_region_cap(VFIODevice *vbasedev, int region, uint16_t cap_type)
{
    struct vfio_region_info *info = NULL;
    bool ret = false;

    if (!vfio_get_region_info(vbasedev, region, &info)) {
        if (vfio_get_region_info_cap(info, cap_type)) {
            ret = true;
        }
        g_free(info);
    }

    return ret;
}

/*
 * Interfaces for IBM EEH (Enhanced Error Handling)
 */
static bool vfio_eeh_container_ok(VFIOContainer *container)
{
    /*
     * As of 2016-03-04 (linux-4.5) the host kernel EEH/VFIO
     * implementation is broken if there are multiple groups in a
     * container.  The hardware works in units of Partitionable
     * Endpoints (== IOMMU groups) and the EEH operations naively
     * iterate across all groups in the container, without any logic
     * to make sure the groups have their state synchronized.  For
     * certain operations (ENABLE) that might be ok, until an error
     * occurs, but for others (GET_STATE) it's clearly broken.
     */

    /*
     * XXX Once fixed kernels exist, test for them here
     */

    if (QLIST_EMPTY(&container->group_list)) {
        return false;
    }

    if (QLIST_NEXT(QLIST_FIRST(&container->group_list), container_next)) {
        return false;
    }

    return true;
}

static int vfio_eeh_container_op(VFIOContainer *container, uint32_t op)
{
    struct vfio_eeh_pe_op pe_op = {
        .argsz = sizeof(pe_op),
        .op = op,
    };
    int ret;

    if (!vfio_eeh_container_ok(container)) {
        error_report("vfio/eeh: EEH_PE_OP 0x%x: "
                     "kernel requires a container with exactly one group", op);
        return -EPERM;
    }

    ret = ioctl(container->fd, VFIO_EEH_PE_OP, &pe_op);
    if (ret < 0) {
        error_report("vfio/eeh: EEH_PE_OP 0x%x failed: %m", op);
        return -errno;
    }

    return ret;
}

static VFIOContainer *vfio_eeh_as_container(AddressSpace *as)
{
    VFIOAddressSpace *space = vfio_get_address_space(as);
    VFIOContainer *container = NULL;

    if (QLIST_EMPTY(&space->containers)) {
        /* No containers to act on */
        goto out;
    }

    container = QLIST_FIRST(&space->containers);

    if (QLIST_NEXT(container, next)) {
        /*
         * We don't yet have logic to synchronize EEH state across
         * multiple containers.
         */
        container = NULL;
        goto out;
    }

out:
    vfio_put_address_space(space);
    return container;
}

bool vfio_eeh_as_ok(AddressSpace *as)
{
    VFIOContainer *container = vfio_eeh_as_container(as);

    return (container != NULL) && vfio_eeh_container_ok(container);
}

int vfio_eeh_as_op(AddressSpace *as, uint32_t op)
{
    VFIOContainer *container = vfio_eeh_as_container(as);

    if (!container) {
        return -ENODEV;
    }
    return vfio_eeh_container_op(container, op);
}