/*
 * generic functions used by VFIO devices
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Alex Williamson <alex.williamson@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Based on qemu-kvm device-assignment:
 *  Adapted for KVM by Qumranet.
 *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
 *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
 *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
 *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
 *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>
#ifdef CONFIG_KVM
#include <linux/kvm.h>
#endif
#include <linux/vfio.h>

#include "hw/vfio/vfio-common.h"
#include "hw/vfio/vfio.h"
#include "exec/address-spaces.h"
#include "exec/memory.h"
#include "exec/ram_addr.h"
#include "hw/hw.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/range.h"
#include "sysemu/kvm.h"
#include "sysemu/reset.h"
#include "trace.h"
#include "qapi/error.h"
#include "migration/migration.h"

VFIOGroupList vfio_group_list =
    QLIST_HEAD_INITIALIZER(vfio_group_list);
static QLIST_HEAD(, VFIOAddressSpace) vfio_address_spaces =
    QLIST_HEAD_INITIALIZER(vfio_address_spaces);

#ifdef CONFIG_KVM
/*
 * We have a single VFIO pseudo device per KVM VM.  Once created it lives
 * for the life of the VM.  Closing the file descriptor only drops our
 * reference to it and the device's reference to kvm.  Therefore once
 * initialized, this file descriptor is only released on QEMU exit and
 * we'll re-use it should another vfio device be attached before then.
 */
static int vfio_kvm_device_fd = -1;
#endif

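/*
 * The three helpers below all issue VFIO_DEVICE_SET_IRQS with
 * VFIO_IRQ_SET_DATA_NONE, i.e. no eventfd payload; the flags alone select
 * the action.  Note the count fields: per the VFIO uAPI, a count of 0
 * with ACTION_TRIGGER disables the entire index, while mask/unmask act on
 * a single interrupt (count = 1).
 */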
/*
 * Common VFIO interrupt disable
 */
void vfio_disable_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER,
        .index = index,
        .start = 0,
        .count = 0,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

void vfio_unmask_single_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK,
        .index = index,
        .start = 0,
        .count = 1,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

void vfio_mask_single_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK,
        .index = index,
        .start = 0,
        .count = 1,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

static inline const char *action_to_str(int action)
{
    switch (action) {
    case VFIO_IRQ_SET_ACTION_MASK:
        return "MASK";
    case VFIO_IRQ_SET_ACTION_UNMASK:
        return "UNMASK";
    case VFIO_IRQ_SET_ACTION_TRIGGER:
        return "TRIGGER";
    default:
        return "UNKNOWN ACTION";
    }
}

static const char *index_to_str(VFIODevice *vbasedev, int index)
{
    if (vbasedev->type != VFIO_DEVICE_TYPE_PCI) {
        return NULL;
    }

    switch (index) {
    case VFIO_PCI_INTX_IRQ_INDEX:
        return "INTX";
    case VFIO_PCI_MSI_IRQ_INDEX:
        return "MSI";
    case VFIO_PCI_MSIX_IRQ_INDEX:
        return "MSIX";
    case VFIO_PCI_ERR_IRQ_INDEX:
        return "ERR";
    case VFIO_PCI_REQ_IRQ_INDEX:
        return "REQ";
    default:
        return NULL;
    }
}

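/*
 * vfio_set_irq_signaling - wire up (fd >= 0) or tear down (fd < 0) an
 * eventfd for one interrupt of an IRQ index.  The ioctl argument is the
 * variable-length struct vfio_irq_set with a single int32_t fd appended
 * after the header, hence the g_malloc0(argsz) rather than a stack struct.
 *
 * A minimal usage sketch (hypothetical caller, assuming an EventNotifier
 * "intx" already set up with event_notifier_init()):
 *
 *     int fd = event_notifier_get_fd(&intx);
 *     if (vfio_set_irq_signaling(vbasedev, VFIO_PCI_INTX_IRQ_INDEX, 0,
 *                                VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err)) {
 *         error_reportf_err(err, "INTx setup failed: ");
 *     }
 */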
int vfio_set_irq_signaling(VFIODevice *vbasedev, int index, int subindex,
                           int action, int fd, Error **errp)
{
    struct vfio_irq_set *irq_set;
    int argsz, ret = 0;
    const char *name;
    int32_t *pfd;

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | action;
    irq_set->index = index;
    irq_set->start = subindex;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;
    *pfd = fd;

    if (ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, irq_set)) {
        ret = -errno;
    }
    g_free(irq_set);

    if (!ret) {
        return 0;
    }

    error_setg_errno(errp, -ret, "VFIO_DEVICE_SET_IRQS failure");

    name = index_to_str(vbasedev, index);
    if (name) {
        error_prepend(errp, "%s-%d: ", name, subindex);
    } else {
        error_prepend(errp, "index %d-%d: ", index, subindex);
    }
    error_prepend(errp,
                  "Failed to %s %s eventfd signaling for interrupt ",
                  fd < 0 ? "tear down" : "set up", action_to_str(action));
    return ret;
}

/*
 * IO Port/MMIO - Beware of the endians, VFIO is always little endian
 */
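/*
 * Illustrative example: a 4-byte guest write of 0x12345678 on a big-endian
 * host must pass through cpu_to_le32() before the pwrite(), because the
 * kernel's region interface expects little-endian data regardless of host
 * CPU.  On little-endian hosts the cpu_to_le*()/le*_to_cpu() calls are
 * no-ops.
 */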
void vfio_region_write(void *opaque, hwaddr addr,
                       uint64_t data, unsigned size)
{
    VFIORegion *region = opaque;
    VFIODevice *vbasedev = region->vbasedev;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;

    switch (size) {
    case 1:
        buf.byte = data;
        break;
    case 2:
        buf.word = cpu_to_le16(data);
        break;
    case 4:
        buf.dword = cpu_to_le32(data);
        break;
    case 8:
        buf.qword = cpu_to_le64(data);
        break;
    default:
        hw_error("vfio: unsupported write size, %d bytes", size);
        break;
    }

    if (pwrite(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) {
        error_report("%s(%s:region%d+0x%"HWADDR_PRIx", 0x%"PRIx64
                     ",%d) failed: %m",
                     __func__, vbasedev->name, region->nr,
                     addr, data, size);
    }

    trace_vfio_region_write(vbasedev->name, region->nr, addr, data, size);

    /*
     * A read or write to a BAR always signals an INTx EOI.  This will
     * do nothing if not pending (including not in INTx mode).  We assume
     * that a BAR access is in response to an interrupt and that BAR
     * accesses will service the interrupt.  Unfortunately, we don't know
     * which access will service the interrupt, so we're potentially
     * getting quite a few host interrupts per guest interrupt.
     */
    vbasedev->ops->vfio_eoi(vbasedev);
}

uint64_t vfio_region_read(void *opaque,
                          hwaddr addr, unsigned size)
{
    VFIORegion *region = opaque;
    VFIODevice *vbasedev = region->vbasedev;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    uint64_t data = 0;

    if (pread(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) {
        error_report("%s(%s:region%d+0x%"HWADDR_PRIx", %d) failed: %m",
                     __func__, vbasedev->name, region->nr,
                     addr, size);
        return (uint64_t)-1;
    }
    switch (size) {
    case 1:
        data = buf.byte;
        break;
    case 2:
        data = le16_to_cpu(buf.word);
        break;
    case 4:
        data = le32_to_cpu(buf.dword);
        break;
    case 8:
        data = le64_to_cpu(buf.qword);
        break;
    default:
        hw_error("vfio: unsupported read size, %d bytes", size);
        break;
    }

    trace_vfio_region_read(vbasedev->name, region->nr, addr, size, data);

    /* Same as write above */
    vbasedev->ops->vfio_eoi(vbasedev);

    return data;
}

const MemoryRegionOps vfio_region_ops = {
    .read = vfio_region_read,
    .write = vfio_region_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

/*
 * Device state interfaces
 */

bool vfio_mig_active(void)
{
    VFIOGroup *group;
    VFIODevice *vbasedev;

    if (QLIST_EMPTY(&vfio_group_list)) {
        return false;
    }

    QLIST_FOREACH(group, &vfio_group_list, next) {
        QLIST_FOREACH(vbasedev, &group->device_list, next) {
            if (vbasedev->migration_blocker) {
                return false;
            }
        }
    }
    return true;
}

static bool vfio_devices_all_stopped_and_saving(VFIOContainer *container)
{
    VFIOGroup *group;
    VFIODevice *vbasedev;
    MigrationState *ms = migrate_get_current();

    if (!migration_is_setup_or_active(ms->state)) {
        return false;
    }

    QLIST_FOREACH(group, &container->group_list, container_next) {
        QLIST_FOREACH(vbasedev, &group->device_list, next) {
            VFIOMigration *migration = vbasedev->migration;

            if (!migration) {
                return false;
            }

            if ((migration->device_state & VFIO_DEVICE_STATE_SAVING) &&
                !(migration->device_state & VFIO_DEVICE_STATE_RUNNING)) {
                continue;
            } else {
                return false;
            }
        }
    }
    return true;
}

static bool vfio_devices_all_running_and_saving(VFIOContainer *container)
{
    VFIOGroup *group;
    VFIODevice *vbasedev;
    MigrationState *ms = migrate_get_current();

    if (!migration_is_setup_or_active(ms->state)) {
        return false;
    }

    QLIST_FOREACH(group, &container->group_list, container_next) {
        QLIST_FOREACH(vbasedev, &group->device_list, next) {
            VFIOMigration *migration = vbasedev->migration;

            if (!migration) {
                return false;
            }

            if ((migration->device_state & VFIO_DEVICE_STATE_SAVING) &&
                (migration->device_state & VFIO_DEVICE_STATE_RUNNING)) {
                continue;
            } else {
                return false;
            }
        }
    }
    return true;
}

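/*
 * vfio_dma_unmap_bitmap - unmap a range and, in the same ioctl, retrieve
 * the dirty bitmap the kernel accumulated for it.  The bitmap uses one bit
 * per TARGET_PAGE_SIZE page, rounded up to whole __u64 words.  Worked
 * example (illustrative, 4 KiB target pages): unmapping 1 GiB covers
 * 262144 pages, so bitmap->size = ROUND_UP(262144, 64) / 8 = 32768 bytes.
 */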
static int vfio_dma_unmap_bitmap(VFIOContainer *container,
                                 hwaddr iova, ram_addr_t size,
                                 IOMMUTLBEntry *iotlb)
{
    struct vfio_iommu_type1_dma_unmap *unmap;
    struct vfio_bitmap *bitmap;
    uint64_t pages = TARGET_PAGE_ALIGN(size) >> TARGET_PAGE_BITS;
    int ret;

    unmap = g_malloc0(sizeof(*unmap) + sizeof(*bitmap));

    unmap->argsz = sizeof(*unmap) + sizeof(*bitmap);
    unmap->iova = iova;
    unmap->size = size;
    unmap->flags |= VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP;
    bitmap = (struct vfio_bitmap *)&unmap->data;

    /*
     * cpu_physical_memory_set_dirty_lebitmap() expects pages in bitmap of
     * TARGET_PAGE_SIZE to mark those dirty.  Hence set bitmap_pgsize to
     * TARGET_PAGE_SIZE.
     */
    bitmap->pgsize = TARGET_PAGE_SIZE;
    bitmap->size = ROUND_UP(pages, sizeof(__u64) * BITS_PER_BYTE) /
                   BITS_PER_BYTE;

    if (bitmap->size > container->max_dirty_bitmap_size) {
        error_report("UNMAP: Size of bitmap too big 0x%"PRIx64,
                     (uint64_t)bitmap->size);
        ret = -E2BIG;
        goto unmap_exit;
    }

    bitmap->data = g_try_malloc0(bitmap->size);
    if (!bitmap->data) {
        ret = -ENOMEM;
        goto unmap_exit;
    }

    ret = ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, unmap);
    if (!ret) {
        cpu_physical_memory_set_dirty_lebitmap((unsigned long *)bitmap->data,
                iotlb->translated_addr, pages);
    } else {
        error_report("VFIO_UNMAP_DMA with DIRTY_BITMAP : %m");
    }

    g_free(bitmap->data);
unmap_exit:
    g_free(unmap);
    return ret;
}

/*
 * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86
 */
static int vfio_dma_unmap(VFIOContainer *container,
                          hwaddr iova, ram_addr_t size,
                          IOMMUTLBEntry *iotlb)
{
    struct vfio_iommu_type1_dma_unmap unmap = {
        .argsz = sizeof(unmap),
        .flags = 0,
        .iova = iova,
        .size = size,
    };

    if (iotlb && container->dirty_pages_supported &&
        vfio_devices_all_running_and_saving(container)) {
        return vfio_dma_unmap_bitmap(container, iova, size, iotlb);
    }

    while (ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) {
        /*
         * The type1 backend has an off-by-one bug in the kernel (71a7d3d78e3c
         * v4.15) where an overflow in its wrap-around check prevents us from
         * unmapping the last page of the address space.  Test for the error
         * condition and re-try the unmap excluding the last page.  The
         * expectation is that we've never mapped the last page anyway and this
         * unmap request comes via vIOMMU support which also makes it unlikely
         * that this page is used.  This bug was introduced well after type1 v2
         * support was introduced, so we shouldn't need to test for v1.  A fix
         * is queued for kernel v5.0 so this workaround can be removed once
         * affected kernels are sufficiently deprecated.
         */
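        /*
         * Illustrative instance of the check below: an unmap ending at the
         * top of the 64-bit space, e.g. iova = 0xfffffffffffff000 with
         * size = 0x1000, makes iova + size wrap to 0, so
         * !(unmap.iova + unmap.size) fires and we shave one host IOMMU page
         * off the request before retrying.
         */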
        if (errno == EINVAL && unmap.size && !(unmap.iova + unmap.size) &&
            container->iommu_type == VFIO_TYPE1v2_IOMMU) {
            trace_vfio_dma_unmap_overflow_workaround();
            unmap.size -= 1ULL << ctz64(container->pgsizes);
            continue;
        }
        error_report("VFIO_UNMAP_DMA failed: %s", strerror(errno));
        return -errno;
    }

    return 0;
}

static int vfio_dma_map(VFIOContainer *container, hwaddr iova,
                        ram_addr_t size, void *vaddr, bool readonly)
{
    struct vfio_iommu_type1_dma_map map = {
        .argsz = sizeof(map),
        .flags = VFIO_DMA_MAP_FLAG_READ,
        .vaddr = (__u64)(uintptr_t)vaddr,
        .iova = iova,
        .size = size,
    };

    if (!readonly) {
        map.flags |= VFIO_DMA_MAP_FLAG_WRITE;
    }

    /*
     * Try the mapping, if it fails with EBUSY, unmap the region and try
     * again.  This shouldn't be necessary, but we sometimes see it in
     * the VGA ROM space.
     */
    if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0 ||
        (errno == EBUSY && vfio_dma_unmap(container, iova, size, NULL) == 0 &&
         ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0)) {
        return 0;
    }

    error_report("VFIO_MAP_DMA failed: %s", strerror(errno));
    return -errno;
}

static void vfio_host_win_add(VFIOContainer *container,
                              hwaddr min_iova, hwaddr max_iova,
                              uint64_t iova_pgsizes)
{
    VFIOHostDMAWindow *hostwin;

    QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
        if (ranges_overlap(hostwin->min_iova,
                           hostwin->max_iova - hostwin->min_iova + 1,
                           min_iova,
                           max_iova - min_iova + 1)) {
            hw_error("%s: Overlapping host DMA windows are not supported",
                     __func__);
        }
    }

    hostwin = g_malloc0(sizeof(*hostwin));

    hostwin->min_iova = min_iova;
    hostwin->max_iova = max_iova;
    hostwin->iova_pgsizes = iova_pgsizes;
    QLIST_INSERT_HEAD(&container->hostwin_list, hostwin, hostwin_next);
}

static int vfio_host_win_del(VFIOContainer *container, hwaddr min_iova,
                             hwaddr max_iova)
{
    VFIOHostDMAWindow *hostwin;

    QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
        if (hostwin->min_iova == min_iova && hostwin->max_iova == max_iova) {
            QLIST_REMOVE(hostwin, hostwin_next);
            return 0;
        }
    }

    return -1;
}

static bool vfio_listener_skipped_section(MemoryRegionSection *section)
{
    return (!memory_region_is_ram(section->mr) &&
            !memory_region_is_iommu(section->mr)) ||
           /*
            * Sizing an enabled 64-bit BAR can cause spurious mappings to
            * addresses in the upper part of the 64-bit address space.  These
            * are never accessed by the CPU and beyond the address width of
            * some IOMMU hardware.  TODO: VFIO should tell us the IOMMU width.
            */
           section->offset_within_address_space & (1ULL << 63);
}

/* Called with rcu_read_lock held.  */
static bool vfio_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
                               ram_addr_t *ram_addr, bool *read_only)
{
    MemoryRegion *mr;
    hwaddr xlat;
    hwaddr len = iotlb->addr_mask + 1;
    bool writable = iotlb->perm & IOMMU_WO;

    /*
     * The IOMMU TLB entry we have just covers translation through
     * this IOMMU to its immediate target.  We need to translate
     * it the rest of the way through to memory.
     */
    mr = address_space_translate(&address_space_memory,
                                 iotlb->translated_addr,
                                 &xlat, &len, writable,
                                 MEMTXATTRS_UNSPECIFIED);
    if (!memory_region_is_ram(mr)) {
        error_report("iommu map to non-memory area %"HWADDR_PRIx"",
                     xlat);
        return false;
    }

    /*
     * Translation truncates length to the IOMMU page size,
     * check that it did not truncate too much.
     */
    if (len & iotlb->addr_mask) {
        error_report("iommu has granularity incompatible with target AS");
        return false;
    }

    if (vaddr) {
        *vaddr = memory_region_get_ram_ptr(mr) + xlat;
    }

    if (ram_addr) {
        *ram_addr = memory_region_get_ram_addr(mr) + xlat;
    }

    if (read_only) {
        *read_only = !writable || mr->readonly;
    }

    return true;
}

static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
{
    VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, n);
    VFIOContainer *container = giommu->container;
    hwaddr iova = iotlb->iova + giommu->iommu_offset;
    void *vaddr;
    int ret;

    trace_vfio_iommu_map_notify(iotlb->perm == IOMMU_NONE ? "UNMAP" : "MAP",
                                iova, iova + iotlb->addr_mask);

    if (iotlb->target_as != &address_space_memory) {
        error_report("Wrong target AS \"%s\", only system memory is allowed",
                     iotlb->target_as->name ? iotlb->target_as->name : "none");
        return;
    }

    rcu_read_lock();

    if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) {
        bool read_only;

        if (!vfio_get_xlat_addr(iotlb, &vaddr, NULL, &read_only)) {
            goto out;
        }
        /*
         * vaddr is only valid until rcu_read_unlock().  But after
         * vfio_dma_map has set up the mapping the pages will be
         * pinned by the kernel.  This makes sure that the RAM backend
         * of vaddr will always be there, even if the memory object is
         * destroyed and its backing memory munmap-ed.
         */
        ret = vfio_dma_map(container, iova,
                           iotlb->addr_mask + 1, vaddr,
                           read_only);
        if (ret) {
            error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx", %p) = %d (%m)",
                         container, iova,
                         iotlb->addr_mask + 1, vaddr, ret);
        }
    } else {
        ret = vfio_dma_unmap(container, iova, iotlb->addr_mask + 1, iotlb);
        if (ret) {
            error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx") = %d (%m)",
                         container, iova,
                         iotlb->addr_mask + 1, ret);
        }
    }
out:
    rcu_read_unlock();
}

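/*
 * vfio_listener_region_add - map a new MemoryRegionSection into the
 * container.  The mappable window is clamped to target-page granularity:
 * iova rounds the start *up* and llend rounds the end *down*, so partial
 * pages at either edge are left unmapped.  Worked example (illustrative,
 * 4 KiB pages): a section covering 0x1800..0x47ff yields iova = 0x2000
 * and llend = 0x4000, i.e. only 0x2000..0x3fff gets mapped.
 */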
static void vfio_listener_region_add(MemoryListener *listener,
                                     MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer, listener);
    hwaddr iova, end;
    Int128 llend, llsize;
    void *vaddr;
    int ret;
    VFIOHostDMAWindow *hostwin;
    bool hostwin_found;
    Error *err = NULL;

    if (vfio_listener_skipped_section(section)) {
        trace_vfio_listener_region_add_skip(
                section->offset_within_address_space,
                section->offset_within_address_space +
                int128_get64(int128_sub(section->size, int128_one())));
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));

    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }
    end = int128_get64(int128_sub(llend, int128_one()));

    if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
        hwaddr pgsize = 0;

        /* For now intersections are not allowed, we may relax this later */
        QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
            if (ranges_overlap(hostwin->min_iova,
                               hostwin->max_iova - hostwin->min_iova + 1,
                               section->offset_within_address_space,
                               int128_get64(section->size))) {
                error_setg(&err,
                    "region [0x%"PRIx64",0x%"PRIx64"] overlaps with existing "
                    "host DMA window [0x%"PRIx64",0x%"PRIx64"]",
                    section->offset_within_address_space,
                    section->offset_within_address_space +
                        int128_get64(section->size) - 1,
                    hostwin->min_iova, hostwin->max_iova);
                goto fail;
            }
        }

        ret = vfio_spapr_create_window(container, section, &pgsize);
        if (ret) {
            error_setg_errno(&err, -ret, "Failed to create SPAPR window");
            goto fail;
        }

        vfio_host_win_add(container, section->offset_within_address_space,
                          section->offset_within_address_space +
                          int128_get64(section->size) - 1, pgsize);
#ifdef CONFIG_KVM
        if (kvm_enabled()) {
            VFIOGroup *group;
            IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(section->mr);
            struct kvm_vfio_spapr_tce param;
            struct kvm_device_attr attr = {
                .group = KVM_DEV_VFIO_GROUP,
                .attr = KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE,
                .addr = (uint64_t)(unsigned long)&param,
            };

            if (!memory_region_iommu_get_attr(iommu_mr, IOMMU_ATTR_SPAPR_TCE_FD,
                                              &param.tablefd)) {
                QLIST_FOREACH(group, &container->group_list, container_next) {
                    param.groupfd = group->fd;
                    if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
                        error_report("vfio: failed to setup fd %d "
                                     "for a group with fd %d: %s",
                                     param.tablefd, param.groupfd,
                                     strerror(errno));
                        return;
                    }
                    trace_vfio_spapr_group_attach(param.groupfd, param.tablefd);
                }
            }
        }
#endif
    }

    hostwin_found = false;
    QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
        if (hostwin->min_iova <= iova && end <= hostwin->max_iova) {
            hostwin_found = true;
            break;
        }
    }

    if (!hostwin_found) {
        error_setg(&err, "Container %p can't map guest IOVA region"
                   " 0x%"HWADDR_PRIx"..0x%"HWADDR_PRIx, container, iova, end);
        goto fail;
    }

    memory_region_ref(section->mr);

    if (memory_region_is_iommu(section->mr)) {
        VFIOGuestIOMMU *giommu;
        IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(section->mr);
        int iommu_idx;

        trace_vfio_listener_region_add_iommu(iova, end);
        /*
         * FIXME: For VFIO iommu types which have KVM acceleration to
         * avoid bouncing all map/unmaps through qemu this way, this
         * would be the right place to wire that up (tell the KVM
         * device emulation the VFIO iommu handles to use).
         */
        giommu = g_malloc0(sizeof(*giommu));
        giommu->iommu = iommu_mr;
        giommu->iommu_offset = section->offset_within_address_space -
                               section->offset_within_region;
        giommu->container = container;
        llend = int128_add(int128_make64(section->offset_within_region),
                           section->size);
        llend = int128_sub(llend, int128_one());
        iommu_idx = memory_region_iommu_attrs_to_index(iommu_mr,
                                                       MEMTXATTRS_UNSPECIFIED);
        iommu_notifier_init(&giommu->n, vfio_iommu_map_notify,
                            IOMMU_NOTIFIER_ALL,
                            section->offset_within_region,
                            int128_get64(llend),
                            iommu_idx);

        ret = memory_region_register_iommu_notifier(section->mr, &giommu->n,
                                                    &err);
        if (ret) {
            g_free(giommu);
            goto fail;
        }
        QLIST_INSERT_HEAD(&container->giommu_list, giommu, giommu_next);
        memory_region_iommu_replay(giommu->iommu, &giommu->n);

        return;
    }

    /* Here we assume that memory_region_is_ram(section->mr) == true */

    vaddr = memory_region_get_ram_ptr(section->mr) +
            section->offset_within_region +
            (iova - section->offset_within_address_space);

    trace_vfio_listener_region_add_ram(iova, end, vaddr);

    llsize = int128_sub(llend, int128_make64(iova));

    if (memory_region_is_ram_device(section->mr)) {
        hwaddr pgmask = (1ULL << ctz64(hostwin->iova_pgsizes)) - 1;

        if ((iova & pgmask) || (int128_get64(llsize) & pgmask)) {
            trace_vfio_listener_region_add_no_dma_map(
                memory_region_name(section->mr),
                section->offset_within_address_space,
                int128_getlo(section->size),
                pgmask + 1);
            return;
        }
    }

    ret = vfio_dma_map(container, iova, int128_get64(llsize),
                       vaddr, section->readonly);
    if (ret) {
        error_setg(&err, "vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
                   "0x%"HWADDR_PRIx", %p) = %d (%m)",
                   container, iova, int128_get64(llsize), vaddr, ret);
        if (memory_region_is_ram_device(section->mr)) {
            /* Allow unexpected mappings not to be fatal for RAM devices */
            error_report_err(err);
            return;
        }
        goto fail;
    }

    return;

fail:
    if (memory_region_is_ram_device(section->mr)) {
        error_report("vfio_dma_map failed; PCI p2p may not work");
        return;
    }
    /*
     * On the initfn path, store the first error in the container so we
     * can gracefully fail.  Runtime, there's not much we can do other
     * than throw a hardware error.
     */
    if (!container->initialized) {
        if (!container->error) {
            error_propagate_prepend(&container->error, err,
                                    "Region %s: ",
                                    memory_region_name(section->mr));
        } else {
            error_free(err);
        }
    } else {
        error_report_err(err);
        hw_error("vfio: DMA mapping failed, unable to continue");
    }
}

static void vfio_listener_region_del(MemoryListener *listener,
                                     MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer, listener);
    hwaddr iova, end;
    Int128 llend, llsize;
    int ret;
    bool try_unmap = true;

    if (vfio_listener_skipped_section(section)) {
        trace_vfio_listener_region_del_skip(
                section->offset_within_address_space,
                section->offset_within_address_space +
                int128_get64(int128_sub(section->size, int128_one())));
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    if (memory_region_is_iommu(section->mr)) {
        VFIOGuestIOMMU *giommu;

        QLIST_FOREACH(giommu, &container->giommu_list, giommu_next) {
            if (MEMORY_REGION(giommu->iommu) == section->mr &&
                giommu->n.start == section->offset_within_region) {
                memory_region_unregister_iommu_notifier(section->mr,
                                                        &giommu->n);
                QLIST_REMOVE(giommu, giommu_next);
                g_free(giommu);
                break;
            }
        }

        /*
         * FIXME: We assume the one big unmap below is adequate to
         * remove any individual page mappings in the IOMMU which
         * might have been copied into VFIO.  This works for a page table
         * based IOMMU where a big unmap flattens a large range of IO-PTEs.
         * That may not be true for all IOMMU types.
         */
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));

    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }
    end = int128_get64(int128_sub(llend, int128_one()));

    llsize = int128_sub(llend, int128_make64(iova));

    trace_vfio_listener_region_del(iova, end);

    if (memory_region_is_ram_device(section->mr)) {
        hwaddr pgmask;
        VFIOHostDMAWindow *hostwin;
        bool hostwin_found = false;

        QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
            if (hostwin->min_iova <= iova && end <= hostwin->max_iova) {
                hostwin_found = true;
                break;
            }
        }
        assert(hostwin_found); /* or region_add() would have failed */

        pgmask = (1ULL << ctz64(hostwin->iova_pgsizes)) - 1;
        try_unmap = !((iova & pgmask) || (int128_get64(llsize) & pgmask));
    }

    if (try_unmap) {
        ret = vfio_dma_unmap(container, iova, int128_get64(llsize), NULL);
        if (ret) {
            error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx") = %d (%m)",
                         container, iova, int128_get64(llsize), ret);
        }
    }

    memory_region_unref(section->mr);

    if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
        vfio_spapr_remove_window(container,
                                 section->offset_within_address_space);
        if (vfio_host_win_del(container,
                              section->offset_within_address_space,
                              section->offset_within_address_space +
                              int128_get64(section->size) - 1) < 0) {
            hw_error("%s: Cannot delete missing window at %"HWADDR_PRIx,
                     __func__, section->offset_within_address_space);
        }
    }
}

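/*
 * vfio_get_dirty_bitmap - fetch the kernel's dirty log for an IOVA range
 * via VFIO_IOMMU_DIRTY_PAGES and fold it into QEMU's dirty tracking at the
 * matching ram_addr.  The bitmap is sized exactly as in
 * vfio_dma_unmap_bitmap() above: one bit per TARGET_PAGE_SIZE page,
 * rounded up to whole __u64 words.
 */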
static int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova,
                                 uint64_t size, ram_addr_t ram_addr)
{
    struct vfio_iommu_type1_dirty_bitmap *dbitmap;
    struct vfio_iommu_type1_dirty_bitmap_get *range;
    uint64_t pages;
    int ret;

    dbitmap = g_malloc0(sizeof(*dbitmap) + sizeof(*range));

    dbitmap->argsz = sizeof(*dbitmap) + sizeof(*range);
    dbitmap->flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP;
    range = (struct vfio_iommu_type1_dirty_bitmap_get *)&dbitmap->data;
    range->iova = iova;
    range->size = size;

    /*
     * cpu_physical_memory_set_dirty_lebitmap() expects pages in bitmap of
     * TARGET_PAGE_SIZE to mark those dirty.  Hence set bitmap's pgsize to
     * TARGET_PAGE_SIZE.
     */
    range->bitmap.pgsize = TARGET_PAGE_SIZE;

    pages = TARGET_PAGE_ALIGN(range->size) >> TARGET_PAGE_BITS;
    range->bitmap.size = ROUND_UP(pages, sizeof(__u64) * BITS_PER_BYTE) /
                         BITS_PER_BYTE;
    range->bitmap.data = g_try_malloc0(range->bitmap.size);
    if (!range->bitmap.data) {
        ret = -ENOMEM;
        goto err_out;
    }

    ret = ioctl(container->fd, VFIO_IOMMU_DIRTY_PAGES, dbitmap);
    if (ret) {
        error_report("Failed to get dirty bitmap for iova: 0x%"PRIx64
                     " size: 0x%"PRIx64" err: %d", (uint64_t)range->iova,
                     (uint64_t)range->size, errno);
        goto err_out;
    }

    cpu_physical_memory_set_dirty_lebitmap((unsigned long *)range->bitmap.data,
                                           ram_addr, pages);

    trace_vfio_get_dirty_bitmap(container->fd, range->iova, range->size,
                                range->bitmap.size, ram_addr);
err_out:
    g_free(range->bitmap.data);
    g_free(dbitmap);

    return ret;
}

typedef struct {
    IOMMUNotifier n;
    VFIOGuestIOMMU *giommu;
} vfio_giommu_dirty_notifier;

static void vfio_iommu_map_dirty_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
{
    vfio_giommu_dirty_notifier *gdn = container_of(n,
                                                vfio_giommu_dirty_notifier, n);
    VFIOGuestIOMMU *giommu = gdn->giommu;
    VFIOContainer *container = giommu->container;
    hwaddr iova = iotlb->iova + giommu->iommu_offset;
    ram_addr_t translated_addr;

    trace_vfio_iommu_map_dirty_notify(iova, iova + iotlb->addr_mask);

    if (iotlb->target_as != &address_space_memory) {
        error_report("Wrong target AS \"%s\", only system memory is allowed",
                     iotlb->target_as->name ? iotlb->target_as->name : "none");
        return;
    }

    rcu_read_lock();
    if (vfio_get_xlat_addr(iotlb, NULL, &translated_addr, NULL)) {
        int ret;

        ret = vfio_get_dirty_bitmap(container, iova, iotlb->addr_mask + 1,
                                    translated_addr);
        if (ret) {
            error_report("vfio_iommu_map_dirty_notify(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx") = %d (%m)",
                         container, iova,
                         iotlb->addr_mask + 1, ret);
        }
    }
    rcu_read_unlock();
}

static int vfio_sync_dirty_bitmap(VFIOContainer *container,
                                  MemoryRegionSection *section)
{
    ram_addr_t ram_addr;

    if (memory_region_is_iommu(section->mr)) {
        VFIOGuestIOMMU *giommu;

        QLIST_FOREACH(giommu, &container->giommu_list, giommu_next) {
            if (MEMORY_REGION(giommu->iommu) == section->mr &&
                giommu->n.start == section->offset_within_region) {
                Int128 llend;
                vfio_giommu_dirty_notifier gdn = { .giommu = giommu };
                int idx = memory_region_iommu_attrs_to_index(giommu->iommu,
                                                       MEMTXATTRS_UNSPECIFIED);

                llend = int128_add(int128_make64(section->offset_within_region),
                                   section->size);
                llend = int128_sub(llend, int128_one());

                iommu_notifier_init(&gdn.n,
                                    vfio_iommu_map_dirty_notify,
                                    IOMMU_NOTIFIER_MAP,
                                    section->offset_within_region,
                                    int128_get64(llend),
                                    idx);
                memory_region_iommu_replay(giommu->iommu, &gdn.n);
                break;
            }
        }
        return 0;
    }

    ram_addr = memory_region_get_ram_addr(section->mr) +
               section->offset_within_region;

    return vfio_get_dirty_bitmap(container,
                   TARGET_PAGE_ALIGN(section->offset_within_address_space),
                   int128_get64(section->size), ram_addr);
}

static void vfio_listener_log_sync(MemoryListener *listener,
                                   MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer, listener);

    if (vfio_listener_skipped_section(section) ||
        !container->dirty_pages_supported) {
        return;
    }

    if (vfio_devices_all_stopped_and_saving(container)) {
        vfio_sync_dirty_bitmap(container, section);
    }
}

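/*
 * This listener gives the container its view of guest memory.  It is
 * registered against the container's AddressSpace (in
 * vfio_connect_container()), so RAM sections appearing or vanishing are
 * translated into vfio_dma_map()/vfio_dma_unmap() calls, and log_sync
 * feeds VFIO's dirty-page tracking into migration.
 */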
static const MemoryListener vfio_memory_listener = {
    .region_add = vfio_listener_region_add,
    .region_del = vfio_listener_region_del,
    .log_sync = vfio_listener_log_sync,
};

static void vfio_listener_release(VFIOContainer *container)
{
    memory_listener_unregister(&container->listener);
    if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
        memory_listener_unregister(&container->prereg_listener);
    }
}

static struct vfio_info_cap_header *
vfio_get_cap(void *ptr, uint32_t cap_offset, uint16_t id)
{
    struct vfio_info_cap_header *hdr;

    for (hdr = ptr + cap_offset; hdr != ptr; hdr = ptr + hdr->next) {
        if (hdr->id == id) {
            return hdr;
        }
    }

    return NULL;
}

struct vfio_info_cap_header *
vfio_get_region_info_cap(struct vfio_region_info *info, uint16_t id)
{
    if (!(info->flags & VFIO_REGION_INFO_FLAG_CAPS)) {
        return NULL;
    }

    return vfio_get_cap((void *)info, info->cap_offset, id);
}

static struct vfio_info_cap_header *
vfio_get_iommu_type1_info_cap(struct vfio_iommu_type1_info *info, uint16_t id)
{
    if (!(info->flags & VFIO_IOMMU_INFO_CAPS)) {
        return NULL;
    }

    return vfio_get_cap((void *)info, info->cap_offset, id);
}

struct vfio_info_cap_header *
vfio_get_device_info_cap(struct vfio_device_info *info, uint16_t id)
{
    if (!(info->flags & VFIO_DEVICE_FLAGS_CAPS)) {
        return NULL;
    }

    return vfio_get_cap((void *)info, info->cap_offset, id);
}

bool vfio_get_info_dma_avail(struct vfio_iommu_type1_info *info,
                             unsigned int *avail)
{
    struct vfio_info_cap_header *hdr;
    struct vfio_iommu_type1_info_dma_avail *cap;

    /* If the capability cannot be found, assume no DMA limiting */
    hdr = vfio_get_iommu_type1_info_cap(info,
                                        VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL);
    if (hdr == NULL) {
        return false;
    }

    if (avail != NULL) {
        cap = (void *) hdr;
        *avail = cap->avail;
    }

    return true;
}

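/*
 * With the VFIO_REGION_INFO_CAP_SPARSE_MMAP capability the kernel declares
 * that only some sub-ranges of a region may be mmap'd (e.g. a BAR with an
 * MSI-X table carved out of it).  Build region->mmaps from the advertised
 * areas, skipping zero-length entries, then trim the array to the entries
 * actually used.
 */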
static int vfio_setup_region_sparse_mmaps(VFIORegion *region,
                                          struct vfio_region_info *info)
{
    struct vfio_info_cap_header *hdr;
    struct vfio_region_info_cap_sparse_mmap *sparse;
    int i, j;

    hdr = vfio_get_region_info_cap(info, VFIO_REGION_INFO_CAP_SPARSE_MMAP);
    if (!hdr) {
        return -ENODEV;
    }

    sparse = container_of(hdr, struct vfio_region_info_cap_sparse_mmap, header);

    trace_vfio_region_sparse_mmap_header(region->vbasedev->name,
                                         region->nr, sparse->nr_areas);

    region->mmaps = g_new0(VFIOMmap, sparse->nr_areas);

    for (i = 0, j = 0; i < sparse->nr_areas; i++) {
        trace_vfio_region_sparse_mmap_entry(i, sparse->areas[i].offset,
                                            sparse->areas[i].offset +
                                            sparse->areas[i].size);

        if (sparse->areas[i].size) {
            region->mmaps[j].offset = sparse->areas[i].offset;
            region->mmaps[j].size = sparse->areas[i].size;
            j++;
        }
    }

    region->nr_mmaps = j;
    region->mmaps = g_realloc(region->mmaps, j * sizeof(VFIOMmap));

    return 0;
}

int vfio_region_setup(Object *obj, VFIODevice *vbasedev, VFIORegion *region,
                      int index, const char *name)
{
    struct vfio_region_info *info;
    int ret;

    ret = vfio_get_region_info(vbasedev, index, &info);
    if (ret) {
        return ret;
    }

    region->vbasedev = vbasedev;
    region->flags = info->flags;
    region->size = info->size;
    region->fd_offset = info->offset;
    region->nr = index;

    if (region->size) {
        region->mem = g_new0(MemoryRegion, 1);
        memory_region_init_io(region->mem, obj, &vfio_region_ops,
                              region, name, region->size);

        if (!vbasedev->no_mmap &&
            region->flags & VFIO_REGION_INFO_FLAG_MMAP) {

            ret = vfio_setup_region_sparse_mmaps(region, info);

            if (ret) {
                region->nr_mmaps = 1;
                region->mmaps = g_new0(VFIOMmap, region->nr_mmaps);
                region->mmaps[0].offset = 0;
                region->mmaps[0].size = region->size;
            }
        }
    }

    g_free(info);

    trace_vfio_region_setup(vbasedev->name, index, name,
                            region->flags, region->fd_offset, region->size);
    return 0;
}

static void vfio_subregion_unmap(VFIORegion *region, int index)
{
    trace_vfio_region_unmap(memory_region_name(&region->mmaps[index].mem),
                            region->mmaps[index].offset,
                            region->mmaps[index].offset +
                            region->mmaps[index].size - 1);
    memory_region_del_subregion(region->mem, &region->mmaps[index].mem);
    munmap(region->mmaps[index].mmap, region->mmaps[index].size);
    object_unparent(OBJECT(&region->mmaps[index].mem));
    region->mmaps[index].mmap = NULL;
}

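/*
 * vfio_region_mmap - mmap each (sparse) area of the region with
 * protections derived from the region flags and overlay it on the slow
 * I/O MemoryRegion as a "ram device" subregion, so guest accesses go
 * straight to the mapping instead of trapping into QEMU.  On any mmap
 * failure the loop unwinds every area already mapped and returns -errno.
 */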
int vfio_region_mmap(VFIORegion *region)
{
    int i, prot = 0;
    char *name;

    if (!region->mem) {
        return 0;
    }

    prot |= region->flags & VFIO_REGION_INFO_FLAG_READ ? PROT_READ : 0;
    prot |= region->flags & VFIO_REGION_INFO_FLAG_WRITE ? PROT_WRITE : 0;

    for (i = 0; i < region->nr_mmaps; i++) {
        region->mmaps[i].mmap = mmap(NULL, region->mmaps[i].size, prot,
                                     MAP_SHARED, region->vbasedev->fd,
                                     region->fd_offset +
                                     region->mmaps[i].offset);
        if (region->mmaps[i].mmap == MAP_FAILED) {
            int ret = -errno;

            trace_vfio_region_mmap_fault(memory_region_name(region->mem), i,
                                         region->fd_offset +
                                         region->mmaps[i].offset,
                                         region->fd_offset +
                                         region->mmaps[i].offset +
                                         region->mmaps[i].size - 1, ret);

            region->mmaps[i].mmap = NULL;

            for (i--; i >= 0; i--) {
                vfio_subregion_unmap(region, i);
            }

            return ret;
        }

        name = g_strdup_printf("%s mmaps[%d]",
                               memory_region_name(region->mem), i);
        memory_region_init_ram_device_ptr(&region->mmaps[i].mem,
                                          memory_region_owner(region->mem),
                                          name, region->mmaps[i].size,
                                          region->mmaps[i].mmap);
        g_free(name);
        memory_region_add_subregion(region->mem, region->mmaps[i].offset,
                                    &region->mmaps[i].mem);

        trace_vfio_region_mmap(memory_region_name(&region->mmaps[i].mem),
                               region->mmaps[i].offset,
                               region->mmaps[i].offset +
                               region->mmaps[i].size - 1);
    }

    return 0;
}

void vfio_region_unmap(VFIORegion *region)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            vfio_subregion_unmap(region, i);
        }
    }
}

void vfio_region_exit(VFIORegion *region)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            memory_region_del_subregion(region->mem, &region->mmaps[i].mem);
        }
    }

    trace_vfio_region_exit(region->vbasedev->name, region->nr);
}

void vfio_region_finalize(VFIORegion *region)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            munmap(region->mmaps[i].mmap, region->mmaps[i].size);
            object_unparent(OBJECT(&region->mmaps[i].mem));
        }
    }

    object_unparent(OBJECT(region->mem));

    g_free(region->mem);
    g_free(region->mmaps);

    trace_vfio_region_finalize(region->vbasedev->name, region->nr);

    region->mem = NULL;
    region->mmaps = NULL;
    region->nr_mmaps = 0;
    region->size = 0;
    region->flags = 0;
    region->nr = 0;
}

void vfio_region_mmaps_set_enabled(VFIORegion *region, bool enabled)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            memory_region_set_enabled(&region->mmaps[i].mem, enabled);
        }
    }

    trace_vfio_region_mmaps_set_enabled(memory_region_name(region->mem),
                                        enabled);
}

1420 | void vfio_reset_handler(void *opaque) | |
1421 | { | |
1422 | VFIOGroup *group; | |
1423 | VFIODevice *vbasedev; | |
1424 | ||
1425 | QLIST_FOREACH(group, &vfio_group_list, next) { | |
1426 | QLIST_FOREACH(vbasedev, &group->device_list, next) { | |
7da624e2 AW |
1427 | if (vbasedev->dev->realized) { |
1428 | vbasedev->ops->vfio_compute_needs_reset(vbasedev); | |
1429 | } | |
e2c7d025 EA |
1430 | } |
1431 | } | |
1432 | ||
1433 | QLIST_FOREACH(group, &vfio_group_list, next) { | |
1434 | QLIST_FOREACH(vbasedev, &group->device_list, next) { | |
7da624e2 | 1435 | if (vbasedev->dev->realized && vbasedev->needs_reset) { |
e2c7d025 EA |
1436 | vbasedev->ops->vfio_hot_reset_multi(vbasedev); |
1437 | } | |
1438 | } | |
1439 | } | |
1440 | } | |
1441 | ||
1442 | static void vfio_kvm_device_add_group(VFIOGroup *group) | |
1443 | { | |
1444 | #ifdef CONFIG_KVM | |
1445 | struct kvm_device_attr attr = { | |
1446 | .group = KVM_DEV_VFIO_GROUP, | |
1447 | .attr = KVM_DEV_VFIO_GROUP_ADD, | |
1448 | .addr = (uint64_t)(unsigned long)&group->fd, | |
1449 | }; | |
1450 | ||
1451 | if (!kvm_enabled()) { | |
1452 | return; | |
1453 | } | |
1454 | ||
1455 | if (vfio_kvm_device_fd < 0) { | |
1456 | struct kvm_create_device cd = { | |
1457 | .type = KVM_DEV_TYPE_VFIO, | |
1458 | }; | |
1459 | ||
1460 | if (kvm_vm_ioctl(kvm_state, KVM_CREATE_DEVICE, &cd)) { | |
78e5b17f | 1461 | error_report("Failed to create KVM VFIO device: %m"); |
e2c7d025 EA |
1462 | return; |
1463 | } | |
1464 | ||
1465 | vfio_kvm_device_fd = cd.fd; | |
1466 | } | |
1467 | ||
1468 | if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) { | |
1469 | error_report("Failed to add group %d to KVM VFIO device: %m", | |
1470 | group->groupid); | |
1471 | } | |
1472 | #endif | |
1473 | } | |
1474 | ||
1475 | static void vfio_kvm_device_del_group(VFIOGroup *group) | |
1476 | { | |
1477 | #ifdef CONFIG_KVM | |
1478 | struct kvm_device_attr attr = { | |
1479 | .group = KVM_DEV_VFIO_GROUP, | |
1480 | .attr = KVM_DEV_VFIO_GROUP_DEL, | |
1481 | .addr = (uint64_t)(unsigned long)&group->fd, | |
1482 | }; | |
1483 | ||
1484 | if (vfio_kvm_device_fd < 0) { | |
1485 | return; | |
1486 | } | |
1487 | ||
1488 | if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) { | |
1489 | error_report("Failed to remove group %d from KVM VFIO device: %m", | |
1490 | group->groupid); | |
1491 | } | |
1492 | #endif | |
1493 | } | |
1494 | ||
1495 | static VFIOAddressSpace *vfio_get_address_space(AddressSpace *as) | |
1496 | { | |
1497 | VFIOAddressSpace *space; | |
1498 | ||
1499 | QLIST_FOREACH(space, &vfio_address_spaces, list) { | |
1500 | if (space->as == as) { | |
1501 | return space; | |
1502 | } | |
1503 | } | |
1504 | ||
1505 | /* No suitable VFIOAddressSpace, create a new one */ | |
1506 | space = g_malloc0(sizeof(*space)); | |
1507 | space->as = as; | |
1508 | QLIST_INIT(&space->containers); | |
1509 | ||
1510 | QLIST_INSERT_HEAD(&vfio_address_spaces, space, list); | |
1511 | ||
1512 | return space; | |
1513 | } | |
1514 | ||
1515 | static void vfio_put_address_space(VFIOAddressSpace *space) | |
1516 | { | |
1517 | if (QLIST_EMPTY(&space->containers)) { | |
1518 | QLIST_REMOVE(space, list); | |
1519 | g_free(space); | |
1520 | } | |
1521 | } | |
1522 | ||
2b6326c0 EA |
1523 | /* |
1524 | * vfio_get_iommu_type - selects the richest iommu_type (v2 first) | |
1525 | */ | |
1526 | static int vfio_get_iommu_type(VFIOContainer *container, | |
1527 | Error **errp) | |
1528 | { | |
1529 | int iommu_types[] = { VFIO_TYPE1v2_IOMMU, VFIO_TYPE1_IOMMU, | |
1530 | VFIO_SPAPR_TCE_v2_IOMMU, VFIO_SPAPR_TCE_IOMMU }; | |
1531 | int i; | |
1532 | ||
1533 | for (i = 0; i < ARRAY_SIZE(iommu_types); i++) { | |
1534 | if (ioctl(container->fd, VFIO_CHECK_EXTENSION, iommu_types[i])) { | |
1535 | return iommu_types[i]; | |
1536 | } | |
1537 | } | |
1538 | error_setg(errp, "No available IOMMU models"); | |
1539 | return -EINVAL; | |
1540 | } | |
1541 | ||
1542 | static int vfio_init_container(VFIOContainer *container, int group_fd, | |
1543 | Error **errp) | |
1544 | { | |
1545 | int iommu_type, ret; | |
1546 | ||
1547 | iommu_type = vfio_get_iommu_type(container, errp); | |
1548 | if (iommu_type < 0) { | |
1549 | return iommu_type; | |
1550 | } | |
1551 | ||
1552 | ret = ioctl(group_fd, VFIO_GROUP_SET_CONTAINER, &container->fd); | |
1553 | if (ret) { | |
1554 | error_setg_errno(errp, errno, "Failed to set group container"); | |
1555 | return -errno; | |
1556 | } | |
1557 | ||
1558 | while (ioctl(container->fd, VFIO_SET_IOMMU, iommu_type)) { | |
1559 | if (iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) { | |
1560 | /* | |
1561 | * On sPAPR, despite the IOMMU subdriver always advertises v1 and | |
1562 | * v2, the running platform may not support v2 and there is no | |
1563 | * way to guess it until an IOMMU group gets added to the container. | |
1564 | * So in case it fails with v2, try v1 as a fallback. | |
1565 | */ | |
1566 | iommu_type = VFIO_SPAPR_TCE_IOMMU; | |
1567 | continue; | |
1568 | } | |
1569 | error_setg_errno(errp, errno, "Failed to set iommu for container"); | |
1570 | return -errno; | |
1571 | } | |
1572 | ||
1573 | container->iommu_type = iommu_type; | |
1574 | return 0; | |
1575 | } | |
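/*
 * Illustrative note (not part of the original source): the kernel enforces
 * an ordering here -- VFIO_SET_IOMMU is rejected until at least one group
 * has been attached with VFIO_GROUP_SET_CONTAINER, which is why the
 * SET_IOMMU retry loop above runs only after the group attach:
 *
 *     ioctl(group_fd, VFIO_GROUP_SET_CONTAINER, &container_fd);  // first
 *     ioctl(container_fd, VFIO_SET_IOMMU, VFIO_TYPE1v2_IOMMU);   // then
 */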
1576 | ||
87ea529c KW |
1577 | static int vfio_get_iommu_info(VFIOContainer *container, |
1578 | struct vfio_iommu_type1_info **info) | |
1579 | { | |
1581 | size_t argsz = sizeof(struct vfio_iommu_type1_info); | |
1582 | ||
1583 | *info = g_new0(struct vfio_iommu_type1_info, 1); | |
1584 | again: | |
1585 | (*info)->argsz = argsz; | |
1586 | ||
1587 | if (ioctl(container->fd, VFIO_IOMMU_GET_INFO, *info)) { | |
1588 | g_free(*info); | |
1589 | *info = NULL; | |
1590 | return -errno; | |
1591 | } | |
1592 | ||
1593 | if ((*info)->argsz > argsz) { | |
1594 | argsz = (*info)->argsz; | |
1595 | *info = g_realloc(*info, argsz); | |
1596 | goto again; | |
1597 | } | |
1598 | ||
1599 | return 0; | |
1600 | } | |
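/*
 * Illustrative note (not part of the original source): this is the usual
 * VFIO "argsz" handshake. Userspace reports the size of the buffer it
 * passed in argsz, and the kernel writes back the size it actually needs
 * (e.g. to append capability chains), so the caller grows the buffer and
 * retries until the two agree:
 *
 *     info->argsz = argsz;
 *     ioctl(fd, VFIO_IOMMU_GET_INFO, info);
 *     if (info->argsz > argsz) {
 *         info = g_realloc(info, info->argsz);  // grow and retry
 *     }
 */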
1601 | ||
1602 | static struct vfio_info_cap_header * | |
1603 | vfio_get_iommu_info_cap(struct vfio_iommu_type1_info *info, uint16_t id) | |
1604 | { | |
1605 | struct vfio_info_cap_header *hdr; | |
1606 | void *ptr = info; | |
1607 | ||
1608 | if (!(info->flags & VFIO_IOMMU_INFO_CAPS)) { | |
1609 | return NULL; | |
1610 | } | |
1611 | ||
1612 | for (hdr = ptr + info->cap_offset; hdr != ptr; hdr = ptr + hdr->next) { | |
1613 | if (hdr->id == id) { | |
1614 | return hdr; | |
1615 | } | |
1616 | } | |
1617 | ||
1618 | return NULL; | |
1619 | } | |
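/*
 * Illustrative layout sketch (not part of the original source): when
 * VFIO_IOMMU_INFO_CAPS is set, cap_offset points at the first
 * vfio_info_cap_header inside the same buffer, and each header's 'next'
 * field holds the offset of the following header; a 'next' of 0 makes
 * 'ptr + hdr->next' equal to 'ptr' again, terminating the walk above.
 *
 *     [vfio_iommu_type1_info ... cap_offset]-->[hdr A, next]-->[hdr B, next = 0]
 */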
1620 | ||
1621 | static void vfio_get_iommu_info_migration(VFIOContainer *container, | |
1622 | struct vfio_iommu_type1_info *info) | |
1623 | { | |
1624 | struct vfio_info_cap_header *hdr; | |
1625 | struct vfio_iommu_type1_info_cap_migration *cap_mig; | |
1626 | ||
1627 | hdr = vfio_get_iommu_info_cap(info, VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION); | |
1628 | if (!hdr) { | |
1629 | return; | |
1630 | } | |
1631 | ||
1632 | cap_mig = container_of(hdr, struct vfio_iommu_type1_info_cap_migration, | |
1633 | header); | |
1634 | ||
1635 | /* | |
1636 | * cpu_physical_memory_set_dirty_lebitmap() expects the bitmap to be | |
1637 | * in units of TARGET_PAGE_SIZE pages when marking them dirty. | |
1638 | */ | |
1639 | if (cap_mig->pgsize_bitmap & TARGET_PAGE_SIZE) { | |
1640 | container->dirty_pages_supported = true; | |
1641 | container->max_dirty_bitmap_size = cap_mig->max_dirty_bitmap_size; | |
1642 | container->dirty_pgsizes = cap_mig->pgsize_bitmap; | |
1643 | } | |
1644 | } | |
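/*
 * Illustrative note (not part of the original source): pgsize_bitmap has
 * one bit set per supported IOMMU page size, with the bit's value equal
 * to the page size itself. With 4 KiB target pages the test above is
 * therefore simply:
 *
 *     cap_mig->pgsize_bitmap & 0x1000   // bit 12 set <=> 4 KiB supported
 */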
1645 | ||
01905f58 EA |
1646 | static int vfio_connect_container(VFIOGroup *group, AddressSpace *as, |
1647 | Error **errp) | |
e2c7d025 EA |
1648 | { |
1649 | VFIOContainer *container; | |
1650 | int ret, fd; | |
1651 | VFIOAddressSpace *space; | |
1652 | ||
1653 | space = vfio_get_address_space(as); | |
1654 | ||
c65ee433 | 1655 | /* |
aff92b82 | 1656 | * VFIO is currently incompatible with discarding of RAM insofar as the |
c65ee433 AW |
1657 | * madvise to purge (zap) the page from QEMU's address space does not |
1658 | * interact with the memory API and therefore leaves stale virtual to | |
1659 | * physical mappings in the IOMMU if the page was previously pinned. We | |
aff92b82 | 1660 | * therefore set discarding broken for each group added to a container, |
c65ee433 AW |
1661 | * whether the container is used individually or shared. This provides |
1662 | * us with options to allow devices within a group to opt in and allow | |
aff92b82 | 1663 | * discarding, so long as it is done consistently for a group (for instance |
c65ee433 AW |
1664 | * if the device is an mdev device where it is known that the host vendor |
1665 | * driver will never pin pages outside of the working set of the guest | |
aff92b82 | 1666 | * driver, which would thus not be discarding candidates). |
c65ee433 AW |
1667 | * |
1668 | * The first opportunity to induce pinning occurs here where we attempt to | |
1669 | * attach the group to existing containers within the AddressSpace. If any | |
aff92b82 DH |
1670 | * pages are already zapped from the virtual address space, such as from |
1671 | * previous discards, new pinning will cause valid mappings to be | |
c65ee433 AW |
1672 | * re-established. Likewise, when the overall MemoryListener for a new |
1673 | * container is registered, a replay of mappings within the AddressSpace | |
1674 | * will occur, re-establishing any previously zapped pages as well. | |
1675 | * | |
aff92b82 DH |
1676 | * In particular, virtio-balloon is currently only prevented from | |
1677 | * discarding new memory; it does not yet set | |
1678 | * ram_block_discard_set_required() and therefore neither stops us here | |
1679 | * nor deals with the sudden memory consumption of inflated memory. | |
c65ee433 | 1680 | */ |
aff92b82 DH |
1681 | ret = ram_block_discard_disable(true); |
1682 | if (ret) { | |
1683 | error_setg_errno(errp, -ret, "Cannot set discarding of RAM broken"); | |
1684 | return ret; | |
1685 | } | |
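/*
 * Illustrative note (not part of the original source):
 * ram_block_discard_disable() maintains a global refcount, so every
 * successful disable(true) above must eventually be balanced by a
 * disable(false) -- see the put_space_exit error path below and the
 * re-enable paths in vfio_put_group()/vfio_get_device().
 */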
c65ee433 | 1686 | |
e2c7d025 EA |
1687 | QLIST_FOREACH(container, &space->containers, next) { |
1688 | if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) { | |
1689 | group->container = container; | |
1690 | QLIST_INSERT_HEAD(&container->group_list, group, container_next); | |
2016986a | 1691 | vfio_kvm_device_add_group(group); |
e2c7d025 EA |
1692 | return 0; |
1693 | } | |
1694 | } | |
1695 | ||
448058aa | 1696 | fd = qemu_open_old("/dev/vfio/vfio", O_RDWR); |
e2c7d025 | 1697 | if (fd < 0) { |
01905f58 | 1698 | error_setg_errno(errp, errno, "failed to open /dev/vfio/vfio"); |
e2c7d025 EA |
1699 | ret = -errno; |
1700 | goto put_space_exit; | |
1701 | } | |
1702 | ||
1703 | ret = ioctl(fd, VFIO_GET_API_VERSION); | |
1704 | if (ret != VFIO_API_VERSION) { | |
01905f58 EA |
1705 | error_setg(errp, "supported vfio version: %d, " |
1706 | "reported version: %d", VFIO_API_VERSION, ret); | |
e2c7d025 EA |
1707 | ret = -EINVAL; |
1708 | goto close_fd_exit; | |
1709 | } | |
1710 | ||
1711 | container = g_malloc0(sizeof(*container)); | |
1712 | container->space = space; | |
1713 | container->fd = fd; | |
d7d87836 | 1714 | container->error = NULL; |
87ea529c | 1715 | container->dirty_pages_supported = false; |
f7f9c7b2 LY |
1716 | QLIST_INIT(&container->giommu_list); |
1717 | QLIST_INIT(&container->hostwin_list); | |
2e6e697e | 1718 | |
2b6326c0 EA |
1719 | ret = vfio_init_container(container, group->fd, errp); |
1720 | if (ret) { | |
1721 | goto free_container_exit; | |
1722 | } | |
e2c7d025 | 1723 | |
2b6326c0 EA |
1724 | switch (container->iommu_type) { |
1725 | case VFIO_TYPE1v2_IOMMU: | |
1726 | case VFIO_TYPE1_IOMMU: | |
1727 | { | |
87ea529c | 1728 | struct vfio_iommu_type1_info *info; |
3898aad3 DG |
1729 | |
1730 | /* | |
1731 | * FIXME: This assumes that a Type1 IOMMU can map any 64-bit | |
1732 | * IOVA whatsoever. That's not actually true, but the current | |
1733 | * kernel interface doesn't tell us what it can map, and the | |
1734 | * existing Type1 IOMMUs generally support any IOVA we're | |
1735 | * going to actually try in practice. | |
1736 | */ | |
87ea529c KW |
1737 | ret = vfio_get_iommu_info(container, &info); |
1738 | ||
1739 | if (ret || !(info->flags & VFIO_IOMMU_INFO_PGSIZES)) { | |
f4ec5e26 | 1740 | /* Assume 4k IOVA page size */ |
87ea529c KW |
1741 | info->iova_pgsizes = 4096; |
1742 | } | |
1743 | vfio_host_win_add(container, 0, (hwaddr)-1, info->iova_pgsizes); | |
1744 | container->pgsizes = info->iova_pgsizes; | |
1745 | ||
1746 | if (!ret) { | |
1747 | vfio_get_iommu_info_migration(container, info); | |
7a140a57 | 1748 | } |
87ea529c | 1749 | g_free(info); |
2b6326c0 EA |
1750 | break; |
1751 | } | |
1752 | case VFIO_SPAPR_TCE_v2_IOMMU: | |
1753 | case VFIO_SPAPR_TCE_IOMMU: | |
1754 | { | |
3898aad3 | 1755 | struct vfio_iommu_spapr_tce_info info; |
2b6326c0 | 1756 | bool v2 = container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU; |
e2c7d025 EA |
1757 | |
1758 | /* | |
1759 | * The host kernel code implementing VFIO_IOMMU_DISABLE is called | |
1760 | * when the container fd is closed, so we do not call it explicitly | |
1761 | * in this file. | |
1762 | */ | |
318f67ce AK |
1763 | if (!v2) { |
1764 | ret = ioctl(fd, VFIO_IOMMU_ENABLE); | |
1765 | if (ret) { | |
01905f58 | 1766 | error_setg_errno(errp, errno, "failed to enable container"); |
318f67ce AK |
1767 | ret = -errno; |
1768 | goto free_container_exit; | |
1769 | } | |
1770 | } else { | |
1771 | container->prereg_listener = vfio_prereg_listener; | |
1772 | ||
1773 | memory_listener_register(&container->prereg_listener, | |
1774 | &address_space_memory); | |
1775 | if (container->error) { | |
1776 | memory_listener_unregister(&container->prereg_listener); | |
d7d87836 EA |
1777 | ret = -1; |
1778 | error_propagate_prepend(errp, container->error, | |
1779 | "RAM memory listener initialization failed: "); | |
318f67ce AK |
1780 | goto free_container_exit; |
1781 | } | |
e2c7d025 | 1782 | } |
3898aad3 | 1783 | |
3898aad3 DG |
1784 | info.argsz = sizeof(info); |
1785 | ret = ioctl(fd, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info); | |
1786 | if (ret) { | |
01905f58 EA |
1787 | error_setg_errno(errp, errno, |
1788 | "VFIO_IOMMU_SPAPR_TCE_GET_INFO failed"); | |
3898aad3 | 1789 | ret = -errno; |
318f67ce AK |
1790 | if (v2) { |
1791 | memory_listener_unregister(&container->prereg_listener); | |
1792 | } | |
3898aad3 DG |
1793 | goto free_container_exit; |
1794 | } | |
7a140a57 | 1795 | |
2e4109de | 1796 | if (v2) { |
c26bc185 | 1797 | container->pgsizes = info.ddw.pgsizes; |
2e4109de AK |
1798 | /* |
1799 | * A newly created container comes with a default window. | |
1800 | * To keep region_add/del simple, remove this window now and | |
1801 | * let the iommu_listener callbacks create/remove windows | |
1802 | * when needed. | |
1803 | */ | |
1804 | ret = vfio_spapr_remove_window(container, info.dma32_window_start); | |
1805 | if (ret) { | |
01905f58 EA |
1806 | error_setg_errno(errp, -ret, |
1807 | "failed to remove existing window"); | |
2e4109de AK |
1808 | goto free_container_exit; |
1809 | } | |
1810 | } else { | |
1811 | /* The default table uses 4K pages */ | |
c26bc185 | 1812 | container->pgsizes = 0x1000; |
2e4109de AK |
1813 | vfio_host_win_add(container, info.dma32_window_start, |
1814 | info.dma32_window_start + | |
1815 | info.dma32_window_size - 1, | |
1816 | 0x1000); | |
1817 | } | |
2b6326c0 | 1818 | } |
e2c7d025 EA |
1819 | } |
1820 | ||
8c37faa4 AK |
1821 | vfio_kvm_device_add_group(group); |
1822 | ||
1823 | QLIST_INIT(&container->group_list); | |
1824 | QLIST_INSERT_HEAD(&space->containers, container, next); | |
1825 | ||
1826 | group->container = container; | |
1827 | QLIST_INSERT_HEAD(&container->group_list, group, container_next); | |
1828 | ||
ee0bf0e5 DG |
1829 | container->listener = vfio_memory_listener; |
1830 | ||
1831 | memory_listener_register(&container->listener, container->space->as); | |
1832 | ||
1833 | if (container->error) { | |
d7d87836 EA |
1834 | ret = -1; |
1835 | error_propagate_prepend(errp, container->error, | |
1836 | "memory listener initialization failed: "); | |
ee0bf0e5 DG |
1837 | goto listener_release_exit; |
1838 | } | |
1839 | ||
1840 | container->initialized = true; | |
1841 | ||
e2c7d025 EA |
1842 | return 0; |
1843 | listener_release_exit: | |
8c37faa4 AK |
1844 | QLIST_REMOVE(group, container_next); |
1845 | QLIST_REMOVE(container, next); | |
1846 | vfio_kvm_device_del_group(group); | |
e2c7d025 EA |
1847 | vfio_listener_release(container); |
1848 | ||
1849 | free_container_exit: | |
1850 | g_free(container); | |
1851 | ||
1852 | close_fd_exit: | |
1853 | close(fd); | |
1854 | ||
1855 | put_space_exit: | |
aff92b82 | 1856 | ram_block_discard_disable(false); |
e2c7d025 EA |
1857 | vfio_put_address_space(space); |
1858 | ||
1859 | return ret; | |
1860 | } | |
1861 | ||
1862 | static void vfio_disconnect_container(VFIOGroup *group) | |
1863 | { | |
1864 | VFIOContainer *container = group->container; | |
1865 | ||
36968626 PX |
1866 | QLIST_REMOVE(group, container_next); |
1867 | group->container = NULL; | |
1868 | ||
1869 | /* | |
1870 | * Explicitly release the listener before unsetting the container, | |
1871 | * since unsetting it may destroy the backend container if this is | |
1872 | * the last group. | |
1873 | */ | |
1874 | if (QLIST_EMPTY(&container->group_list)) { | |
1875 | vfio_listener_release(container); | |
1876 | } | |
1877 | ||
e2c7d025 EA |
1878 | if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, &container->fd)) { |
1879 | error_report("vfio: error disconnecting group %d from container", | |
1880 | group->groupid); | |
1881 | } | |
1882 | ||
e2c7d025 EA |
1883 | if (QLIST_EMPTY(&container->group_list)) { |
1884 | VFIOAddressSpace *space = container->space; | |
f8d8a944 | 1885 | VFIOGuestIOMMU *giommu, *tmp; |
e2c7d025 | 1886 | |
e2c7d025 | 1887 | QLIST_REMOVE(container, next); |
f8d8a944 AK |
1888 | |
1889 | QLIST_FOREACH_SAFE(giommu, &container->giommu_list, giommu_next, tmp) { | |
3df9d748 AK |
1890 | memory_region_unregister_iommu_notifier( |
1891 | MEMORY_REGION(giommu->iommu), &giommu->n); | |
f8d8a944 AK |
1892 | QLIST_REMOVE(giommu, giommu_next); |
1893 | g_free(giommu); | |
1894 | } | |
1895 | ||
e2c7d025 EA |
1896 | trace_vfio_disconnect_container(container->fd); |
1897 | close(container->fd); | |
1898 | g_free(container); | |
1899 | ||
1900 | vfio_put_address_space(space); | |
1901 | } | |
1902 | } | |
1903 | ||
1b808d5b | 1904 | VFIOGroup *vfio_get_group(int groupid, AddressSpace *as, Error **errp) |
e2c7d025 EA |
1905 | { |
1906 | VFIOGroup *group; | |
1907 | char path[32]; | |
1908 | struct vfio_group_status status = { .argsz = sizeof(status) }; | |
1909 | ||
1910 | QLIST_FOREACH(group, &vfio_group_list, next) { | |
1911 | if (group->groupid == groupid) { | |
1912 | /* Found it. Now is it already in the right context? */ | |
1913 | if (group->container->space->as == as) { | |
1914 | return group; | |
1915 | } else { | |
1b808d5b EA |
1916 | error_setg(errp, "group %d used in multiple address spaces", |
1917 | group->groupid); | |
e2c7d025 EA |
1918 | return NULL; |
1919 | } | |
1920 | } | |
1921 | } | |
1922 | ||
1923 | group = g_malloc0(sizeof(*group)); | |
1924 | ||
1925 | snprintf(path, sizeof(path), "/dev/vfio/%d", groupid); | |
448058aa | 1926 | group->fd = qemu_open_old(path, O_RDWR); |
e2c7d025 | 1927 | if (group->fd < 0) { |
1b808d5b | 1928 | error_setg_errno(errp, errno, "failed to open %s", path); |
e2c7d025 EA |
1929 | goto free_group_exit; |
1930 | } | |
1931 | ||
1932 | if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status)) { | |
1b808d5b | 1933 | error_setg_errno(errp, errno, "failed to get group %d status", groupid); |
e2c7d025 EA |
1934 | goto close_fd_exit; |
1935 | } | |
1936 | ||
1937 | if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) { | |
1b808d5b EA |
1938 | error_setg(errp, "group %d is not viable", groupid); |
1939 | error_append_hint(errp, | |
1940 | "Please ensure all devices within the iommu_group " | |
1941 | "are bound to their vfio bus driver.\n"); | |
e2c7d025 EA |
1942 | goto close_fd_exit; |
1943 | } | |
1944 | ||
1945 | group->groupid = groupid; | |
1946 | QLIST_INIT(&group->device_list); | |
1947 | ||
1b808d5b EA |
1948 | if (vfio_connect_container(group, as, errp)) { |
1949 | error_prepend(errp, "failed to setup container for group %d: ", | |
1950 | groupid); | |
e2c7d025 EA |
1951 | goto close_fd_exit; |
1952 | } | |
1953 | ||
1954 | if (QLIST_EMPTY(&vfio_group_list)) { | |
1955 | qemu_register_reset(vfio_reset_handler, NULL); | |
1956 | } | |
1957 | ||
1958 | QLIST_INSERT_HEAD(&vfio_group_list, group, next); | |
1959 | ||
e2c7d025 EA |
1960 | return group; |
1961 | ||
1962 | close_fd_exit: | |
1963 | close(group->fd); | |
1964 | ||
1965 | free_group_exit: | |
1966 | g_free(group); | |
1967 | ||
1968 | return NULL; | |
1969 | } | |
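/*
 * Illustrative usage sketch (not part of the original source): callers
 * such as the PCI backend first resolve the group number from sysfs
 * (the /sys/bus/pci/devices/<addr>/iommu_group symlink ends in the
 * groupid) and then attach it to the device's DMA address space:
 *
 *     group = vfio_get_group(groupid,
 *                            pci_device_iommu_address_space(pdev), errp);
 *     if (!group) {
 *         return;  // errp is already set
 *     }
 */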
1970 | ||
1971 | void vfio_put_group(VFIOGroup *group) | |
1972 | { | |
77a10d04 | 1973 | if (!group || !QLIST_EMPTY(&group->device_list)) { |
e2c7d025 EA |
1974 | return; |
1975 | } | |
1976 | ||
aff92b82 DH |
1977 | if (!group->ram_block_discard_allowed) { |
1978 | ram_block_discard_disable(false); | |
238e9172 | 1979 | } |
e2c7d025 EA |
1980 | vfio_kvm_device_del_group(group); |
1981 | vfio_disconnect_container(group); | |
1982 | QLIST_REMOVE(group, next); | |
1983 | trace_vfio_put_group(group->fd); | |
1984 | close(group->fd); | |
1985 | g_free(group); | |
1986 | ||
1987 | if (QLIST_EMPTY(&vfio_group_list)) { | |
1988 | qemu_unregister_reset(vfio_reset_handler, NULL); | |
1989 | } | |
1990 | } | |
1991 | ||
1992 | int vfio_get_device(VFIOGroup *group, const char *name, | |
59f7d674 | 1993 | VFIODevice *vbasedev, Error **errp) |
e2c7d025 EA |
1994 | { |
1995 | struct vfio_device_info dev_info = { .argsz = sizeof(dev_info) }; | |
217e9fdc | 1996 | int ret, fd; |
e2c7d025 | 1997 | |
217e9fdc PB |
1998 | fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, name); |
1999 | if (fd < 0) { | |
59f7d674 EA |
2000 | error_setg_errno(errp, errno, "error getting device from group %d", |
2001 | group->groupid); | |
2002 | error_append_hint(errp, | |
2003 | "Verify all devices in group %d are bound to vfio-<bus> " | |
2004 | "or pci-stub and not already in use\n", group->groupid); | |
217e9fdc | 2005 | return fd; |
e2c7d025 EA |
2006 | } |
2007 | ||
217e9fdc | 2008 | ret = ioctl(fd, VFIO_DEVICE_GET_INFO, &dev_info); |
e2c7d025 | 2009 | if (ret) { |
59f7d674 | 2010 | error_setg_errno(errp, errno, "error getting device info"); |
217e9fdc PB |
2011 | close(fd); |
2012 | return ret; | |
e2c7d025 EA |
2013 | } |
2014 | ||
238e9172 | 2015 | /* |
aff92b82 DH |
2016 | * Set discarding of RAM as not broken for this group if the driver knows |
2017 | * the device operates compatibly with discarding. Setting must be | |
2018 | * consistent per group, but since compatibility is really only possible | |
2019 | * with mdev currently, we expect singleton groups. | |
238e9172 | 2020 | */ |
aff92b82 DH |
2021 | if (vbasedev->ram_block_discard_allowed != |
2022 | group->ram_block_discard_allowed) { | |
238e9172 | 2023 | if (!QLIST_EMPTY(&group->device_list)) { |
aff92b82 DH |
2024 | error_setg(errp, "Inconsistent setting of support for discarding " |
2025 | "RAM (e.g., balloon) within group"); | |
8709b395 | 2026 | close(fd); |
238e9172 AW |
2027 | return -1; |
2028 | } | |
2029 | ||
aff92b82 DH |
2030 | if (!group->ram_block_discard_allowed) { |
2031 | group->ram_block_discard_allowed = true; | |
2032 | ram_block_discard_disable(false); | |
238e9172 AW |
2033 | } |
2034 | } | |
2035 | ||
217e9fdc PB |
2036 | vbasedev->fd = fd; |
2037 | vbasedev->group = group; | |
2038 | QLIST_INSERT_HEAD(&group->device_list, vbasedev, next); | |
2039 | ||
e2c7d025 EA |
2040 | vbasedev->num_irqs = dev_info.num_irqs; |
2041 | vbasedev->num_regions = dev_info.num_regions; | |
2042 | vbasedev->flags = dev_info.flags; | |
2043 | ||
2044 | trace_vfio_get_device(name, dev_info.flags, dev_info.num_regions, | |
2045 | dev_info.num_irqs); | |
2046 | ||
2047 | vbasedev->reset_works = !!(dev_info.flags & VFIO_DEVICE_FLAGS_RESET); | |
217e9fdc | 2048 | return 0; |
e2c7d025 EA |
2049 | } |
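/*
 * Illustrative usage sketch (not part of the original source): a backend
 * typically pairs this with vfio_get_group() and the matching put
 * helpers on its error path:
 *
 *     group = vfio_get_group(groupid, as, errp);
 *     if (group && vfio_get_device(group, name, vbasedev, errp)) {
 *         vfio_put_group(group);
 *     }
 */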
2050 | ||
2051 | void vfio_put_base_device(VFIODevice *vbasedev) | |
2052 | { | |
77a10d04 PB |
2053 | if (!vbasedev->group) { |
2054 | return; | |
2055 | } | |
e2c7d025 EA |
2056 | QLIST_REMOVE(vbasedev, next); |
2057 | vbasedev->group = NULL; | |
2058 | trace_vfio_put_base_device(vbasedev->fd); | |
2059 | close(vbasedev->fd); | |
2060 | } | |
2061 | ||
46900226 AW |
2062 | int vfio_get_region_info(VFIODevice *vbasedev, int index, |
2063 | struct vfio_region_info **info) | |
2064 | { | |
2065 | size_t argsz = sizeof(struct vfio_region_info); | |
2066 | ||
2067 | *info = g_malloc0(argsz); | |
2068 | ||
2069 | (*info)->index = index; | |
b53b0f69 | 2070 | retry: |
46900226 AW |
2071 | (*info)->argsz = argsz; |
2072 | ||
2073 | if (ioctl(vbasedev->fd, VFIO_DEVICE_GET_REGION_INFO, *info)) { | |
2074 | g_free(*info); | |
e61a424f | 2075 | *info = NULL; |
46900226 AW |
2076 | return -errno; |
2077 | } | |
2078 | ||
b53b0f69 AW |
2079 | if ((*info)->argsz > argsz) { |
2080 | argsz = (*info)->argsz; | |
2081 | *info = g_realloc(*info, argsz); | |
2082 | ||
2083 | goto retry; | |
2084 | } | |
2085 | ||
46900226 AW |
2086 | return 0; |
2087 | } | |
2088 | ||
e61a424f AW |
2089 | int vfio_get_dev_region_info(VFIODevice *vbasedev, uint32_t type, |
2090 | uint32_t subtype, struct vfio_region_info **info) | |
2091 | { | |
2092 | int i; | |
2093 | ||
2094 | for (i = 0; i < vbasedev->num_regions; i++) { | |
2095 | struct vfio_info_cap_header *hdr; | |
2096 | struct vfio_region_info_cap_type *cap_type; | |
2097 | ||
2098 | if (vfio_get_region_info(vbasedev, i, info)) { | |
2099 | continue; | |
2100 | } | |
2101 | ||
2102 | hdr = vfio_get_region_info_cap(*info, VFIO_REGION_INFO_CAP_TYPE); | |
2103 | if (!hdr) { | |
2104 | g_free(*info); | |
2105 | continue; | |
2106 | } | |
2107 | ||
2108 | cap_type = container_of(hdr, struct vfio_region_info_cap_type, header); | |
2109 | ||
2110 | trace_vfio_get_dev_region(vbasedev->name, i, | |
2111 | cap_type->type, cap_type->subtype); | |
2112 | ||
2113 | if (cap_type->type == type && cap_type->subtype == subtype) { | |
2114 | return 0; | |
2115 | } | |
2116 | ||
2117 | g_free(*info); | |
2118 | } | |
2119 | ||
2120 | *info = NULL; | |
2121 | return -ENODEV; | |
2122 | } | |
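/*
 * Illustrative usage sketch (not part of the original source): device
 * quirks use this lookup to find vendor-specific regions, e.g. the
 * Intel IGD OpRegion:
 *
 *     struct vfio_region_info *opregion = NULL;
 *
 *     ret = vfio_get_dev_region_info(vbasedev,
 *             VFIO_REGION_TYPE_PCI_VENDOR_TYPE | PCI_VENDOR_ID_INTEL,
 *             VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION, &opregion);
 */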
2123 | ||
ae0215b2 AK |
2124 | bool vfio_has_region_cap(VFIODevice *vbasedev, int region, uint16_t cap_type) |
2125 | { | |
2126 | struct vfio_region_info *info = NULL; | |
2127 | bool ret = false; | |
2128 | ||
2129 | if (!vfio_get_region_info(vbasedev, region, &info)) { | |
2130 | if (vfio_get_region_info_cap(info, cap_type)) { | |
2131 | ret = true; | |
2132 | } | |
2133 | g_free(info); | |
2134 | } | |
2135 | ||
2136 | return ret; | |
2137 | } | |
2138 | ||
3153119e DG |
2139 | /* |
2140 | * Interfaces for IBM EEH (Enhanced Error Handling) | |
2141 | */ | |
2142 | static bool vfio_eeh_container_ok(VFIOContainer *container) | |
2143 | { | |
2144 | /* | |
2145 | * As of 2016-03-04 (linux-4.5) the host kernel EEH/VFIO | |
2146 | * implementation is broken if there are multiple groups in a | |
2147 | * container. The hardware works in units of Partitionable | |
2148 | * Endpoints (== IOMMU groups) and the EEH operations naively | |
2149 | * iterate across all groups in the container, without any logic | |
2150 | * to make sure the groups have their state synchronized. For | |
2151 | * certain operations (ENABLE) that might be ok, until an error | |
2152 | * occurs, but for others (GET_STATE) it's clearly broken. | |
2153 | */ | |
2154 | ||
2155 | /* | |
2156 | * XXX Once fixed kernels exist, test for them here | |
2157 | */ | |
2158 | ||
2159 | if (QLIST_EMPTY(&container->group_list)) { | |
2160 | return false; | |
2161 | } | |
2162 | ||
2163 | if (QLIST_NEXT(QLIST_FIRST(&container->group_list), container_next)) { | |
2164 | return false; | |
2165 | } | |
2166 | ||
2167 | return true; | |
2168 | } | |
2169 | ||
2170 | static int vfio_eeh_container_op(VFIOContainer *container, uint32_t op) | |
2171 | { | |
2172 | struct vfio_eeh_pe_op pe_op = { | |
2173 | .argsz = sizeof(pe_op), | |
2174 | .op = op, | |
2175 | }; | |
2176 | int ret; | |
2177 | ||
2178 | if (!vfio_eeh_container_ok(container)) { | |
2179 | error_report("vfio/eeh: EEH_PE_OP 0x%x: " | |
2180 | "kernel requires a container with exactly one group", op); | |
2181 | return -EPERM; | |
2182 | } | |
2183 | ||
2184 | ret = ioctl(container->fd, VFIO_EEH_PE_OP, &pe_op); | |
2185 | if (ret < 0) { | |
2186 | error_report("vfio/eeh: EEH_PE_OP 0x%x failed: %m", op); | |
2187 | return -errno; | |
2188 | } | |
2189 | ||
d917e88d | 2190 | return ret; |
3153119e DG |
2191 | } |
2192 | ||
2193 | static VFIOContainer *vfio_eeh_as_container(AddressSpace *as) | |
2194 | { | |
2195 | VFIOAddressSpace *space = vfio_get_address_space(as); | |
2196 | VFIOContainer *container = NULL; | |
2197 | ||
2198 | if (QLIST_EMPTY(&space->containers)) { | |
2199 | /* No containers to act on */ | |
2200 | goto out; | |
2201 | } | |
2202 | ||
2203 | container = QLIST_FIRST(&space->containers); | |
2204 | ||
2205 | if (QLIST_NEXT(container, next)) { | |
2206 | /* We don't yet have logic to synchronize EEH state across | |
2207 | * multiple containers */ | |
2208 | container = NULL; | |
2209 | goto out; | |
2210 | } | |
2211 | ||
2212 | out: | |
2213 | vfio_put_address_space(space); | |
2214 | return container; | |
2215 | } | |
2216 | ||
2217 | bool vfio_eeh_as_ok(AddressSpace *as) | |
2218 | { | |
2219 | VFIOContainer *container = vfio_eeh_as_container(as); | |
2220 | ||
2221 | return (container != NULL) && vfio_eeh_container_ok(container); | |
2222 | } | |
2223 | ||
2224 | int vfio_eeh_as_op(AddressSpace *as, uint32_t op) | |
2225 | { | |
2226 | VFIOContainer *container = vfio_eeh_as_container(as); | |
2227 | ||
2228 | if (!container) { | |
2229 | return -ENODEV; | |
2230 | } | |
2231 | return vfio_eeh_container_op(container, op); | |
2232 | } |
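/*
 * Illustrative usage sketch (not part of the original source): the sPAPR
 * PCI host bridge drives these helpers with the kernel's EEH opcodes,
 * e.g. enabling error detection on a PE once the address space checks
 * out:
 *
 *     if (vfio_eeh_as_ok(as)) {
 *         vfio_eeh_as_op(as, VFIO_EEH_PE_ENABLE);
 *     }
 */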