/*
 * generic functions used by VFIO devices
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Alex Williamson <alex.williamson@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Based on qemu-kvm device-assignment:
 *  Adapted for KVM by Qumranet.
 *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
 *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
 *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
 *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
 *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
 */

#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/vfio.h>

#include "hw/vfio/vfio-common.h"
#include "hw/vfio/vfio.h"
#include "exec/address-spaces.h"
#include "exec/memory.h"
#include "hw/hw.h"
#include "qemu/error-report.h"
#include "sysemu/kvm.h"
#include "trace.h"

struct vfio_group_head vfio_group_list =
    QLIST_HEAD_INITIALIZER(vfio_group_list);
struct vfio_as_head vfio_address_spaces =
    QLIST_HEAD_INITIALIZER(vfio_address_spaces);

#ifdef CONFIG_KVM
/*
 * We have a single VFIO pseudo device per KVM VM. Once created it lives
 * for the life of the VM. Closing the file descriptor only drops our
 * reference to it and the device's reference to kvm. Therefore once
 * initialized, this file descriptor is only released on QEMU exit and
 * we'll re-use it should another vfio device be attached before then.
 */
static int vfio_kvm_device_fd = -1;
#endif

/*
 * Common VFIO interrupt disable
 */
void vfio_disable_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER,
        .index = index,
        .start = 0,
        .count = 0,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

void vfio_unmask_single_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK,
        .index = index,
        .start = 0,
        .count = 1,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

void vfio_mask_single_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK,
        .index = index,
        .start = 0,
        .count = 1,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}
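
/*
 * Usage sketch (not part of the original file; the caller context is
 * hypothetical): a typical INTx cycle masks the index while the guest
 * interrupt is in flight and unmasks it once the guest signals EOI:
 *
 *     vfio_mask_single_irqindex(vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
 *     ... inject the interrupt into the guest, wait for EOI ...
 *     vfio_unmask_single_irqindex(vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
 */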

/*
 * IO Port/MMIO - Beware of the endians, VFIO is always little endian
 */
void vfio_region_write(void *opaque, hwaddr addr,
                       uint64_t data, unsigned size)
{
    VFIORegion *region = opaque;
    VFIODevice *vbasedev = region->vbasedev;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;

    switch (size) {
    case 1:
        buf.byte = data;
        break;
    case 2:
        buf.word = cpu_to_le16(data);
        break;
    case 4:
        buf.dword = cpu_to_le32(data);
        break;
    default:
        hw_error("vfio: unsupported write size, %d bytes", size);
        break;
    }

    if (pwrite(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) {
        error_report("%s(%s:region%d+0x%"HWADDR_PRIx", 0x%"PRIx64
                     ",%d) failed: %m",
                     __func__, vbasedev->name, region->nr,
                     addr, data, size);
    }

    trace_vfio_region_write(vbasedev->name, region->nr, addr, data, size);

    /*
     * A read or write to a BAR always signals an INTx EOI. This will
     * do nothing if not pending (including not in INTx mode). We assume
     * that a BAR access is in response to an interrupt and that BAR
     * accesses will service the interrupt. Unfortunately, we don't know
     * which access will service the interrupt, so we're potentially
     * getting quite a few host interrupts per guest interrupt.
     */
    vbasedev->ops->vfio_eoi(vbasedev);
}

uint64_t vfio_region_read(void *opaque,
                          hwaddr addr, unsigned size)
{
    VFIORegion *region = opaque;
    VFIODevice *vbasedev = region->vbasedev;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    uint64_t data = 0;

    if (pread(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) {
        error_report("%s(%s:region%d+0x%"HWADDR_PRIx", %d) failed: %m",
                     __func__, vbasedev->name, region->nr,
                     addr, size);
        return (uint64_t)-1;
    }
    switch (size) {
    case 1:
        data = buf.byte;
        break;
    case 2:
        data = le16_to_cpu(buf.word);
        break;
    case 4:
        data = le32_to_cpu(buf.dword);
        break;
    default:
        hw_error("vfio: unsupported read size, %d bytes", size);
        break;
    }

    trace_vfio_region_read(vbasedev->name, region->nr, addr, size, data);

    /* Same as write above */
    vbasedev->ops->vfio_eoi(vbasedev);

    return data;
}

const MemoryRegionOps vfio_region_ops = {
    .read = vfio_region_read,
    .write = vfio_region_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
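
/*
 * A device backend exposes one of its regions to the guest by wiring
 * these ops into a MemoryRegion. A minimal sketch, assuming a VFIORegion
 * already populated from VFIO_DEVICE_GET_REGION_INFO (the owner object
 * and region name here are hypothetical):
 *
 *     memory_region_init_io(&region->mem, OBJECT(vdev), &vfio_region_ops,
 *                           region, "vfio-bar0", region->size);
 *
 * Guest accesses then funnel through vfio_region_read/write above, which
 * bounce them to the host via the device fd.
 */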

/*
 * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86
 */
static int vfio_dma_unmap(VFIOContainer *container,
                          hwaddr iova, ram_addr_t size)
{
    struct vfio_iommu_type1_dma_unmap unmap = {
        .argsz = sizeof(unmap),
        .flags = 0,
        .iova = iova,
        .size = size,
    };

    if (ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) {
        error_report("VFIO_UNMAP_DMA: %d", -errno);
        return -errno;
    }

    return 0;
}

static int vfio_dma_map(VFIOContainer *container, hwaddr iova,
                        ram_addr_t size, void *vaddr, bool readonly)
{
    struct vfio_iommu_type1_dma_map map = {
        .argsz = sizeof(map),
        .flags = VFIO_DMA_MAP_FLAG_READ,
        .vaddr = (__u64)(uintptr_t)vaddr,
        .iova = iova,
        .size = size,
    };

    if (!readonly) {
        map.flags |= VFIO_DMA_MAP_FLAG_WRITE;
    }

    /*
     * Try the mapping, if it fails with EBUSY, unmap the region and try
     * again. This shouldn't be necessary, but we sometimes see it in
     * the VGA ROM space.
     */
    if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0 ||
        (errno == EBUSY && vfio_dma_unmap(container, iova, size) == 0 &&
         ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0)) {
        return 0;
    }

    error_report("VFIO_MAP_DMA: %d", -errno);
    return -errno;
}
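
/*
 * For illustration (not part of the original file; the addresses are
 * made up): pinning one page of guest RAM at IOVA 0x100000 so the
 * device can DMA to it read-write, then tearing the mapping down:
 *
 *     void *va = memory_region_get_ram_ptr(mr) + offset;
 *
 *     if (vfio_dma_map(container, 0x100000, 4096, va, false)) {
 *         ... handle failure ...
 *     }
 *     ...
 *     vfio_dma_unmap(container, 0x100000, 4096);
 */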

static bool vfio_listener_skipped_section(MemoryRegionSection *section)
{
    return (!memory_region_is_ram(section->mr) &&
            !memory_region_is_iommu(section->mr)) ||
           /*
            * Sizing an enabled 64-bit BAR can cause spurious mappings to
            * addresses in the upper part of the 64-bit address space. These
            * are never accessed by the CPU and beyond the address width of
            * some IOMMU hardware. TODO: VFIO should tell us the IOMMU width.
            */
           section->offset_within_address_space & (1ULL << 63);
}

static void vfio_iommu_map_notify(Notifier *n, void *data)
{
    VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, n);
    VFIOContainer *container = giommu->container;
    IOMMUTLBEntry *iotlb = data;
    MemoryRegion *mr;
    hwaddr xlat;
    hwaddr len = iotlb->addr_mask + 1;
    void *vaddr;
    int ret;

    trace_vfio_iommu_map_notify(iotlb->iova,
                                iotlb->iova + iotlb->addr_mask);

    /*
     * The IOMMU TLB entry we have just covers translation through
     * this IOMMU to its immediate target. We need to translate
     * it the rest of the way through to memory.
     */
    mr = address_space_translate(&address_space_memory,
                                 iotlb->translated_addr,
                                 &xlat, &len, iotlb->perm & IOMMU_WO);
    if (!memory_region_is_ram(mr)) {
        error_report("iommu map to non memory area %"HWADDR_PRIx,
                     xlat);
        return;
    }
    /*
     * Translation truncates length to the IOMMU page size,
     * check that it did not truncate too much.
     */
    if (len & iotlb->addr_mask) {
        error_report("iommu has granularity incompatible with target AS");
        return;
    }

    if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) {
        vaddr = memory_region_get_ram_ptr(mr) + xlat;
        ret = vfio_dma_map(container, iotlb->iova,
                           iotlb->addr_mask + 1, vaddr,
                           !(iotlb->perm & IOMMU_WO) || mr->readonly);
        if (ret) {
            error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx", %p) = %d (%m)",
                         container, iotlb->iova,
                         iotlb->addr_mask + 1, vaddr, ret);
        }
    } else {
        ret = vfio_dma_unmap(container, iotlb->iova, iotlb->addr_mask + 1);
        if (ret) {
            error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx") = %d (%m)",
                         container, iotlb->iova,
                         iotlb->addr_mask + 1, ret);
        }
    }
}
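
/*
 * For reference (hypothetical values, not part of the original file):
 * a guest IOMMU implementation triggers this notifier by publishing an
 * IOMMUTLBEntry, e.g. for a 4K read-write mapping:
 *
 *     IOMMUTLBEntry entry = {
 *         .iova            = 0x100000,    // guest IOVA
 *         .translated_addr = 0x40000000,  // guest physical address
 *         .addr_mask       = 0xfff,       // 4K page
 *         .perm            = IOMMU_RW,    // IOMMU_NONE would unmap
 *     };
 *
 *     memory_region_notify_iommu(giommu->iommu, entry);
 */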

static void vfio_listener_region_add(MemoryListener *listener,
                                     MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer,
                                            iommu_data.type1.listener);
    hwaddr iova, end;
    Int128 llend;
    void *vaddr;
    int ret;

    if (vfio_listener_skipped_section(section)) {
        trace_vfio_listener_region_add_skip(
                section->offset_within_address_space,
                section->offset_within_address_space +
                int128_get64(int128_sub(section->size, int128_one())));
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));

    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }

    memory_region_ref(section->mr);

    if (memory_region_is_iommu(section->mr)) {
        VFIOGuestIOMMU *giommu;

        trace_vfio_listener_region_add_iommu(iova,
                int128_get64(int128_sub(llend, int128_one())));
        /*
         * FIXME: We should do some checking to see if the
         * capabilities of the host VFIO IOMMU are adequate to model
         * the guest IOMMU
         *
         * FIXME: For VFIO iommu types which have KVM acceleration to
         * avoid bouncing all map/unmaps through qemu this way, this
         * would be the right place to wire that up (tell the KVM
         * device emulation the VFIO iommu handles to use).
         */
        /*
         * This assumes that the guest IOMMU contains no mappings
         * at this point.
         *
         * Two ways of ensuring this are:
         * 1. Avoid sharing IOMMUs between emulated devices or different
         *    IOMMU groups.
         * 2. Implement VFIO_IOMMU_ENABLE in the host kernel to fail if
         *    there are already some mappings in the IOMMU.
         *
         * VFIO on SPAPR does the latter. Other IOMMU models may handle
         * this differently; they must either ensure there are no existing
         * mappings or loop through existing mappings to map them into
         * VFIO.
         */
        giommu = g_malloc0(sizeof(*giommu));
        giommu->iommu = section->mr;
        giommu->container = container;
        giommu->n.notify = vfio_iommu_map_notify;
        QLIST_INSERT_HEAD(&container->giommu_list, giommu, giommu_next);
        memory_region_register_iommu_notifier(giommu->iommu, &giommu->n);

        return;
    }

    /* Here we assume that memory_region_is_ram(section->mr)==true */

    end = int128_get64(llend);
    vaddr = memory_region_get_ram_ptr(section->mr) +
            section->offset_within_region +
            (iova - section->offset_within_address_space);

    trace_vfio_listener_region_add_ram(iova, end - 1, vaddr);

    ret = vfio_dma_map(container, iova, end - iova, vaddr, section->readonly);
    if (ret) {
        error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
                     "0x%"HWADDR_PRIx", %p) = %d (%m)",
                     container, iova, end - iova, vaddr, ret);

        /*
         * On the initfn path, store the first error in the container so we
         * can gracefully fail. Runtime, there's not much we can do other
         * than throw a hardware error.
         */
        if (!container->iommu_data.type1.initialized) {
            if (!container->iommu_data.type1.error) {
                container->iommu_data.type1.error = ret;
            }
        } else {
            hw_error("vfio: DMA mapping failed, unable to continue");
        }
    }
}

static void vfio_listener_region_del(MemoryListener *listener,
                                     MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer,
                                            iommu_data.type1.listener);
    hwaddr iova, end;
    int ret;

    if (vfio_listener_skipped_section(section)) {
        trace_vfio_listener_region_del_skip(
                section->offset_within_address_space,
                section->offset_within_address_space +
                int128_get64(int128_sub(section->size, int128_one())));
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    if (memory_region_is_iommu(section->mr)) {
        VFIOGuestIOMMU *giommu;

        QLIST_FOREACH(giommu, &container->giommu_list, giommu_next) {
            if (giommu->iommu == section->mr) {
                memory_region_unregister_iommu_notifier(&giommu->n);
                QLIST_REMOVE(giommu, giommu_next);
                g_free(giommu);
                break;
            }
        }

        /*
         * FIXME: We assume the one big unmap below is adequate to
         * remove any individual page mappings in the IOMMU which
         * might have been copied into VFIO. This works for a page table
         * based IOMMU where a big unmap flattens a large range of IO-PTEs.
         * That may not be true for all IOMMU types.
         */
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    end = (section->offset_within_address_space + int128_get64(section->size)) &
          TARGET_PAGE_MASK;

    if (iova >= end) {
        return;
    }

    trace_vfio_listener_region_del(iova, end - 1);

    ret = vfio_dma_unmap(container, iova, end - iova);
    memory_region_unref(section->mr);
    if (ret) {
        error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
                     "0x%"HWADDR_PRIx") = %d (%m)",
                     container, iova, end - iova, ret);
    }
}

static const MemoryListener vfio_memory_listener = {
    .region_add = vfio_listener_region_add,
    .region_del = vfio_listener_region_del,
};

static void vfio_listener_release(VFIOContainer *container)
{
    memory_listener_unregister(&container->iommu_data.type1.listener);
}

int vfio_mmap_region(Object *obj, VFIORegion *region,
                     MemoryRegion *mem, MemoryRegion *submem,
                     void **map, size_t size, off_t offset,
                     const char *name)
{
    int ret = 0;
    VFIODevice *vbasedev = region->vbasedev;

    if (VFIO_ALLOW_MMAP && size && region->flags &
        VFIO_REGION_INFO_FLAG_MMAP) {
        int prot = 0;

        if (region->flags & VFIO_REGION_INFO_FLAG_READ) {
            prot |= PROT_READ;
        }

        if (region->flags & VFIO_REGION_INFO_FLAG_WRITE) {
            prot |= PROT_WRITE;
        }

        *map = mmap(NULL, size, prot, MAP_SHARED,
                    vbasedev->fd,
                    region->fd_offset + offset);
        if (*map == MAP_FAILED) {
            *map = NULL;
            ret = -errno;
            goto empty_region;
        }

        memory_region_init_ram_ptr(submem, obj, name, size, *map);
        memory_region_set_skip_dump(submem);
    } else {
empty_region:
        /* Create a zero sized sub-region to make cleanup easy. */
        memory_region_init(submem, obj, name, 0);
    }

    memory_region_add_subregion(mem, offset, submem);

    return ret;
}
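
/*
 * Example caller, sketched from how a PCI backend might map a BAR it has
 * already sized (the bar/vdev structure members are assumptions, not
 * part of this file):
 *
 *     if (vfio_mmap_region(OBJECT(vdev), &bar->region, &bar->region.mem,
 *                          &bar->region.mmap_mem, &bar->region.mmap,
 *                          size, 0, "vfio-bar2.mmap")) {
 *         error_report("%s mmap unsupported, performance may be slow", name);
 *     }
 *
 * On failure the zero-sized sub-region still exists, so guest accesses
 * simply fall through to the slower vfio_region_ops path.
 */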

void vfio_reset_handler(void *opaque)
{
    VFIOGroup *group;
    VFIODevice *vbasedev;

    QLIST_FOREACH(group, &vfio_group_list, next) {
        QLIST_FOREACH(vbasedev, &group->device_list, next) {
            vbasedev->ops->vfio_compute_needs_reset(vbasedev);
        }
    }

    QLIST_FOREACH(group, &vfio_group_list, next) {
        QLIST_FOREACH(vbasedev, &group->device_list, next) {
            if (vbasedev->needs_reset) {
                vbasedev->ops->vfio_hot_reset_multi(vbasedev);
            }
        }
    }
}

static void vfio_kvm_device_add_group(VFIOGroup *group)
{
#ifdef CONFIG_KVM
    struct kvm_device_attr attr = {
        .group = KVM_DEV_VFIO_GROUP,
        .attr = KVM_DEV_VFIO_GROUP_ADD,
        .addr = (uint64_t)(unsigned long)&group->fd,
    };

    if (!kvm_enabled()) {
        return;
    }

    if (vfio_kvm_device_fd < 0) {
        struct kvm_create_device cd = {
            .type = KVM_DEV_TYPE_VFIO,
        };

        if (kvm_vm_ioctl(kvm_state, KVM_CREATE_DEVICE, &cd)) {
            error_report("Failed to create KVM VFIO device: %m");
            return;
        }

        vfio_kvm_device_fd = cd.fd;
    }

    if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
        error_report("Failed to add group %d to KVM VFIO device: %m",
                     group->groupid);
    }
#endif
}

static void vfio_kvm_device_del_group(VFIOGroup *group)
{
#ifdef CONFIG_KVM
    struct kvm_device_attr attr = {
        .group = KVM_DEV_VFIO_GROUP,
        .attr = KVM_DEV_VFIO_GROUP_DEL,
        .addr = (uint64_t)(unsigned long)&group->fd,
    };

    if (vfio_kvm_device_fd < 0) {
        return;
    }

    if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
        error_report("Failed to remove group %d from KVM VFIO device: %m",
                     group->groupid);
    }
#endif
}

static VFIOAddressSpace *vfio_get_address_space(AddressSpace *as)
{
    VFIOAddressSpace *space;

    QLIST_FOREACH(space, &vfio_address_spaces, list) {
        if (space->as == as) {
            return space;
        }
    }

    /* No suitable VFIOAddressSpace, create a new one */
    space = g_malloc0(sizeof(*space));
    space->as = as;
    QLIST_INIT(&space->containers);

    QLIST_INSERT_HEAD(&vfio_address_spaces, space, list);

    return space;
}

static void vfio_put_address_space(VFIOAddressSpace *space)
{
    if (QLIST_EMPTY(&space->containers)) {
        QLIST_REMOVE(space, list);
        g_free(space);
    }
}

static int vfio_connect_container(VFIOGroup *group, AddressSpace *as)
{
    VFIOContainer *container;
    int ret, fd;
    VFIOAddressSpace *space;

    space = vfio_get_address_space(as);

    QLIST_FOREACH(container, &space->containers, next) {
        if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) {
            group->container = container;
            QLIST_INSERT_HEAD(&container->group_list, group, container_next);
            return 0;
        }
    }

    fd = qemu_open("/dev/vfio/vfio", O_RDWR);
    if (fd < 0) {
        error_report("vfio: failed to open /dev/vfio/vfio: %m");
        ret = -errno;
        goto put_space_exit;
    }

    ret = ioctl(fd, VFIO_GET_API_VERSION);
    if (ret != VFIO_API_VERSION) {
        error_report("vfio: supported vfio version: %d, "
                     "reported version: %d", VFIO_API_VERSION, ret);
        ret = -EINVAL;
        goto close_fd_exit;
    }

    container = g_malloc0(sizeof(*container));
    container->space = space;
    container->fd = fd;
    if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU) ||
        ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1v2_IOMMU)) {
        bool v2 = !!ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1v2_IOMMU);

        ret = ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &fd);
        if (ret) {
            error_report("vfio: failed to set group container: %m");
            ret = -errno;
            goto free_container_exit;
        }

        ret = ioctl(fd, VFIO_SET_IOMMU,
                    v2 ? VFIO_TYPE1v2_IOMMU : VFIO_TYPE1_IOMMU);
        if (ret) {
            error_report("vfio: failed to set iommu for container: %m");
            ret = -errno;
            goto free_container_exit;
        }

        container->iommu_data.type1.listener = vfio_memory_listener;
        container->iommu_data.release = vfio_listener_release;

        memory_listener_register(&container->iommu_data.type1.listener,
                                 container->space->as);

        if (container->iommu_data.type1.error) {
            ret = container->iommu_data.type1.error;
            error_report("vfio: memory listener initialization failed for container");
            goto listener_release_exit;
        }

        container->iommu_data.type1.initialized = true;

    } else if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_SPAPR_TCE_IOMMU)) {
        ret = ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &fd);
        if (ret) {
            error_report("vfio: failed to set group container: %m");
            ret = -errno;
            goto free_container_exit;
        }
        ret = ioctl(fd, VFIO_SET_IOMMU, VFIO_SPAPR_TCE_IOMMU);
        if (ret) {
            error_report("vfio: failed to set iommu for container: %m");
            ret = -errno;
            goto free_container_exit;
        }

        /*
         * The host kernel code implementing VFIO_IOMMU_DISABLE is called
         * when container fd is closed so we do not call it explicitly
         * in this file.
         */
        ret = ioctl(fd, VFIO_IOMMU_ENABLE);
        if (ret) {
            error_report("vfio: failed to enable container: %m");
            ret = -errno;
            goto free_container_exit;
        }

        container->iommu_data.type1.listener = vfio_memory_listener;
        container->iommu_data.release = vfio_listener_release;

        memory_listener_register(&container->iommu_data.type1.listener,
                                 container->space->as);

    } else {
        error_report("vfio: No available IOMMU models");
        ret = -EINVAL;
        goto free_container_exit;
    }

    QLIST_INIT(&container->group_list);
    QLIST_INSERT_HEAD(&space->containers, container, next);

    group->container = container;
    QLIST_INSERT_HEAD(&container->group_list, group, container_next);

    return 0;
listener_release_exit:
    vfio_listener_release(container);

free_container_exit:
    g_free(container);

close_fd_exit:
    close(fd);

put_space_exit:
    vfio_put_address_space(space);

    return ret;
}

static void vfio_disconnect_container(VFIOGroup *group)
{
    VFIOContainer *container = group->container;

    if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, &container->fd)) {
        error_report("vfio: error disconnecting group %d from container",
                     group->groupid);
    }

    QLIST_REMOVE(group, container_next);
    group->container = NULL;

    if (QLIST_EMPTY(&container->group_list)) {
        VFIOAddressSpace *space = container->space;

        if (container->iommu_data.release) {
            container->iommu_data.release(container);
        }
        QLIST_REMOVE(container, next);
        trace_vfio_disconnect_container(container->fd);
        close(container->fd);
        g_free(container);

        vfio_put_address_space(space);
    }
}

VFIOGroup *vfio_get_group(int groupid, AddressSpace *as)
{
    VFIOGroup *group;
    char path[32];
    struct vfio_group_status status = { .argsz = sizeof(status) };

    QLIST_FOREACH(group, &vfio_group_list, next) {
        if (group->groupid == groupid) {
            /* Found it.  Now is it already in the right context? */
            if (group->container->space->as == as) {
                return group;
            } else {
                error_report("vfio: group %d used in multiple address spaces",
                             group->groupid);
                return NULL;
            }
        }
    }

    group = g_malloc0(sizeof(*group));

    snprintf(path, sizeof(path), "/dev/vfio/%d", groupid);
    group->fd = qemu_open(path, O_RDWR);
    if (group->fd < 0) {
        error_report("vfio: error opening %s: %m", path);
        goto free_group_exit;
    }

    if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status)) {
        error_report("vfio: error getting group status: %m");
        goto close_fd_exit;
    }

    if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
        error_report("vfio: error, group %d is not viable, please ensure "
                     "all devices within the iommu_group are bound to their "
                     "vfio bus driver.", groupid);
        goto close_fd_exit;
    }

    group->groupid = groupid;
    QLIST_INIT(&group->device_list);

    if (vfio_connect_container(group, as)) {
        error_report("vfio: failed to setup container for group %d", groupid);
        goto close_fd_exit;
    }

    if (QLIST_EMPTY(&vfio_group_list)) {
        qemu_register_reset(vfio_reset_handler, NULL);
    }

    QLIST_INSERT_HEAD(&vfio_group_list, group, next);

    vfio_kvm_device_add_group(group);

    return group;

close_fd_exit:
    close(group->fd);

free_group_exit:
    g_free(group);

    return NULL;
}
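
/*
 * Callers typically discover the group number through sysfs. A hedged
 * sketch for a PCI device (error handling elided; `addr` is a
 * hypothetical device address string such as "0000:06:0d.0"):
 *
 *     char path[PATH_MAX], iommu_group_path[PATH_MAX];
 *     int groupid;
 *     ssize_t len;
 *
 *     snprintf(path, sizeof(path),
 *              "/sys/bus/pci/devices/%s/iommu_group", addr);
 *     len = readlink(path, iommu_group_path, sizeof(iommu_group_path) - 1);
 *     iommu_group_path[len] = 0;
 *     sscanf(basename(iommu_group_path), "%d", &groupid);
 *
 *     group = vfio_get_group(groupid, &address_space_memory);
 */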

void vfio_put_group(VFIOGroup *group)
{
    if (!group || !QLIST_EMPTY(&group->device_list)) {
        return;
    }

    vfio_kvm_device_del_group(group);
    vfio_disconnect_container(group);
    QLIST_REMOVE(group, next);
    trace_vfio_put_group(group->fd);
    close(group->fd);
    g_free(group);

    if (QLIST_EMPTY(&vfio_group_list)) {
        qemu_unregister_reset(vfio_reset_handler, NULL);
    }
}

int vfio_get_device(VFIOGroup *group, const char *name,
                    VFIODevice *vbasedev)
{
    struct vfio_device_info dev_info = { .argsz = sizeof(dev_info) };
    int ret, fd;

    fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, name);
    if (fd < 0) {
        error_report("vfio: error getting device %s from group %d: %m",
                     name, group->groupid);
        error_printf("Verify all devices in group %d are bound to vfio-<bus> "
                     "or pci-stub and not already in use\n", group->groupid);
        return fd;
    }

    ret = ioctl(fd, VFIO_DEVICE_GET_INFO, &dev_info);
    if (ret) {
        error_report("vfio: error getting device info: %m");
        close(fd);
        return ret;
    }

    vbasedev->fd = fd;
    vbasedev->group = group;
    QLIST_INSERT_HEAD(&group->device_list, vbasedev, next);

    vbasedev->num_irqs = dev_info.num_irqs;
    vbasedev->num_regions = dev_info.num_regions;
    vbasedev->flags = dev_info.flags;

    trace_vfio_get_device(name, dev_info.flags, dev_info.num_regions,
                          dev_info.num_irqs);

    vbasedev->reset_works = !!(dev_info.flags & VFIO_DEVICE_FLAGS_RESET);
    return 0;
}
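
/*
 * Usage sketch (the device address below is a made-up example): once the
 * group is connected to a container, individual devices can be claimed:
 *
 *     if (vfio_get_device(group, "0000:06:0d.0", vbasedev)) {
 *         vfio_put_group(group);
 *         return -1;
 *     }
 *
 * On success, vbasedev->num_regions and vbasedev->num_irqs describe what
 * the kernel driver exposes for the device.
 */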

void vfio_put_base_device(VFIODevice *vbasedev)
{
    if (!vbasedev->group) {
        return;
    }
    QLIST_REMOVE(vbasedev, next);
    vbasedev->group = NULL;
    trace_vfio_put_base_device(vbasedev->fd);
    close(vbasedev->fd);
}

static int vfio_container_do_ioctl(AddressSpace *as, int32_t groupid,
                                   int req, void *param)
{
    VFIOGroup *group;
    VFIOContainer *container;
    int ret = -1;

    group = vfio_get_group(groupid, as);
    if (!group) {
        error_report("vfio: group %d not registered", groupid);
        return ret;
    }

    container = group->container;
    if (group->container) {
        ret = ioctl(container->fd, req, param);
        if (ret < 0) {
            error_report("vfio: failed to ioctl %d to container: ret=%d, %s",
                         _IOC_NR(req) - VFIO_BASE, ret, strerror(errno));
        }
    }

    vfio_put_group(group);

    return ret;
}

int vfio_container_ioctl(AddressSpace *as, int32_t groupid,
                         int req, void *param)
{
    /* We allow only certain ioctls to the container */
    switch (req) {
    case VFIO_CHECK_EXTENSION:
    case VFIO_IOMMU_SPAPR_TCE_GET_INFO:
        break;
    default:
        /* Return an error on unknown requests */
        error_report("vfio: unsupported ioctl %X", req);
        return -1;
    }

    return vfio_container_do_ioctl(as, groupid, req, param);
}