/*
 * generic functions used by VFIO devices
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Alex Williamson <alex.williamson@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Based on qemu-kvm device-assignment:
 *  Adapted for KVM by Qumranet.
 *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
 *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
 *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
 *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
 *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>
#ifdef CONFIG_KVM
#include <linux/kvm.h>
#endif
#include <linux/vfio.h>

#include "hw/vfio/vfio-common.h"
#include "hw/vfio/vfio.h"
#include "exec/address-spaces.h"
#include "exec/memory.h"
#include "hw/hw.h"
#include "qemu/error-report.h"
#include "qemu/range.h"
#include "sysemu/kvm.h"
#include "trace.h"
#include "qapi/error.h"

struct vfio_group_head vfio_group_list =
    QLIST_HEAD_INITIALIZER(vfio_group_list);
struct vfio_as_head vfio_address_spaces =
    QLIST_HEAD_INITIALIZER(vfio_address_spaces);

#ifdef CONFIG_KVM
/*
 * We have a single VFIO pseudo device per KVM VM.  Once created it lives
 * for the life of the VM.  Closing the file descriptor only drops our
 * reference to it and the device's reference to kvm.  Therefore once
 * initialized, this file descriptor is only released on QEMU exit and
 * we'll re-use it should another vfio device be attached before then.
 */
static int vfio_kvm_device_fd = -1;
#endif

/*
 * Common VFIO interrupt disable
 */
void vfio_disable_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER,
        .index = index,
        .start = 0,
        .count = 0,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

void vfio_unmask_single_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK,
        .index = index,
        .start = 0,
        .count = 1,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

void vfio_mask_single_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK,
        .index = index,
        .start = 0,
        .count = 1,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

/*
 * IO Port/MMIO - Beware of the endians, VFIO is always little endian
 */
void vfio_region_write(void *opaque, hwaddr addr,
                       uint64_t data, unsigned size)
{
    VFIORegion *region = opaque;
    VFIODevice *vbasedev = region->vbasedev;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;

    switch (size) {
    case 1:
        buf.byte = data;
        break;
    case 2:
        buf.word = cpu_to_le16(data);
        break;
    case 4:
        buf.dword = cpu_to_le32(data);
        break;
    case 8:
        buf.qword = cpu_to_le64(data);
        break;
    default:
        hw_error("vfio: unsupported write size, %d bytes", size);
        break;
    }

    if (pwrite(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) {
        error_report("%s(%s:region%d+0x%"HWADDR_PRIx", 0x%"PRIx64
                     ",%d) failed: %m",
                     __func__, vbasedev->name, region->nr,
                     addr, data, size);
    }

    trace_vfio_region_write(vbasedev->name, region->nr, addr, data, size);

    /*
     * A read or write to a BAR always signals an INTx EOI.  This will
     * do nothing if not pending (including not in INTx mode).  We assume
     * that a BAR access is in response to an interrupt and that BAR
     * accesses will service the interrupt.  Unfortunately, we don't know
     * which access will service the interrupt, so we're potentially
     * getting quite a few host interrupts per guest interrupt.
     */
    vbasedev->ops->vfio_eoi(vbasedev);
}

uint64_t vfio_region_read(void *opaque,
                          hwaddr addr, unsigned size)
{
    VFIORegion *region = opaque;
    VFIODevice *vbasedev = region->vbasedev;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    uint64_t data = 0;

    if (pread(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) {
        error_report("%s(%s:region%d+0x%"HWADDR_PRIx", %d) failed: %m",
                     __func__, vbasedev->name, region->nr,
                     addr, size);
        return (uint64_t)-1;
    }
    switch (size) {
    case 1:
        data = buf.byte;
        break;
    case 2:
        data = le16_to_cpu(buf.word);
        break;
    case 4:
        data = le32_to_cpu(buf.dword);
        break;
    case 8:
        data = le64_to_cpu(buf.qword);
        break;
    default:
        hw_error("vfio: unsupported read size, %d bytes", size);
        break;
    }

    trace_vfio_region_read(vbasedev->name, region->nr, addr, size, data);

    /* Same as write above */
    vbasedev->ops->vfio_eoi(vbasedev);

    return data;
}

const MemoryRegionOps vfio_region_ops = {
    .read = vfio_region_read,
    .write = vfio_region_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

/*
 * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86
 */
static int vfio_dma_unmap(VFIOContainer *container,
                          hwaddr iova, ram_addr_t size)
{
    struct vfio_iommu_type1_dma_unmap unmap = {
        .argsz = sizeof(unmap),
        .flags = 0,
        .iova = iova,
        .size = size,
    };

    if (ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) {
        error_report("VFIO_UNMAP_DMA: %d", -errno);
        return -errno;
    }

    return 0;
}

static int vfio_dma_map(VFIOContainer *container, hwaddr iova,
                        ram_addr_t size, void *vaddr, bool readonly)
{
    struct vfio_iommu_type1_dma_map map = {
        .argsz = sizeof(map),
        .flags = VFIO_DMA_MAP_FLAG_READ,
        .vaddr = (__u64)(uintptr_t)vaddr,
        .iova = iova,
        .size = size,
    };

    if (!readonly) {
        map.flags |= VFIO_DMA_MAP_FLAG_WRITE;
    }

    /*
     * Try the mapping; if it fails with EBUSY, unmap the region and try
     * again.  This shouldn't be necessary, but we sometimes see it in
     * the VGA ROM space.
     */
    if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0 ||
        (errno == EBUSY && vfio_dma_unmap(container, iova, size) == 0 &&
         ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0)) {
        return 0;
    }

    error_report("VFIO_MAP_DMA: %d", -errno);
    return -errno;
}

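/*
 * Record a host IOMMU DMA window (an IOVA range the host can map, along
 * with its supported page sizes) on the container.  Overlapping windows
 * are not supported.
 */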
static void vfio_host_win_add(VFIOContainer *container,
                              hwaddr min_iova, hwaddr max_iova,
                              uint64_t iova_pgsizes)
{
    VFIOHostDMAWindow *hostwin;

    QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
        if (ranges_overlap(hostwin->min_iova,
                           hostwin->max_iova - hostwin->min_iova + 1,
                           min_iova,
                           max_iova - min_iova + 1)) {
            hw_error("%s: overlapping host DMA windows are not supported",
                     __func__);
        }
    }

    hostwin = g_malloc0(sizeof(*hostwin));

    hostwin->min_iova = min_iova;
    hostwin->max_iova = max_iova;
    hostwin->iova_pgsizes = iova_pgsizes;
    QLIST_INSERT_HEAD(&container->hostwin_list, hostwin, hostwin_next);
}

static int vfio_host_win_del(VFIOContainer *container, hwaddr min_iova,
                             hwaddr max_iova)
{
    VFIOHostDMAWindow *hostwin;

    QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
        if (hostwin->min_iova == min_iova && hostwin->max_iova == max_iova) {
            QLIST_REMOVE(hostwin, hostwin_next);
            return 0;
        }
    }

    return -1;
}

static bool vfio_listener_skipped_section(MemoryRegionSection *section)
{
    return (!memory_region_is_ram(section->mr) &&
            !memory_region_is_iommu(section->mr)) ||
           /*
            * Sizing an enabled 64-bit BAR can cause spurious mappings to
            * addresses in the upper part of the 64-bit address space.  These
            * are never accessed by the CPU and are beyond the address width
            * of some IOMMU hardware.  TODO: VFIO should tell us the IOMMU
            * width.
            */
           section->offset_within_address_space & (1ULL << 63);
}

/* Called with rcu_read_lock held.  */
static bool vfio_get_vaddr(IOMMUTLBEntry *iotlb, void **vaddr,
                           bool *read_only)
{
    MemoryRegion *mr;
    hwaddr xlat;
    hwaddr len = iotlb->addr_mask + 1;
    bool writable = iotlb->perm & IOMMU_WO;

    /*
     * The IOMMU TLB entry we have just covers translation through
     * this IOMMU to its immediate target.  We need to translate
     * it the rest of the way through to memory.
     */
    mr = address_space_translate(&address_space_memory,
                                 iotlb->translated_addr,
                                 &xlat, &len, writable);
    if (!memory_region_is_ram(mr)) {
        error_report("iommu map to non-memory area %"HWADDR_PRIx,
                     xlat);
        return false;
    }

    /*
     * Translation truncates length to the IOMMU page size;
     * check that it did not truncate too much.
     */
    if (len & iotlb->addr_mask) {
        error_report("iommu has granularity incompatible with target AS");
        return false;
    }

    *vaddr = memory_region_get_ram_ptr(mr) + xlat;
    *read_only = !writable || mr->readonly;

    return true;
}

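/*
 * IOMMU notifier callback: propagate a guest IOMMU (vIOMMU) mapping change
 * into the host.  Map events pin the translated guest memory through
 * vfio_dma_map(); unmap events tear the range down via vfio_dma_unmap().
 */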
static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
{
    VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, n);
    VFIOContainer *container = giommu->container;
    hwaddr iova = iotlb->iova + giommu->iommu_offset;
    bool read_only;
    void *vaddr;
    int ret;

    trace_vfio_iommu_map_notify(iotlb->perm == IOMMU_NONE ? "UNMAP" : "MAP",
                                iova, iova + iotlb->addr_mask);

    if (iotlb->target_as != &address_space_memory) {
        error_report("Wrong target AS \"%s\", only system memory is allowed",
                     iotlb->target_as->name ? iotlb->target_as->name : "none");
        return;
    }

    rcu_read_lock();

    if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) {
        if (!vfio_get_vaddr(iotlb, &vaddr, &read_only)) {
            goto out;
        }
        /*
         * vaddr is only valid until rcu_read_unlock().  But after
         * vfio_dma_map has set up the mapping the pages will be
         * pinned by the kernel.  This makes sure that the RAM backend
         * of vaddr will always be there, even if the memory object is
         * destroyed and its backing memory munmap-ed.
         */
        ret = vfio_dma_map(container, iova,
                           iotlb->addr_mask + 1, vaddr,
                           read_only);
        if (ret) {
            error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx", %p) = %d (%m)",
                         container, iova,
                         iotlb->addr_mask + 1, vaddr, ret);
        }
    } else {
        ret = vfio_dma_unmap(container, iova, iotlb->addr_mask + 1);
        if (ret) {
            error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx") = %d (%m)",
                         container, iova,
                         iotlb->addr_mask + 1, ret);
        }
    }
out:
    rcu_read_unlock();
}

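/*
 * MemoryListener callback: a MemoryRegionSection was added to the
 * container's address space.  RAM sections get DMA-mapped directly;
 * IOMMU sections instead register a notifier so that guest IOMMU updates
 * are replayed into the container as they happen.
 */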
static void vfio_listener_region_add(MemoryListener *listener,
                                     MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer, listener);
    hwaddr iova, end;
    Int128 llend, llsize;
    void *vaddr;
    int ret;
    VFIOHostDMAWindow *hostwin;
    bool hostwin_found;

    if (vfio_listener_skipped_section(section)) {
        trace_vfio_listener_region_add_skip(
                section->offset_within_address_space,
                section->offset_within_address_space +
                int128_get64(int128_sub(section->size, int128_one())));
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));

    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }
    end = int128_get64(int128_sub(llend, int128_one()));

    if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
        VFIOHostDMAWindow *hostwin;
        hwaddr pgsize = 0;

        /* For now, intersections are not allowed; we may relax this later */
        QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
            if (ranges_overlap(hostwin->min_iova,
                               hostwin->max_iova - hostwin->min_iova + 1,
                               section->offset_within_address_space,
                               int128_get64(section->size))) {
                ret = -1;
                goto fail;
            }
        }

        ret = vfio_spapr_create_window(container, section, &pgsize);
        if (ret) {
            goto fail;
        }

        vfio_host_win_add(container, section->offset_within_address_space,
                          section->offset_within_address_space +
                          int128_get64(section->size) - 1, pgsize);
    }

    hostwin_found = false;
    QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
        if (hostwin->min_iova <= iova && end <= hostwin->max_iova) {
            hostwin_found = true;
            break;
        }
    }

    if (!hostwin_found) {
        error_report("vfio: IOMMU container %p can't map guest IOVA region"
                     " 0x%"HWADDR_PRIx"..0x%"HWADDR_PRIx,
                     container, iova, end);
        ret = -EFAULT;
        goto fail;
    }

    memory_region_ref(section->mr);

    if (memory_region_is_iommu(section->mr)) {
        VFIOGuestIOMMU *giommu;
        IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(section->mr);

        trace_vfio_listener_region_add_iommu(iova, end);
        /*
         * FIXME: For VFIO iommu types which have KVM acceleration to
         * avoid bouncing all map/unmaps through qemu this way, this
         * would be the right place to wire that up (tell the KVM
         * device emulation the VFIO iommu handles to use).
         */
        giommu = g_malloc0(sizeof(*giommu));
        giommu->iommu = iommu_mr;
        giommu->iommu_offset = section->offset_within_address_space -
                               section->offset_within_region;
        giommu->container = container;
        llend = int128_add(int128_make64(section->offset_within_region),
                           section->size);
        llend = int128_sub(llend, int128_one());
        iommu_notifier_init(&giommu->n, vfio_iommu_map_notify,
                            IOMMU_NOTIFIER_ALL,
                            section->offset_within_region,
                            int128_get64(llend));
        QLIST_INSERT_HEAD(&container->giommu_list, giommu, giommu_next);

        memory_region_register_iommu_notifier(section->mr, &giommu->n);
        memory_region_iommu_replay(giommu->iommu, &giommu->n);

        return;
    }

    /* Here we assume that memory_region_is_ram(section->mr) == true */

    vaddr = memory_region_get_ram_ptr(section->mr) +
            section->offset_within_region +
            (iova - section->offset_within_address_space);

    trace_vfio_listener_region_add_ram(iova, end, vaddr);

    llsize = int128_sub(llend, int128_make64(iova));

    ret = vfio_dma_map(container, iova, int128_get64(llsize),
                       vaddr, section->readonly);
    if (ret) {
        error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
                     "0x%"HWADDR_PRIx", %p) = %d (%m)",
                     container, iova, int128_get64(llsize), vaddr, ret);
        goto fail;
    }

    return;

fail:
    /*
     * On the initfn path, store the first error in the container so we
     * can gracefully fail.  Runtime, there's not much we can do other
     * than throw a hardware error.
     */
    if (!container->initialized) {
        if (!container->error) {
            container->error = ret;
        }
    } else {
        hw_error("vfio: DMA mapping failed, unable to continue");
    }
}

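/*
 * MemoryListener callback: the counterpart of vfio_listener_region_add().
 * Unregister any IOMMU notifier covering the section, then unmap the whole
 * IOVA range in one go.
 */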
static void vfio_listener_region_del(MemoryListener *listener,
                                     MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer, listener);
    hwaddr iova, end;
    Int128 llend, llsize;
    int ret;

    if (vfio_listener_skipped_section(section)) {
        trace_vfio_listener_region_del_skip(
                section->offset_within_address_space,
                section->offset_within_address_space +
                int128_get64(int128_sub(section->size, int128_one())));
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    if (memory_region_is_iommu(section->mr)) {
        VFIOGuestIOMMU *giommu;

        QLIST_FOREACH(giommu, &container->giommu_list, giommu_next) {
            if (MEMORY_REGION(giommu->iommu) == section->mr &&
                giommu->n.start == section->offset_within_region) {
                memory_region_unregister_iommu_notifier(section->mr,
                                                        &giommu->n);
                QLIST_REMOVE(giommu, giommu_next);
                g_free(giommu);
                break;
            }
        }

        /*
         * FIXME: We assume the one big unmap below is adequate to
         * remove any individual page mappings in the IOMMU which
         * might have been copied into VFIO.  This works for a page table
         * based IOMMU where a big unmap flattens a large range of IO-PTEs.
         * That may not be true for all IOMMU types.
         */
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));

    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }
    end = int128_get64(int128_sub(llend, int128_one()));

    llsize = int128_sub(llend, int128_make64(iova));

    trace_vfio_listener_region_del(iova, end);

    ret = vfio_dma_unmap(container, iova, int128_get64(llsize));
    memory_region_unref(section->mr);
    if (ret) {
        error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
                     "0x%"HWADDR_PRIx") = %d (%m)",
                     container, iova, int128_get64(llsize), ret);
    }

    if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
        vfio_spapr_remove_window(container,
                                 section->offset_within_address_space);
        if (vfio_host_win_del(container,
                              section->offset_within_address_space,
                              section->offset_within_address_space +
                              int128_get64(section->size) - 1) < 0) {
            hw_error("%s: Cannot delete missing window at %"HWADDR_PRIx,
                     __func__, section->offset_within_address_space);
        }
    }
}

static const MemoryListener vfio_memory_listener = {
    .region_add = vfio_listener_region_add,
    .region_del = vfio_listener_region_del,
};

static void vfio_listener_release(VFIOContainer *container)
{
    memory_listener_unregister(&container->listener);
    if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
        memory_listener_unregister(&container->prereg_listener);
    }
}

static struct vfio_info_cap_header *
vfio_get_region_info_cap(struct vfio_region_info *info, uint16_t id)
{
    struct vfio_info_cap_header *hdr;
    void *ptr = info;

    if (!(info->flags & VFIO_REGION_INFO_FLAG_CAPS)) {
        return NULL;
    }

    for (hdr = ptr + info->cap_offset; hdr != ptr; hdr = ptr + hdr->next) {
        if (hdr->id == id) {
            return hdr;
        }
    }

    return NULL;
}

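/*
 * Parse the region's sparse-mmap capability, if present, into the
 * region->mmaps array so that only the mmap-able sub-ranges get mapped.
 * Returns -ENODEV when the capability is absent, in which case the caller
 * falls back to mmapping the whole region.
 */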
static int vfio_setup_region_sparse_mmaps(VFIORegion *region,
                                          struct vfio_region_info *info)
{
    struct vfio_info_cap_header *hdr;
    struct vfio_region_info_cap_sparse_mmap *sparse;
    int i, j;

    hdr = vfio_get_region_info_cap(info, VFIO_REGION_INFO_CAP_SPARSE_MMAP);
    if (!hdr) {
        return -ENODEV;
    }

    sparse = container_of(hdr, struct vfio_region_info_cap_sparse_mmap, header);

    trace_vfio_region_sparse_mmap_header(region->vbasedev->name,
                                         region->nr, sparse->nr_areas);

    region->mmaps = g_new0(VFIOMmap, sparse->nr_areas);

    for (i = 0, j = 0; i < sparse->nr_areas; i++) {
        trace_vfio_region_sparse_mmap_entry(i, sparse->areas[i].offset,
                                            sparse->areas[i].offset +
                                            sparse->areas[i].size);

        if (sparse->areas[i].size) {
            region->mmaps[j].offset = sparse->areas[i].offset;
            region->mmaps[j].size = sparse->areas[i].size;
            j++;
        }
    }

    region->nr_mmaps = j;
    region->mmaps = g_realloc(region->mmaps, j * sizeof(VFIOMmap));

    return 0;
}

int vfio_region_setup(Object *obj, VFIODevice *vbasedev, VFIORegion *region,
                      int index, const char *name)
{
    struct vfio_region_info *info;
    int ret;

    ret = vfio_get_region_info(vbasedev, index, &info);
    if (ret) {
        return ret;
    }

    region->vbasedev = vbasedev;
    region->flags = info->flags;
    region->size = info->size;
    region->fd_offset = info->offset;
    region->nr = index;

    if (region->size) {
        region->mem = g_new0(MemoryRegion, 1);
        memory_region_init_io(region->mem, obj, &vfio_region_ops,
                              region, name, region->size);

        if (!vbasedev->no_mmap &&
            region->flags & VFIO_REGION_INFO_FLAG_MMAP) {

            ret = vfio_setup_region_sparse_mmaps(region, info);

            if (ret) {
                region->nr_mmaps = 1;
                region->mmaps = g_new0(VFIOMmap, region->nr_mmaps);
                region->mmaps[0].offset = 0;
                region->mmaps[0].size = region->size;
            }
        }
    }

    g_free(info);

    trace_vfio_region_setup(vbasedev->name, index, name,
                            region->flags, region->fd_offset, region->size);
    return 0;
}

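/*
 * mmap() each mmap-able range of the region and overlay it on region->mem
 * as a RAM device subregion, so that guest accesses bypass the slower
 * read/write ioctl path.  On failure, unwind any mappings already made
 * and return -errno.
 */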
int vfio_region_mmap(VFIORegion *region)
{
    int i, prot = 0;
    char *name;

    if (!region->mem) {
        return 0;
    }

    prot |= region->flags & VFIO_REGION_INFO_FLAG_READ ? PROT_READ : 0;
    prot |= region->flags & VFIO_REGION_INFO_FLAG_WRITE ? PROT_WRITE : 0;

    for (i = 0; i < region->nr_mmaps; i++) {
        region->mmaps[i].mmap = mmap(NULL, region->mmaps[i].size, prot,
                                     MAP_SHARED, region->vbasedev->fd,
                                     region->fd_offset +
                                     region->mmaps[i].offset);
        if (region->mmaps[i].mmap == MAP_FAILED) {
            int ret = -errno;

            trace_vfio_region_mmap_fault(memory_region_name(region->mem), i,
                                         region->fd_offset +
                                         region->mmaps[i].offset,
                                         region->fd_offset +
                                         region->mmaps[i].offset +
                                         region->mmaps[i].size - 1, ret);

            region->mmaps[i].mmap = NULL;

            for (i--; i >= 0; i--) {
                memory_region_del_subregion(region->mem, &region->mmaps[i].mem);
                munmap(region->mmaps[i].mmap, region->mmaps[i].size);
                object_unparent(OBJECT(&region->mmaps[i].mem));
                region->mmaps[i].mmap = NULL;
            }

            return ret;
        }

        name = g_strdup_printf("%s mmaps[%d]",
                               memory_region_name(region->mem), i);
        memory_region_init_ram_device_ptr(&region->mmaps[i].mem,
                                          memory_region_owner(region->mem),
                                          name, region->mmaps[i].size,
                                          region->mmaps[i].mmap);
        g_free(name);
        memory_region_add_subregion(region->mem, region->mmaps[i].offset,
                                    &region->mmaps[i].mem);

        trace_vfio_region_mmap(memory_region_name(&region->mmaps[i].mem),
                               region->mmaps[i].offset,
                               region->mmaps[i].offset +
                               region->mmaps[i].size - 1);
    }

    return 0;
}

void vfio_region_exit(VFIORegion *region)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            memory_region_del_subregion(region->mem, &region->mmaps[i].mem);
        }
    }

    trace_vfio_region_exit(region->vbasedev->name, region->nr);
}

void vfio_region_finalize(VFIORegion *region)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            munmap(region->mmaps[i].mmap, region->mmaps[i].size);
            object_unparent(OBJECT(&region->mmaps[i].mem));
        }
    }

    object_unparent(OBJECT(region->mem));

    g_free(region->mem);
    g_free(region->mmaps);

    trace_vfio_region_finalize(region->vbasedev->name, region->nr);
}

void vfio_region_mmaps_set_enabled(VFIORegion *region, bool enabled)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            memory_region_set_enabled(&region->mmaps[i].mem, enabled);
        }
    }

    trace_vfio_region_mmaps_set_enabled(memory_region_name(region->mem),
                                        enabled);
}

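/*
 * System reset handler.  Two passes over all devices: first let each device
 * decide whether it needs a reset, then perform the (possibly multi-device)
 * hot resets for those that do.
 */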
void vfio_reset_handler(void *opaque)
{
    VFIOGroup *group;
    VFIODevice *vbasedev;

    QLIST_FOREACH(group, &vfio_group_list, next) {
        QLIST_FOREACH(vbasedev, &group->device_list, next) {
            if (vbasedev->dev->realized) {
                vbasedev->ops->vfio_compute_needs_reset(vbasedev);
            }
        }
    }

    QLIST_FOREACH(group, &vfio_group_list, next) {
        QLIST_FOREACH(vbasedev, &group->device_list, next) {
            if (vbasedev->dev->realized && vbasedev->needs_reset) {
                vbasedev->ops->vfio_hot_reset_multi(vbasedev);
            }
        }
    }
}

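/*
 * Tell the KVM VFIO pseudo device about this group, creating the device on
 * first use (see the comment above vfio_kvm_device_fd).  This lets the
 * kernel coordinate KVM and VFIO state for the group.
 */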
static void vfio_kvm_device_add_group(VFIOGroup *group)
{
#ifdef CONFIG_KVM
    struct kvm_device_attr attr = {
        .group = KVM_DEV_VFIO_GROUP,
        .attr = KVM_DEV_VFIO_GROUP_ADD,
        .addr = (uint64_t)(unsigned long)&group->fd,
    };

    if (!kvm_enabled()) {
        return;
    }

    if (vfio_kvm_device_fd < 0) {
        struct kvm_create_device cd = {
            .type = KVM_DEV_TYPE_VFIO,
        };

        if (kvm_vm_ioctl(kvm_state, KVM_CREATE_DEVICE, &cd)) {
            error_report("Failed to create KVM VFIO device: %m");
            return;
        }

        vfio_kvm_device_fd = cd.fd;
    }

    if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
        error_report("Failed to add group %d to KVM VFIO device: %m",
                     group->groupid);
    }
#endif
}

static void vfio_kvm_device_del_group(VFIOGroup *group)
{
#ifdef CONFIG_KVM
    struct kvm_device_attr attr = {
        .group = KVM_DEV_VFIO_GROUP,
        .attr = KVM_DEV_VFIO_GROUP_DEL,
        .addr = (uint64_t)(unsigned long)&group->fd,
    };

    if (vfio_kvm_device_fd < 0) {
        return;
    }

    if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
        error_report("Failed to remove group %d from KVM VFIO device: %m",
                     group->groupid);
    }
#endif
}

static VFIOAddressSpace *vfio_get_address_space(AddressSpace *as)
{
    VFIOAddressSpace *space;

    QLIST_FOREACH(space, &vfio_address_spaces, list) {
        if (space->as == as) {
            return space;
        }
    }

    /* No suitable VFIOAddressSpace, create a new one */
    space = g_malloc0(sizeof(*space));
    space->as = as;
    QLIST_INIT(&space->containers);

    QLIST_INSERT_HEAD(&vfio_address_spaces, space, list);

    return space;
}

static void vfio_put_address_space(VFIOAddressSpace *space)
{
    if (QLIST_EMPTY(&space->containers)) {
        QLIST_REMOVE(space, list);
        g_free(space);
    }
}

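/*
 * Attach @group to a container in address space @as: reuse an existing
 * container if the kernel accepts the group into it, otherwise open a new
 * /dev/vfio/vfio container, select a supported IOMMU type (Type1 or sPAPR
 * TCE) and register the memory listener(s) on it.
 */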
static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
                                  Error **errp)
{
    VFIOContainer *container;
    int ret, fd;
    VFIOAddressSpace *space;

    space = vfio_get_address_space(as);

    QLIST_FOREACH(container, &space->containers, next) {
        if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) {
            group->container = container;
            QLIST_INSERT_HEAD(&container->group_list, group, container_next);
            return 0;
        }
    }

    fd = qemu_open("/dev/vfio/vfio", O_RDWR);
    if (fd < 0) {
        error_setg_errno(errp, errno, "failed to open /dev/vfio/vfio");
        ret = -errno;
        goto put_space_exit;
    }

    ret = ioctl(fd, VFIO_GET_API_VERSION);
    if (ret != VFIO_API_VERSION) {
        error_setg(errp, "supported vfio version: %d, "
                   "reported version: %d", VFIO_API_VERSION, ret);
        ret = -EINVAL;
        goto close_fd_exit;
    }

    container = g_malloc0(sizeof(*container));
    container->space = space;
    container->fd = fd;
    if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU) ||
        ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1v2_IOMMU)) {
        bool v2 = !!ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1v2_IOMMU);
        struct vfio_iommu_type1_info info;

        ret = ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &fd);
        if (ret) {
            error_setg_errno(errp, errno, "failed to set group container");
            ret = -errno;
            goto free_container_exit;
        }

        container->iommu_type = v2 ? VFIO_TYPE1v2_IOMMU : VFIO_TYPE1_IOMMU;
        ret = ioctl(fd, VFIO_SET_IOMMU, container->iommu_type);
        if (ret) {
            error_setg_errno(errp, errno, "failed to set iommu for container");
            ret = -errno;
            goto free_container_exit;
        }

        /*
         * FIXME: This assumes that a Type1 IOMMU can map any 64-bit
         * IOVA whatsoever.  That's not actually true, but the current
         * kernel interface doesn't tell us what it can map, and the
         * existing Type1 IOMMUs generally support any IOVA we're
         * going to actually try in practice.
         */
        info.argsz = sizeof(info);
        ret = ioctl(fd, VFIO_IOMMU_GET_INFO, &info);
        /* Ignore errors */
        if (ret || !(info.flags & VFIO_IOMMU_INFO_PGSIZES)) {
            /* Assume 4k IOVA page size */
            info.iova_pgsizes = 4096;
        }
        vfio_host_win_add(container, 0, (hwaddr)-1, info.iova_pgsizes);
    } else if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_SPAPR_TCE_IOMMU) ||
               ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_SPAPR_TCE_v2_IOMMU)) {
        struct vfio_iommu_spapr_tce_info info;
        bool v2 = !!ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_SPAPR_TCE_v2_IOMMU);

        ret = ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &fd);
        if (ret) {
            error_setg_errno(errp, errno, "failed to set group container");
            ret = -errno;
            goto free_container_exit;
        }
        container->iommu_type =
            v2 ? VFIO_SPAPR_TCE_v2_IOMMU : VFIO_SPAPR_TCE_IOMMU;
        ret = ioctl(fd, VFIO_SET_IOMMU, container->iommu_type);
        if (ret) {
            error_setg_errno(errp, errno, "failed to set iommu for container");
            ret = -errno;
            goto free_container_exit;
        }

        /*
         * The host kernel code implementing VFIO_IOMMU_DISABLE is called
         * when the container fd is closed, so we do not call it explicitly
         * in this file.
         */
        if (!v2) {
            ret = ioctl(fd, VFIO_IOMMU_ENABLE);
            if (ret) {
                error_setg_errno(errp, errno, "failed to enable container");
                ret = -errno;
                goto free_container_exit;
            }
        } else {
            container->prereg_listener = vfio_prereg_listener;

            memory_listener_register(&container->prereg_listener,
                                     &address_space_memory);
            if (container->error) {
                memory_listener_unregister(&container->prereg_listener);
                ret = container->error;
                error_setg(errp,
                    "RAM memory listener initialization failed for container");
                goto free_container_exit;
            }
        }

        info.argsz = sizeof(info);
        ret = ioctl(fd, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);
        if (ret) {
            error_setg_errno(errp, errno,
                             "VFIO_IOMMU_SPAPR_TCE_GET_INFO failed");
            ret = -errno;
            if (v2) {
                memory_listener_unregister(&container->prereg_listener);
            }
            goto free_container_exit;
        }

        if (v2) {
            /*
             * A newly created container comes with a default window.
             * To keep region_add/del simple, remove it now and let the
             * iommu_listener callbacks create/remove windows as needed.
             */
            ret = vfio_spapr_remove_window(container, info.dma32_window_start);
            if (ret) {
                error_setg_errno(errp, -ret,
                                 "failed to remove existing window");
                goto free_container_exit;
            }
        } else {
            /* The default table uses 4K pages */
            vfio_host_win_add(container, info.dma32_window_start,
                              info.dma32_window_start +
                              info.dma32_window_size - 1,
                              0x1000);
        }
    } else {
        error_setg(errp, "No available IOMMU models");
        ret = -EINVAL;
        goto free_container_exit;
    }

    container->listener = vfio_memory_listener;

    memory_listener_register(&container->listener, container->space->as);

    if (container->error) {
        ret = container->error;
        error_setg_errno(errp, -ret,
                         "memory listener initialization failed for container");
        goto listener_release_exit;
    }

    container->initialized = true;

    QLIST_INIT(&container->group_list);
    QLIST_INSERT_HEAD(&space->containers, container, next);

    group->container = container;
    QLIST_INSERT_HEAD(&container->group_list, group, container_next);

    return 0;
listener_release_exit:
    vfio_listener_release(container);

free_container_exit:
    g_free(container);

close_fd_exit:
    close(fd);

put_space_exit:
    vfio_put_address_space(space);

    return ret;
}

static void vfio_disconnect_container(VFIOGroup *group)
{
    VFIOContainer *container = group->container;

    if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, &container->fd)) {
        error_report("vfio: error disconnecting group %d from container",
                     group->groupid);
    }

    QLIST_REMOVE(group, container_next);
    group->container = NULL;

    if (QLIST_EMPTY(&container->group_list)) {
        VFIOAddressSpace *space = container->space;
        VFIOGuestIOMMU *giommu, *tmp;

        vfio_listener_release(container);
        QLIST_REMOVE(container, next);

        QLIST_FOREACH_SAFE(giommu, &container->giommu_list, giommu_next, tmp) {
            memory_region_unregister_iommu_notifier(
                    MEMORY_REGION(giommu->iommu), &giommu->n);
            QLIST_REMOVE(giommu, giommu_next);
            g_free(giommu);
        }

        trace_vfio_disconnect_container(container->fd);
        close(container->fd);
        g_free(container);

        vfio_put_address_space(space);
    }
}

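/*
 * Look up (or open and connect) the VFIO group with @groupid.  A group can
 * only be used in a single address space; requesting it for a different AS
 * is an error.
 */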
VFIOGroup *vfio_get_group(int groupid, AddressSpace *as, Error **errp)
{
    VFIOGroup *group;
    char path[32];
    struct vfio_group_status status = { .argsz = sizeof(status) };

    QLIST_FOREACH(group, &vfio_group_list, next) {
        if (group->groupid == groupid) {
            /* Found it.  Now is it already in the right context? */
            if (group->container->space->as == as) {
                return group;
            } else {
                error_setg(errp, "group %d used in multiple address spaces",
                           group->groupid);
                return NULL;
            }
        }
    }

    group = g_malloc0(sizeof(*group));

    snprintf(path, sizeof(path), "/dev/vfio/%d", groupid);
    group->fd = qemu_open(path, O_RDWR);
    if (group->fd < 0) {
        error_setg_errno(errp, errno, "failed to open %s", path);
        goto free_group_exit;
    }

    if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status)) {
        error_setg_errno(errp, errno, "failed to get group %d status", groupid);
        goto close_fd_exit;
    }

    if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
        error_setg(errp, "group %d is not viable", groupid);
        error_append_hint(errp,
                          "Please ensure all devices within the iommu_group "
                          "are bound to their vfio bus driver.\n");
        goto close_fd_exit;
    }

    group->groupid = groupid;
    QLIST_INIT(&group->device_list);

    if (vfio_connect_container(group, as, errp)) {
        error_prepend(errp, "failed to setup container for group %d: ",
                      groupid);
        goto close_fd_exit;
    }

    if (QLIST_EMPTY(&vfio_group_list)) {
        qemu_register_reset(vfio_reset_handler, NULL);
    }

    QLIST_INSERT_HEAD(&vfio_group_list, group, next);

    vfio_kvm_device_add_group(group);

    return group;

close_fd_exit:
    close(group->fd);

free_group_exit:
    g_free(group);

    return NULL;
}

void vfio_put_group(VFIOGroup *group)
{
    if (!group || !QLIST_EMPTY(&group->device_list)) {
        return;
    }

    vfio_kvm_device_del_group(group);
    vfio_disconnect_container(group);
    QLIST_REMOVE(group, next);
    trace_vfio_put_group(group->fd);
    close(group->fd);
    g_free(group);

    if (QLIST_EMPTY(&vfio_group_list)) {
        qemu_unregister_reset(vfio_reset_handler, NULL);
    }
}

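/*
 * Obtain a device fd from the group and fill in the common VFIODevice
 * fields (fd, region/IRQ counts, flags) from VFIO_DEVICE_GET_INFO.
 */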
int vfio_get_device(VFIOGroup *group, const char *name,
                    VFIODevice *vbasedev, Error **errp)
{
    struct vfio_device_info dev_info = { .argsz = sizeof(dev_info) };
    int ret, fd;

    fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, name);
    if (fd < 0) {
        error_setg_errno(errp, errno, "error getting device from group %d",
                         group->groupid);
        error_append_hint(errp,
                      "Verify all devices in group %d are bound to vfio-<bus> "
                      "or pci-stub and not already in use\n", group->groupid);
        return fd;
    }

    ret = ioctl(fd, VFIO_DEVICE_GET_INFO, &dev_info);
    if (ret) {
        error_setg_errno(errp, errno, "error getting device info");
        close(fd);
        return ret;
    }

    vbasedev->fd = fd;
    vbasedev->group = group;
    QLIST_INSERT_HEAD(&group->device_list, vbasedev, next);

    vbasedev->num_irqs = dev_info.num_irqs;
    vbasedev->num_regions = dev_info.num_regions;
    vbasedev->flags = dev_info.flags;

    trace_vfio_get_device(name, dev_info.flags, dev_info.num_regions,
                          dev_info.num_irqs);

    vbasedev->reset_works = !!(dev_info.flags & VFIO_DEVICE_FLAGS_RESET);
    return 0;
}

void vfio_put_base_device(VFIODevice *vbasedev)
{
    if (!vbasedev->group) {
        return;
    }
    QLIST_REMOVE(vbasedev, next);
    vbasedev->group = NULL;
    trace_vfio_put_base_device(vbasedev->fd);
    close(vbasedev->fd);
}

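/*
 * Fetch region info for @index, growing the buffer and retrying when the
 * kernel reports (via argsz) that capability chain data did not fit.  On
 * success the caller owns *info and must g_free() it.
 */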
int vfio_get_region_info(VFIODevice *vbasedev, int index,
                         struct vfio_region_info **info)
{
    size_t argsz = sizeof(struct vfio_region_info);

    *info = g_malloc0(argsz);

    (*info)->index = index;
retry:
    (*info)->argsz = argsz;

    if (ioctl(vbasedev->fd, VFIO_DEVICE_GET_REGION_INFO, *info)) {
        g_free(*info);
        *info = NULL;
        return -errno;
    }

    if ((*info)->argsz > argsz) {
        argsz = (*info)->argsz;
        *info = g_realloc(*info, argsz);

        goto retry;
    }

    return 0;
}

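/*
 * Find the device-specific region matching @type/@subtype by scanning each
 * region's capability chain.  Returns 0 with *info filled in on a match,
 * -ENODEV otherwise.
 */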
int vfio_get_dev_region_info(VFIODevice *vbasedev, uint32_t type,
                             uint32_t subtype, struct vfio_region_info **info)
{
    int i;

    for (i = 0; i < vbasedev->num_regions; i++) {
        struct vfio_info_cap_header *hdr;
        struct vfio_region_info_cap_type *cap_type;

        if (vfio_get_region_info(vbasedev, i, info)) {
            continue;
        }

        hdr = vfio_get_region_info_cap(*info, VFIO_REGION_INFO_CAP_TYPE);
        if (!hdr) {
            g_free(*info);
            continue;
        }

        cap_type = container_of(hdr, struct vfio_region_info_cap_type, header);

        trace_vfio_get_dev_region(vbasedev->name, i,
                                  cap_type->type, cap_type->subtype);

        if (cap_type->type == type && cap_type->subtype == subtype) {
            return 0;
        }

        g_free(*info);
    }

    *info = NULL;
    return -ENODEV;
}

/*
 * Interfaces for IBM EEH (Enhanced Error Handling)
 */
static bool vfio_eeh_container_ok(VFIOContainer *container)
{
    /*
     * As of 2016-03-04 (linux-4.5) the host kernel EEH/VFIO
     * implementation is broken if there are multiple groups in a
     * container.  The hardware works in units of Partitionable
     * Endpoints (== IOMMU groups) and the EEH operations naively
     * iterate across all groups in the container, without any logic
     * to make sure the groups have their state synchronized.  For
     * certain operations (ENABLE) that might be ok, until an error
     * occurs, but for others (GET_STATE) it's clearly broken.
     */

    /*
     * XXX Once fixed kernels exist, test for them here
     */

    if (QLIST_EMPTY(&container->group_list)) {
        return false;
    }

    if (QLIST_NEXT(QLIST_FIRST(&container->group_list), container_next)) {
        return false;
    }

    return true;
}

static int vfio_eeh_container_op(VFIOContainer *container, uint32_t op)
{
    struct vfio_eeh_pe_op pe_op = {
        .argsz = sizeof(pe_op),
        .op = op,
    };
    int ret;

    if (!vfio_eeh_container_ok(container)) {
        error_report("vfio/eeh: EEH_PE_OP 0x%x: "
                     "kernel requires a container with exactly one group", op);
        return -EPERM;
    }

    ret = ioctl(container->fd, VFIO_EEH_PE_OP, &pe_op);
    if (ret < 0) {
        error_report("vfio/eeh: EEH_PE_OP 0x%x failed: %m", op);
        return -errno;
    }

    return ret;
}

static VFIOContainer *vfio_eeh_as_container(AddressSpace *as)
{
    VFIOAddressSpace *space = vfio_get_address_space(as);
    VFIOContainer *container = NULL;

    if (QLIST_EMPTY(&space->containers)) {
        /* No containers to act on */
        goto out;
    }

    container = QLIST_FIRST(&space->containers);

    if (QLIST_NEXT(container, next)) {
        /*
         * We don't yet have logic to synchronize EEH state across
         * multiple containers.
         */
        container = NULL;
        goto out;
    }

out:
    vfio_put_address_space(space);
    return container;
}

bool vfio_eeh_as_ok(AddressSpace *as)
{
    VFIOContainer *container = vfio_eeh_as_container(as);

    return (container != NULL) && vfio_eeh_container_ok(container);
}

int vfio_eeh_as_op(AddressSpace *as, uint32_t op)
{
    VFIOContainer *container = vfio_eeh_as_container(as);

    if (!container) {
        return -ENODEV;
    }
    return vfio_eeh_container_op(container, op);
}