/*
 * VFIO: IOMMU DMA mapping support for Type1 IOMMU
 *
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 *
 * We arbitrarily define a Type1 IOMMU as one matching the below code.
 * It could be called the x86 IOMMU as it's designed for AMD-Vi & Intel
 * VT-d, but that makes it harder to re-use as theoretically anyone
 * implementing a similar IOMMU could make use of this.  We expect the
 * IOMMU to support the IOMMU API and have few to no restrictions around
 * the IOVA range that can be mapped.  The Type1 IOMMU is currently
 * optimized for relatively static mappings of a userspace process with
 * userspace pages pinned into memory.  We also assume devices and IOMMU
 * domains are PCI based as the IOMMU API is still centered around a
 * device/bus interface rather than a group interface.
 */
#include <linux/compat.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/workqueue.h>
#include <linux/mdev.h>
#include <linux/notifier.h>
#include <linux/dma-iommu.h>
#include <linux/irqdomain.h>
#define DRIVER_VERSION  "0.2"
#define DRIVER_AUTHOR   "Alex Williamson <alex.williamson@redhat.com>"
#define DRIVER_DESC     "Type1 IOMMU driver for VFIO"
static bool allow_unsafe_interrupts;
module_param_named(allow_unsafe_interrupts,
		   allow_unsafe_interrupts, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(allow_unsafe_interrupts,
		 "Enable VFIO IOMMU support on platforms without interrupt remapping support.");
static bool disable_hugepages;
module_param_named(disable_hugepages,
		   disable_hugepages, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(disable_hugepages,
		 "Disable VFIO IOMMU support for IOMMU hugepages.");

static unsigned int dma_entry_limit __read_mostly = U16_MAX;
module_param_named(dma_entry_limit, dma_entry_limit, uint, 0644);
MODULE_PARM_DESC(dma_entry_limit,
		 "Maximum number of user DMA mappings per container (65535).");
struct vfio_iommu {
	struct list_head	domain_list;
	struct vfio_domain	*external_domain; /* domain for external user */
	struct mutex		lock;
	struct rb_root		dma_list;
	struct blocking_notifier_head notifier;
	unsigned int		dma_avail;
	bool			v2;
	bool			nesting;
};

struct vfio_domain {
	struct iommu_domain	*domain;
	struct list_head	next;
	struct list_head	group_list;
	int			prot;		/* IOMMU_CACHE */
	bool			fgsp;		/* Fine-grained super pages */
};

struct vfio_dma {
	struct rb_node		node;
	dma_addr_t		iova;		/* Device address */
	unsigned long		vaddr;		/* Process virtual addr */
	size_t			size;		/* Map size (bytes) */
	int			prot;		/* IOMMU_READ/WRITE */
	bool			iommu_mapped;
	bool			lock_cap;	/* capable(CAP_IPC_LOCK) */
	struct task_struct	*task;
	struct rb_root		pfn_list;	/* Ex-user pinned pfn list */
};

struct vfio_group {
	struct iommu_group	*iommu_group;
	struct list_head	next;
	bool			mdev_group;	/* An mdev group */
};

/*
 * Guest RAM pinning working set or DMA target
 */
struct vfio_pfn {
	struct rb_node		node;
	dma_addr_t		iova;		/* Device address */
	unsigned long		pfn;		/* Host pfn */
	atomic_t		ref_count;
};

struct vfio_regions {
	struct list_head	list;
	dma_addr_t		iova;
	phys_addr_t		phys;
	size_t			len;
};

#define IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu)	\
			(!list_empty(&iommu->domain_list))
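/*
 * Overview of the data model (summary added for clarity, not from the
 * original source): a vfio_iommu represents one container.  It holds a
 * list of vfio_domain objects (each wrapping an iommu_domain plus the
 * vfio_groups attached to it) and an rb-tree of vfio_dma mappings keyed
 * by IOVA.  Each vfio_dma tracks a userspace vaddr/iova/size triplet and
 * its own rb-tree of vfio_pfn entries for pages pinned on behalf of
 * external (mdev) users.
 */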
static int put_pfn(unsigned long pfn, int prot);

/*
 * This code handles mapping and unmapping of user data buffers
 * into DMA'ble space using the IOMMU
 */
static struct vfio_dma *vfio_find_dma(struct vfio_iommu *iommu,
				      dma_addr_t start, size_t size)
{
	struct rb_node *node = iommu->dma_list.rb_node;

	while (node) {
		struct vfio_dma *dma = rb_entry(node, struct vfio_dma, node);

		if (start + size <= dma->iova)
			node = node->rb_left;
		else if (start >= dma->iova + dma->size)
			node = node->rb_right;
		else
			return dma;
	}

	return NULL;
}
static void vfio_link_dma(struct vfio_iommu *iommu, struct vfio_dma *new)
{
	struct rb_node **link = &iommu->dma_list.rb_node, *parent = NULL;
	struct vfio_dma *dma;

	while (*link) {
		parent = *link;
		dma = rb_entry(parent, struct vfio_dma, node);

		if (new->iova + new->size <= dma->iova)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, &iommu->dma_list);
}
static void vfio_unlink_dma(struct vfio_iommu *iommu, struct vfio_dma *old)
{
	rb_erase(&old->node, &iommu->dma_list);
}
/*
 * Helper Functions for host iova-pfn list
 */
static struct vfio_pfn *vfio_find_vpfn(struct vfio_dma *dma, dma_addr_t iova)
{
	struct vfio_pfn *vpfn;
	struct rb_node *node = dma->pfn_list.rb_node;

	while (node) {
		vpfn = rb_entry(node, struct vfio_pfn, node);

		if (iova < vpfn->iova)
			node = node->rb_left;
		else if (iova > vpfn->iova)
			node = node->rb_right;
		else
			return vpfn;
	}
	return NULL;
}
static void vfio_link_pfn(struct vfio_dma *dma,
			  struct vfio_pfn *new)
{
	struct rb_node **link, *parent = NULL;
	struct vfio_pfn *vpfn;

	link = &dma->pfn_list.rb_node;
	while (*link) {
		parent = *link;
		vpfn = rb_entry(parent, struct vfio_pfn, node);

		if (new->iova < vpfn->iova)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, &dma->pfn_list);
}
static void vfio_unlink_pfn(struct vfio_dma *dma, struct vfio_pfn *old)
{
	rb_erase(&old->node, &dma->pfn_list);
}
static int vfio_add_to_pfn_list(struct vfio_dma *dma, dma_addr_t iova,
				unsigned long pfn)
{
	struct vfio_pfn *vpfn;

	vpfn = kzalloc(sizeof(*vpfn), GFP_KERNEL);
	if (!vpfn)
		return -ENOMEM;

	vpfn->iova = iova;
	vpfn->pfn = pfn;
	atomic_set(&vpfn->ref_count, 1);
	vfio_link_pfn(dma, vpfn);
	return 0;
}
static void vfio_remove_from_pfn_list(struct vfio_dma *dma,
				      struct vfio_pfn *vpfn)
{
	vfio_unlink_pfn(dma, vpfn);
	kfree(vpfn);
}
static struct vfio_pfn *vfio_iova_get_vfio_pfn(struct vfio_dma *dma,
					       unsigned long iova)
{
	struct vfio_pfn *vpfn = vfio_find_vpfn(dma, iova);

	if (vpfn)
		atomic_inc(&vpfn->ref_count);
	return vpfn;
}
static int vfio_iova_put_vfio_pfn(struct vfio_dma *dma, struct vfio_pfn *vpfn)
{
	int ret = 0;

	if (atomic_dec_and_test(&vpfn->ref_count)) {
		ret = put_pfn(vpfn->pfn, dma->prot);
		vfio_remove_from_pfn_list(dma, vpfn);
	}
	return ret;
}
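/*
 * Locked-memory accounting helper (descriptive note, not in the original
 * source): npage may be negative when unpinning.  With async set, the
 * caller may not share dma->task's address space (e.g. the external mdev
 * pin/unpin path), so the mm is taken via get_task_mm() and released with
 * mmput() instead of using dma->task->mm directly.
 */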
static int vfio_lock_acct(struct vfio_dma *dma, long npage, bool async)
{
	struct mm_struct *mm;
	int ret;

	if (!npage)
		return 0;

	mm = async ? get_task_mm(dma->task) : dma->task->mm;
	if (!mm)
		return -ESRCH; /* process exited */

	ret = down_write_killable(&mm->mmap_sem);
	if (!ret) {
		if (npage > 0 && !dma->lock_cap) {
			unsigned long limit;

			limit = task_rlimit(dma->task,
					    RLIMIT_MEMLOCK) >> PAGE_SHIFT;

			if (mm->locked_vm + npage > limit)
				ret = -ENOMEM;
		}

		if (!ret)
			mm->locked_vm += npage;

		up_write(&mm->mmap_sem);
	}

	if (async)
		mmput(mm);

	return ret;
}
/*
 * Some mappings aren't backed by a struct page, for example an mmap'd
 * MMIO range for our own or another device.  These use a different
 * pfn conversion and shouldn't be tracked as locked pages.
 */
static bool is_invalid_reserved_pfn(unsigned long pfn)
{
	if (pfn_valid(pfn)) {
		bool reserved;
		struct page *tail = pfn_to_page(pfn);
		struct page *head = compound_head(tail);

		reserved = !!(PageReserved(head));
		if (head != tail) {
			/*
			 * "head" is not a dangling pointer
			 * (compound_head takes care of that)
			 * but the hugepage may have been split
			 * from under us (and we may not hold a
			 * reference count on the head page so it can
			 * be reused before we run PageReferenced), so
			 * we've to check PageTail before returning
			 * what we just read.
			 */
			smp_rmb();
			if (PageTail(tail))
				return reserved;
		}
		return PageReserved(tail);
	}

	return true;
}
static int put_pfn(unsigned long pfn, int prot)
{
	if (!is_invalid_reserved_pfn(pfn)) {
		struct page *page = pfn_to_page(pfn);

		if (prot & IOMMU_WRITE)
			SetPageDirty(page);
		put_page(page);
		return 1;
	}
	return 0;
}
static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
			 int prot, unsigned long *pfn)
{
	struct page *page[1];
	struct vm_area_struct *vma;
	struct vm_area_struct *vmas[1];
	unsigned int flags = 0;
	int ret;

	if (prot & IOMMU_WRITE)
		flags |= FOLL_WRITE;

	down_read(&mm->mmap_sem);
	if (mm == current->mm) {
		ret = get_user_pages(vaddr, 1, flags | FOLL_LONGTERM, page,
				     vmas);
	} else {
		ret = get_user_pages_remote(NULL, mm, vaddr, 1, flags, page,
					    vmas, NULL);
		/*
		 * The lifetime of a vaddr_get_pfn() page pin is
		 * userspace-controlled. In the fs-dax case this could
		 * lead to indefinite stalls in filesystem operations.
		 * Disallow attempts to pin fs-dax pages via this
		 * interface.
		 */
		if (ret > 0 && vma_is_fsdax(vmas[0])) {
			ret = -EOPNOTSUPP;
			put_page(page[0]);
		}
	}
	up_read(&mm->mmap_sem);

	if (ret == 1) {
		*pfn = page_to_pfn(page[0]);
		return 0;
	}

	down_read(&mm->mmap_sem);

	vma = find_vma_intersection(mm, vaddr, vaddr + 1);

	if (vma && vma->vm_flags & VM_PFNMAP) {
		*pfn = ((vaddr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
		if (is_invalid_reserved_pfn(*pfn))
			ret = 0;
	}

	up_read(&mm->mmap_sem);
	return ret;
}
/*
 * Attempt to pin pages.  We really don't want to track all the pfns and
 * the iommu can only map chunks of consecutive pfns anyway, so get the
 * first page and all consecutive pages with the same locking.
 */
static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
				  long npage, unsigned long *pfn_base,
				  unsigned long limit)
{
	unsigned long pfn = 0;
	long ret, pinned = 0, lock_acct = 0;
	bool rsvd;
	dma_addr_t iova = vaddr - dma->vaddr + dma->iova;

	/* This code path is only user initiated */
	if (!current->mm)
		return -ENODEV;

	ret = vaddr_get_pfn(current->mm, vaddr, dma->prot, pfn_base);
	if (ret)
		return ret;

	pinned++;
	rsvd = is_invalid_reserved_pfn(*pfn_base);

	/*
	 * Reserved pages aren't counted against the user, externally pinned
	 * pages are already counted against the user.
	 */
	if (!rsvd && !vfio_find_vpfn(dma, iova)) {
		if (!dma->lock_cap && current->mm->locked_vm + 1 > limit) {
			put_pfn(*pfn_base, dma->prot);
			pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", __func__,
					limit << PAGE_SHIFT);
			return -ENOMEM;
		}
		lock_acct++;
	}

	if (unlikely(disable_hugepages))
		goto out;

	/* Lock all the consecutive pages from pfn_base */
	for (vaddr += PAGE_SIZE, iova += PAGE_SIZE; pinned < npage;
	     pinned++, vaddr += PAGE_SIZE, iova += PAGE_SIZE) {
		ret = vaddr_get_pfn(current->mm, vaddr, dma->prot, &pfn);
		if (ret)
			break;

		if (pfn != *pfn_base + pinned ||
		    rsvd != is_invalid_reserved_pfn(pfn)) {
			put_pfn(pfn, dma->prot);
			break;
		}

		if (!rsvd && !vfio_find_vpfn(dma, iova)) {
			if (!dma->lock_cap &&
			    current->mm->locked_vm + lock_acct + 1 > limit) {
				put_pfn(pfn, dma->prot);
				pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n",
					__func__, limit << PAGE_SHIFT);
				ret = -ENOMEM;
				goto unpin_out;
			}
			lock_acct++;
		}
	}

out:
	ret = vfio_lock_acct(dma, lock_acct, false);

unpin_out:
	if (ret) {
		if (!rsvd) {
			for (pfn = *pfn_base ; pinned ; pfn++, pinned--)
				put_pfn(pfn, dma->prot);
		}

		return ret;
	}

	return pinned;
}
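/*
 * Undo a vfio_pin_pages_remote() run (descriptive note, not in the
 * original source): drop npage page references starting at pfn and, when
 * do_accounting is set, credit back any pages that were counted against
 * the task's RLIMIT_MEMLOCK; pages still externally pinned via the
 * pfn_list stay accounted.
 */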
static long vfio_unpin_pages_remote(struct vfio_dma *dma, dma_addr_t iova,
				    unsigned long pfn, long npage,
				    bool do_accounting)
{
	long unlocked = 0, locked = 0;
	long i;

	for (i = 0; i < npage; i++, iova += PAGE_SIZE) {
		if (put_pfn(pfn++, dma->prot)) {
			unlocked++;
			if (vfio_find_vpfn(dma, iova))
				locked++;
		}
	}

	if (do_accounting)
		vfio_lock_acct(dma, locked - unlocked, true);

	return unlocked;
}
static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr,
				  unsigned long *pfn_base, bool do_accounting)
{
	struct mm_struct *mm;
	int ret;

	mm = get_task_mm(dma->task);
	if (!mm)
		return -ENODEV;

	ret = vaddr_get_pfn(mm, vaddr, dma->prot, pfn_base);
	if (!ret && do_accounting && !is_invalid_reserved_pfn(*pfn_base)) {
		ret = vfio_lock_acct(dma, 1, true);
		if (ret) {
			put_pfn(*pfn_base, dma->prot);
			if (ret == -ENOMEM)
				pr_warn("%s: Task %s (%d) RLIMIT_MEMLOCK "
					"(%ld) exceeded\n", __func__,
					dma->task->comm, task_pid_nr(dma->task),
					task_rlimit(dma->task, RLIMIT_MEMLOCK));
		}
	}

	mmput(mm);
	return ret;
}
static int vfio_unpin_page_external(struct vfio_dma *dma, dma_addr_t iova,
				    bool do_accounting)
{
	int unlocked;
	struct vfio_pfn *vpfn = vfio_find_vpfn(dma, iova);

	if (!vpfn)
		return 0;

	unlocked = vfio_iova_put_vfio_pfn(dma, vpfn);

	if (do_accounting)
		vfio_lock_acct(dma, -unlocked, true);

	return unlocked;
}
static int vfio_iommu_type1_pin_pages(void *iommu_data,
				      unsigned long *user_pfn,
				      int npage, int prot,
				      unsigned long *phys_pfn)
{
	struct vfio_iommu *iommu = iommu_data;
	int i, j, ret;
	unsigned long remote_vaddr;
	struct vfio_dma *dma;
	bool do_accounting;

	if (!iommu || !user_pfn || !phys_pfn)
		return -EINVAL;

	/* Supported for v2 version only */
	if (!iommu->v2)
		return -EACCES;

	mutex_lock(&iommu->lock);

	/* Fail if notifier list is empty */
	if (!iommu->notifier.head) {
		ret = -EINVAL;
		goto pin_done;
	}

	/*
	 * If an iommu-capable domain exists in the container then all pages
	 * are already pinned and accounted.  Accounting should be done if
	 * there is no iommu-capable domain in the container.
	 */
	do_accounting = !IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu);

	for (i = 0; i < npage; i++) {
		dma_addr_t iova;
		struct vfio_pfn *vpfn;

		iova = user_pfn[i] << PAGE_SHIFT;
		dma = vfio_find_dma(iommu, iova, PAGE_SIZE);
		if (!dma) {
			ret = -EINVAL;
			goto pin_unwind;
		}

		if ((dma->prot & prot) != prot) {
			ret = -EPERM;
			goto pin_unwind;
		}

		vpfn = vfio_iova_get_vfio_pfn(dma, iova);
		if (vpfn) {
			phys_pfn[i] = vpfn->pfn;
			continue;
		}

		remote_vaddr = dma->vaddr + iova - dma->iova;
		ret = vfio_pin_page_external(dma, remote_vaddr, &phys_pfn[i],
					     do_accounting);
		if (ret)
			goto pin_unwind;

		ret = vfio_add_to_pfn_list(dma, iova, phys_pfn[i]);
		if (ret) {
			vfio_unpin_page_external(dma, iova, do_accounting);
			goto pin_unwind;
		}
	}

	ret = i;
	goto pin_done;

pin_unwind:
	phys_pfn[i] = 0;
	for (j = 0; j < i; j++) {
		dma_addr_t iova;

		iova = user_pfn[j] << PAGE_SHIFT;
		dma = vfio_find_dma(iommu, iova, PAGE_SIZE);
		vfio_unpin_page_external(dma, iova, do_accounting);
		phys_pfn[j] = 0;
	}
pin_done:
	mutex_unlock(&iommu->lock);
	return ret;
}
static int vfio_iommu_type1_unpin_pages(void *iommu_data,
					unsigned long *user_pfn,
					int npage)
{
	struct vfio_iommu *iommu = iommu_data;
	bool do_accounting;
	int i;

	if (!iommu || !user_pfn)
		return -EINVAL;

	/* Supported for v2 version only */
	if (!iommu->v2)
		return -EACCES;

	mutex_lock(&iommu->lock);

	do_accounting = !IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu);
	for (i = 0; i < npage; i++) {
		struct vfio_dma *dma;
		dma_addr_t iova;

		iova = user_pfn[i] << PAGE_SHIFT;
		dma = vfio_find_dma(iommu, iova, PAGE_SIZE);
		if (!dma)
			goto unpin_exit;
		vfio_unpin_page_external(dma, iova, do_accounting);
	}

unpin_exit:
	mutex_unlock(&iommu->lock);
	return i > npage ? npage : (i > 0 ? i : -EINVAL);
}
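/*
 * Illustrative example (not part of the original source): an mdev vendor
 * driver typically reaches the two callbacks above through the exported
 * vfio_pin_pages()/vfio_unpin_pages() helpers, roughly:
 *
 *	unsigned long user_pfn = gpa >> PAGE_SHIFT;
 *	unsigned long phys_pfn;
 *	int ret = vfio_pin_pages(mdev_dev(mdev), &user_pfn, 1,
 *				 IOMMU_READ | IOMMU_WRITE, &phys_pfn);
 *	...
 *	vfio_unpin_pages(mdev_dev(mdev), &user_pfn, 1);
 *
 * The "gpa" and "mdev" names here are placeholders for the caller's own
 * state.
 */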
static long vfio_sync_unpin(struct vfio_dma *dma, struct vfio_domain *domain,
			    struct list_head *regions)
{
	long unlocked = 0;
	struct vfio_regions *entry, *next;

	iommu_tlb_sync(domain->domain);

	list_for_each_entry_safe(entry, next, regions, list) {
		unlocked += vfio_unpin_pages_remote(dma,
						    entry->iova,
						    entry->phys >> PAGE_SHIFT,
						    entry->len >> PAGE_SHIFT,
						    false);
		list_del(&entry->list);
		kfree(entry);
	}

	cond_resched();

	return unlocked;
}
/*
 * Generally, VFIO needs to unpin remote pages after each IOTLB flush.
 * Therefore, when using the IOTLB flush sync interface, VFIO needs to keep
 * track of these regions (currently using a list).
 *
 * This value specifies maximum number of regions for each IOTLB flush sync.
 */
#define VFIO_IOMMU_TLB_SYNC_MAX		512
static size_t unmap_unpin_fast(struct vfio_domain *domain,
			       struct vfio_dma *dma, dma_addr_t *iova,
			       size_t len, phys_addr_t phys, long *unlocked,
			       struct list_head *unmapped_list,
			       int *unmapped_cnt)
{
	size_t unmapped = 0;
	struct vfio_regions *entry = kzalloc(sizeof(*entry), GFP_KERNEL);

	if (entry) {
		unmapped = iommu_unmap_fast(domain->domain, *iova, len);

		if (!unmapped) {
			kfree(entry);
		} else {
			iommu_tlb_range_add(domain->domain, *iova, unmapped);
			entry->iova = *iova;
			entry->phys = phys;
			entry->len  = unmapped;
			list_add_tail(&entry->list, unmapped_list);

			*iova += unmapped;
			(*unmapped_cnt)++;
		}
	}

	/*
	 * Sync if the number of fast-unmap regions hits the limit
	 * or in case of errors.
	 */
	if (*unmapped_cnt >= VFIO_IOMMU_TLB_SYNC_MAX || !unmapped) {
		*unlocked += vfio_sync_unpin(dma, domain,
					     unmapped_list);
		*unmapped_cnt = 0;
	}

	return unmapped;
}
static size_t unmap_unpin_slow(struct vfio_domain *domain,
			       struct vfio_dma *dma, dma_addr_t *iova,
			       size_t len, phys_addr_t phys,
			       long *unlocked)
{
	size_t unmapped = iommu_unmap(domain->domain, *iova, len);

	if (unmapped) {
		*unlocked += vfio_unpin_pages_remote(dma, *iova,
						     phys >> PAGE_SHIFT,
						     unmapped >> PAGE_SHIFT,
						     false);
		*iova += unmapped;
		cond_resched();
	}
	return unmapped;
}
static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
			     bool do_accounting)
{
	dma_addr_t iova = dma->iova, end = dma->iova + dma->size;
	struct vfio_domain *domain, *d;
	LIST_HEAD(unmapped_region_list);
	int unmapped_region_cnt = 0;
	long unlocked = 0;

	if (!dma->size)
		return 0;

	if (!IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu))
		return 0;

	/*
	 * We use the IOMMU to track the physical addresses, otherwise we'd
	 * need a much more complicated tracking system.  Unfortunately that
	 * means we need to use one of the iommu domains to figure out the
	 * pfns to unpin.  The rest need to be unmapped in advance so we have
	 * no iommu translations remaining when the pages are unpinned.
	 */
	domain = d = list_first_entry(&iommu->domain_list,
				      struct vfio_domain, next);

	list_for_each_entry_continue(d, &iommu->domain_list, next) {
		iommu_unmap(d->domain, dma->iova, dma->size);
		cond_resched();
	}

	while (iova < end) {
		size_t unmapped, len;
		phys_addr_t phys, next;

		phys = iommu_iova_to_phys(domain->domain, iova);
		if (WARN_ON(!phys)) {
			iova += PAGE_SIZE;
			continue;
		}

		/*
		 * To optimize for fewer iommu_unmap() calls, each of which
		 * may require hardware cache flushing, try to find the
		 * largest contiguous physical memory chunk to unmap.
		 */
		for (len = PAGE_SIZE;
		     !domain->fgsp && iova + len < end; len += PAGE_SIZE) {
			next = iommu_iova_to_phys(domain->domain, iova + len);
			if (next != phys + len)
				break;
		}

		/*
		 * First, try to use fast unmap/unpin. In case of failure,
		 * switch to slow unmap/unpin path.
		 */
		unmapped = unmap_unpin_fast(domain, dma, &iova, len, phys,
					    &unlocked, &unmapped_region_list,
					    &unmapped_region_cnt);
		if (!unmapped) {
			unmapped = unmap_unpin_slow(domain, dma, &iova, len,
						    phys, &unlocked);
			if (WARN_ON(!unmapped))
				break;
		}
	}

	dma->iommu_mapped = false;

	if (unmapped_region_cnt)
		unlocked += vfio_sync_unpin(dma, domain, &unmapped_region_list);

	if (do_accounting) {
		vfio_lock_acct(dma, -unlocked, true);
		return 0;
	}
	return unlocked;
}
static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma)
{
	vfio_unmap_unpin(iommu, dma, true);
	vfio_unlink_dma(iommu, dma);
	put_task_struct(dma->task);
	kfree(dma);
	iommu->dma_avail++;
}
static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu)
{
	struct vfio_domain *domain;
	unsigned long bitmap = ULONG_MAX;

	mutex_lock(&iommu->lock);
	list_for_each_entry(domain, &iommu->domain_list, next)
		bitmap &= domain->domain->pgsize_bitmap;
	mutex_unlock(&iommu->lock);

	/*
	 * In case the IOMMU supports page sizes smaller than PAGE_SIZE
	 * we pretend PAGE_SIZE is supported and hide sub-PAGE_SIZE sizes.
	 * That way the user will be able to map/unmap buffers whose size/
	 * start address is aligned with PAGE_SIZE. Pinning code uses that
	 * granularity while iommu driver can use the sub-PAGE_SIZE size
	 * to map the buffer.
	 */
	if (bitmap & ~PAGE_MASK) {
		bitmap &= PAGE_MASK;
		bitmap |= PAGE_SIZE;
	}

	return bitmap;
}
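/*
 * Worked example (added note, not in the original source): if the IOMMU
 * reports 4K, 2M and 1G page support, the bitmap above is 0x40201000 and
 * __ffs() picks the 4K bit, so vfio_dma_do_map()/vfio_dma_do_unmap()
 * below build mask = (1 << 12) - 1 and reject any iova/vaddr/size that
 * is not 4K aligned.
 */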
static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
			     struct vfio_iommu_type1_dma_unmap *unmap)
{
	uint64_t mask;
	struct vfio_dma *dma, *dma_last = NULL;
	size_t unmapped = 0;
	int ret = 0, retries = 0;

	mask = ((uint64_t)1 << __ffs(vfio_pgsize_bitmap(iommu))) - 1;

	if (unmap->iova & mask)
		return -EINVAL;
	if (!unmap->size || unmap->size & mask)
		return -EINVAL;
	if (unmap->iova + unmap->size - 1 < unmap->iova ||
	    unmap->size > SIZE_MAX)
		return -EINVAL;

	WARN_ON(mask & PAGE_MASK);
again:
	mutex_lock(&iommu->lock);

	/*
	 * vfio-iommu-type1 (v1) - User mappings were coalesced together to
	 * avoid tracking individual mappings.  This means that the granularity
	 * of the original mapping was lost and the user was allowed to attempt
	 * to unmap any range.  Depending on the contiguousness of physical
	 * memory and page sizes supported by the IOMMU, arbitrary unmaps may
	 * or may not have worked.  We only guaranteed unmap granularity
	 * matching the original mapping; even though it was untracked here,
	 * the original mappings are reflected in IOMMU mappings.  This
	 * resulted in a couple unusual behaviors.  First, if a range is not
	 * able to be unmapped, ex. a set of 4k pages that was mapped as a
	 * 2M hugepage into the IOMMU, the unmap ioctl returns success but with
	 * a zero sized unmap.  Also, if an unmap request overlaps the first
	 * address of a hugepage, the IOMMU will unmap the entire hugepage.
	 * This also returns success and the returned unmap size reflects the
	 * actual size unmapped.
	 *
	 * We attempt to maintain compatibility with this "v1" interface, but
	 * we take control out of the hands of the IOMMU.  Therefore, an unmap
	 * request offset from the beginning of the original mapping will
	 * return success with zero sized unmap.  And an unmap request covering
	 * the first iova of mapping will unmap the entire range.
	 *
	 * The v2 version of this interface intends to be more deterministic.
	 * Unmap requests must fully cover previous mappings.  Multiple
	 * mappings may still be unmapped by specifying large ranges, but there
	 * must not be any previous mappings bisected by the range.  An error
	 * will be returned if these conditions are not met.  The v2 interface
	 * will only return success and a size of zero if there were no
	 * mappings within the range.
	 */
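	/*
	 * Example of the v2 rules (illustrative, not from the original
	 * source): with mappings A = [0x0, 0x200000) and B = [0x200000,
	 * 0x400000), an unmap of [0x0, 0x400000) removes both and reports
	 * 0x400000 bytes, while an unmap of [0x100000, 0x300000) bisects
	 * both mappings and fails with -EINVAL.
	 */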
	if (iommu->v2) {
		dma = vfio_find_dma(iommu, unmap->iova, 1);
		if (dma && dma->iova != unmap->iova) {
			ret = -EINVAL;
			goto unlock;
		}
		dma = vfio_find_dma(iommu, unmap->iova + unmap->size - 1, 0);
		if (dma && dma->iova + dma->size != unmap->iova + unmap->size) {
			ret = -EINVAL;
			goto unlock;
		}
	}

	while ((dma = vfio_find_dma(iommu, unmap->iova, unmap->size))) {
		if (!iommu->v2 && unmap->iova > dma->iova)
			break;
		/*
		 * Task with same address space who mapped this iova range is
		 * allowed to unmap the iova range.
		 */
		if (dma->task->mm != current->mm)
			break;

		if (!RB_EMPTY_ROOT(&dma->pfn_list)) {
			struct vfio_iommu_type1_dma_unmap nb_unmap;

			if (dma_last == dma) {
				BUG_ON(++retries > 10);
			} else {
				dma_last = dma;
				retries = 0;
			}

			nb_unmap.iova = dma->iova;
			nb_unmap.size = dma->size;

			/*
			 * Notify anyone (mdev vendor drivers) to invalidate and
			 * unmap iovas within the range we're about to unmap.
			 * Vendor drivers MUST unpin pages in response to an
			 * invalidation.
			 */
			mutex_unlock(&iommu->lock);
			blocking_notifier_call_chain(&iommu->notifier,
						     VFIO_IOMMU_NOTIFY_DMA_UNMAP,
						     &nb_unmap);
			goto again;
		}
		unmapped += dma->size;
		vfio_remove_dma(iommu, dma);
	}

unlock:
	mutex_unlock(&iommu->lock);

	/* Report how much was unmapped */
	unmap->size = unmapped;

	return ret;
}
static int vfio_iommu_map(struct vfio_iommu *iommu, dma_addr_t iova,
			  unsigned long pfn, long npage, int prot)
{
	struct vfio_domain *d;
	int ret;

	list_for_each_entry(d, &iommu->domain_list, next) {
		ret = iommu_map(d->domain, iova, (phys_addr_t)pfn << PAGE_SHIFT,
				npage << PAGE_SHIFT, prot | d->prot);
		if (ret)
			goto unwind;

		cond_resched();
	}

	return 0;

unwind:
	list_for_each_entry_continue_reverse(d, &iommu->domain_list, next)
		iommu_unmap(d->domain, iova, npage << PAGE_SHIFT);

	return ret;
}
static int vfio_pin_map_dma(struct vfio_iommu *iommu, struct vfio_dma *dma,
			    size_t map_size)
{
	dma_addr_t iova = dma->iova;
	unsigned long vaddr = dma->vaddr;
	size_t size = map_size;
	long npage;
	unsigned long pfn, limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	int ret = 0;

	while (size) {
		/* Pin a contiguous chunk of memory */
		npage = vfio_pin_pages_remote(dma, vaddr + dma->size,
					      size >> PAGE_SHIFT, &pfn, limit);
		if (npage <= 0) {
			WARN_ON(!npage);
			ret = (int)npage;
			break;
		}

		/* Map it! */
		ret = vfio_iommu_map(iommu, iova + dma->size, pfn, npage,
				     dma->prot);
		if (ret) {
			vfio_unpin_pages_remote(dma, iova + dma->size, pfn,
						npage, true);
			break;
		}

		size -= npage << PAGE_SHIFT;
		dma->size += npage << PAGE_SHIFT;
	}

	dma->iommu_mapped = true;

	if (ret)
		vfio_remove_dma(iommu, dma);

	return ret;
}
static int vfio_dma_do_map(struct vfio_iommu *iommu,
			   struct vfio_iommu_type1_dma_map *map)
{
	dma_addr_t iova = map->iova;
	unsigned long vaddr = map->vaddr;
	size_t size = map->size;
	int ret = 0, prot = 0;
	uint64_t mask;
	struct vfio_dma *dma;

	/* Verify that none of our __u64 fields overflow */
	if (map->size != size || map->vaddr != vaddr || map->iova != iova)
		return -EINVAL;

	mask = ((uint64_t)1 << __ffs(vfio_pgsize_bitmap(iommu))) - 1;

	WARN_ON(mask & PAGE_MASK);

	/* READ/WRITE from device perspective */
	if (map->flags & VFIO_DMA_MAP_FLAG_WRITE)
		prot |= IOMMU_WRITE;
	if (map->flags & VFIO_DMA_MAP_FLAG_READ)
		prot |= IOMMU_READ;

	if (!prot || !size || (size | iova | vaddr) & mask)
		return -EINVAL;

	/* Don't allow IOVA or virtual address wrap */
	if (iova + size - 1 < iova || vaddr + size - 1 < vaddr)
		return -EINVAL;

	mutex_lock(&iommu->lock);

	if (vfio_find_dma(iommu, iova, size)) {
		ret = -EEXIST;
		goto out_unlock;
	}

	if (!iommu->dma_avail) {
		ret = -ENOSPC;
		goto out_unlock;
	}

	dma = kzalloc(sizeof(*dma), GFP_KERNEL);
	if (!dma) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	iommu->dma_avail--;
	dma->iova = iova;
	dma->vaddr = vaddr;
	dma->prot = prot;

	/*
	 * We need to be able to both add to a task's locked memory and test
	 * against the locked memory limit and we need to be able to do both
	 * outside of this call path as pinning can be asynchronous via the
	 * external interfaces for mdev devices.  RLIMIT_MEMLOCK requires a
	 * task_struct and VM locked pages requires an mm_struct, however
	 * holding an indefinite mm reference is not recommended, therefore we
	 * only hold a reference to a task.  We could hold a reference to
	 * current, however QEMU uses this call path through vCPU threads,
	 * which can be killed resulting in a NULL mm and failure in the unmap
	 * path when called via a different thread.  Avoid this problem by
	 * using the group_leader as threads within the same group require
	 * both CLONE_THREAD and CLONE_VM and will therefore use the same
	 * mm_struct.
	 *
	 * Previously we also used the task for testing CAP_IPC_LOCK at the
	 * time of pinning and accounting, however has_capability() makes use
	 * of real_cred, a copy-on-write field, so we can't guarantee that it
	 * matches group_leader, or in fact that it might not change by the
	 * time it's evaluated.  If a process were to call MAP_DMA with
	 * CAP_IPC_LOCK but later drop it, it doesn't make sense that they
	 * possibly see different results for an iommu_mapped vfio_dma vs
	 * externally mapped.  Therefore track CAP_IPC_LOCK in vfio_dma at the
	 * time of calling MAP_DMA.
	 */
	get_task_struct(current->group_leader);
	dma->task = current->group_leader;
	dma->lock_cap = capable(CAP_IPC_LOCK);

	dma->pfn_list = RB_ROOT;

	/* Insert zero-sized and grow as we map chunks of it */
	vfio_link_dma(iommu, dma);

	/* Don't pin and map if container doesn't contain IOMMU capable domain */
	if (!IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu))
		dma->size = size;
	else
		ret = vfio_pin_map_dma(iommu, dma, size);

out_unlock:
	mutex_unlock(&iommu->lock);
	return ret;
}
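/*
 * Illustrative userspace usage (not part of the original source): the map
 * path above is driven by the VFIO_IOMMU_MAP_DMA ioctl on a container fd,
 * e.g.:
 *
 *	struct vfio_iommu_type1_dma_map map = {
 *		.argsz = sizeof(map),
 *		.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
 *		.vaddr = (__u64)(uintptr_t)buf,	// page-aligned user buffer
 *		.iova  = 0x100000,		// device address to use
 *		.size  = buf_size,		// multiple of the IOMMU page size
 *	};
 *	ioctl(container_fd, VFIO_IOMMU_MAP_DMA, &map);
 *
 * "buf", "buf_size" and "container_fd" are placeholders for the caller's
 * own state.
 */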
static int vfio_bus_type(struct device *dev, void *data)
{
	struct bus_type **bus = data;

	if (*bus && *bus != dev->bus)
		return -EINVAL;

	*bus = dev->bus;

	return 0;
}
static int vfio_iommu_replay(struct vfio_iommu *iommu,
			     struct vfio_domain *domain)
{
	struct vfio_domain *d;
	struct rb_node *n;
	unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	int ret;

	/* Arbitrarily pick the first domain in the list for lookups */
	d = list_first_entry(&iommu->domain_list, struct vfio_domain, next);
	n = rb_first(&iommu->dma_list);

	for (; n; n = rb_next(n)) {
		struct vfio_dma *dma;
		dma_addr_t iova;

		dma = rb_entry(n, struct vfio_dma, node);
		iova = dma->iova;

		while (iova < dma->iova + dma->size) {
			phys_addr_t phys;
			size_t size;

			if (dma->iommu_mapped) {
				phys_addr_t p;
				dma_addr_t i;

				phys = iommu_iova_to_phys(d->domain, iova);

				if (WARN_ON(!phys)) {
					iova += PAGE_SIZE;
					continue;
				}

				size = PAGE_SIZE;
				p = phys + size;
				i = iova + size;
				while (i < dma->iova + dma->size &&
				       p == iommu_iova_to_phys(d->domain, i)) {
					size += PAGE_SIZE;
					p += PAGE_SIZE;
					i += PAGE_SIZE;
				}
			} else {
				unsigned long pfn;
				unsigned long vaddr = dma->vaddr +
						      (iova - dma->iova);
				size_t n = dma->iova + dma->size - iova;
				long npage;

				npage = vfio_pin_pages_remote(dma, vaddr,
							      n >> PAGE_SHIFT,
							      &pfn, limit);
				if (npage <= 0) {
					WARN_ON(!npage);
					ret = (int)npage;
					return ret;
				}

				phys = pfn << PAGE_SHIFT;
				size = npage << PAGE_SHIFT;
			}

			ret = iommu_map(domain->domain, iova, phys,
					size, dma->prot | domain->prot);
			if (ret)
				return ret;

			iova += size;
		}
		dma->iommu_mapped = true;
	}
	return 0;
}
/*
 * We change our unmap behavior slightly depending on whether the IOMMU
 * supports fine-grained superpages.  IOMMUs like AMD-Vi will use a superpage
 * for practically any contiguous power-of-two mapping we give it.  This means
 * we don't need to look for contiguous chunks ourselves to make unmapping
 * more efficient.  On IOMMUs with coarse-grained super pages, like Intel VT-d
 * with discrete 2M/1G/512G/1T superpages, identifying contiguous chunks
 * significantly boosts non-hugetlbfs mappings and doesn't seem to hurt when
 * hugetlbfs is in use.
 */
static void vfio_test_domain_fgsp(struct vfio_domain *domain)
{
	struct page *pages;
	int ret, order = get_order(PAGE_SIZE * 2);

	pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!pages)
		return;

	ret = iommu_map(domain->domain, 0, page_to_phys(pages), PAGE_SIZE * 2,
			IOMMU_READ | IOMMU_WRITE | domain->prot);
	if (!ret) {
		size_t unmapped = iommu_unmap(domain->domain, 0, PAGE_SIZE);

		if (unmapped == PAGE_SIZE)
			iommu_unmap(domain->domain, PAGE_SIZE, PAGE_SIZE);
		else
			domain->fgsp = true;
	}

	__free_pages(pages, order);
}
static struct vfio_group *find_iommu_group(struct vfio_domain *domain,
					   struct iommu_group *iommu_group)
{
	struct vfio_group *g;

	list_for_each_entry(g, &domain->group_list, next) {
		if (g->iommu_group == iommu_group)
			return g;
	}

	return NULL;
}
static bool vfio_iommu_has_sw_msi(struct iommu_group *group, phys_addr_t *base)
{
	struct list_head group_resv_regions;
	struct iommu_resv_region *region, *next;
	bool ret = false;

	INIT_LIST_HEAD(&group_resv_regions);
	iommu_get_group_resv_regions(group, &group_resv_regions);
	list_for_each_entry(region, &group_resv_regions, list) {
		/*
		 * The presence of any 'real' MSI regions should take
		 * precedence over the software-managed one if the
		 * IOMMU driver happens to advertise both types.
		 */
		if (region->type == IOMMU_RESV_MSI) {
			ret = false;
			break;
		}

		if (region->type == IOMMU_RESV_SW_MSI) {
			*base = region->start;
			ret = true;
		}
	}
	list_for_each_entry_safe(region, next, &group_resv_regions, list)
		kfree(region);
	return ret;
}
static struct device *vfio_mdev_get_iommu_device(struct device *dev)
{
	struct device *(*fn)(struct device *dev);
	struct device *iommu_device;

	fn = symbol_get(mdev_get_iommu_device);
	if (fn) {
		iommu_device = fn(dev);
		symbol_put(mdev_get_iommu_device);

		return iommu_device;
	}

	return NULL;
}
static int vfio_mdev_attach_domain(struct device *dev, void *data)
{
	struct iommu_domain *domain = data;
	struct device *iommu_device;

	iommu_device = vfio_mdev_get_iommu_device(dev);
	if (iommu_device) {
		if (iommu_dev_feature_enabled(iommu_device, IOMMU_DEV_FEAT_AUX))
			return iommu_aux_attach_device(domain, iommu_device);
		else
			return iommu_attach_device(domain, iommu_device);
	}

	return -EINVAL;
}
static int vfio_mdev_detach_domain(struct device *dev, void *data)
{
	struct iommu_domain *domain = data;
	struct device *iommu_device;

	iommu_device = vfio_mdev_get_iommu_device(dev);
	if (iommu_device) {
		if (iommu_dev_feature_enabled(iommu_device, IOMMU_DEV_FEAT_AUX))
			iommu_aux_detach_device(domain, iommu_device);
		else
			iommu_detach_device(domain, iommu_device);
	}

	return 0;
}
static int vfio_iommu_attach_group(struct vfio_domain *domain,
				   struct vfio_group *group)
{
	if (group->mdev_group)
		return iommu_group_for_each_dev(group->iommu_group,
						domain->domain,
						vfio_mdev_attach_domain);
	else
		return iommu_attach_group(domain->domain, group->iommu_group);
}
static void vfio_iommu_detach_group(struct vfio_domain *domain,
				    struct vfio_group *group)
{
	if (group->mdev_group)
		iommu_group_for_each_dev(group->iommu_group, domain->domain,
					 vfio_mdev_detach_domain);
	else
		iommu_detach_group(domain->domain, group->iommu_group);
}
static bool vfio_bus_is_mdev(struct bus_type *bus)
{
	struct bus_type *mdev_bus;
	bool ret = false;

	mdev_bus = symbol_get(mdev_bus_type);
	if (mdev_bus) {
		ret = (bus == mdev_bus);
		symbol_put(mdev_bus_type);
	}

	return ret;
}
static int vfio_mdev_iommu_device(struct device *dev, void *data)
{
	struct device **old = data, *new;

	new = vfio_mdev_get_iommu_device(dev);
	if (!new || (*old && *old != new))
		return -EINVAL;

	*old = new;

	return 0;
}
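/*
 * Summary of the attach path below (descriptive note, not in the original
 * source): resolve the group's bus_type, special-case mdev groups (either
 * track them in the external domain or redirect to the backing IOMMU
 * device), allocate and configure an iommu_domain, try to reuse an
 * existing compatible domain, and finally replay all current DMA mappings
 * into the new domain.
 */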
static int vfio_iommu_type1_attach_group(void *iommu_data,
					 struct iommu_group *iommu_group)
{
	struct vfio_iommu *iommu = iommu_data;
	struct vfio_group *group;
	struct vfio_domain *domain, *d;
	struct bus_type *bus = NULL;
	int ret;
	bool resv_msi, msi_remap;
	phys_addr_t resv_msi_base;

	mutex_lock(&iommu->lock);

	list_for_each_entry(d, &iommu->domain_list, next) {
		if (find_iommu_group(d, iommu_group)) {
			mutex_unlock(&iommu->lock);
			return -EINVAL;
		}
	}

	if (iommu->external_domain) {
		if (find_iommu_group(iommu->external_domain, iommu_group)) {
			mutex_unlock(&iommu->lock);
			return -EINVAL;
		}
	}

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!group || !domain) {
		ret = -ENOMEM;
		goto out_free;
	}

	group->iommu_group = iommu_group;

	/* Determine bus_type in order to allocate a domain */
	ret = iommu_group_for_each_dev(iommu_group, &bus, vfio_bus_type);
	if (ret)
		goto out_free;

	if (vfio_bus_is_mdev(bus)) {
		struct device *iommu_device = NULL;

		group->mdev_group = true;

		/* Determine the isolation type */
		ret = iommu_group_for_each_dev(iommu_group, &iommu_device,
					       vfio_mdev_iommu_device);
		if (ret || !iommu_device) {
			if (!iommu->external_domain) {
				INIT_LIST_HEAD(&domain->group_list);
				iommu->external_domain = domain;
			} else {
				kfree(domain);
			}

			list_add(&group->next,
				 &iommu->external_domain->group_list);
			mutex_unlock(&iommu->lock);

			return 0;
		}

		bus = iommu_device->bus;
	}

	domain->domain = iommu_domain_alloc(bus);
	if (!domain->domain) {
		ret = -EIO;
		goto out_free;
	}

	if (iommu->nesting) {
		int attr = 1;

		ret = iommu_domain_set_attr(domain->domain, DOMAIN_ATTR_NESTING,
					    &attr);
		if (ret)
			goto out_domain;
	}

	ret = vfio_iommu_attach_group(domain, group);
	if (ret)
		goto out_domain;

	resv_msi = vfio_iommu_has_sw_msi(iommu_group, &resv_msi_base);

	INIT_LIST_HEAD(&domain->group_list);
	list_add(&group->next, &domain->group_list);

	msi_remap = irq_domain_check_msi_remap() ||
		    iommu_capable(bus, IOMMU_CAP_INTR_REMAP);

	if (!allow_unsafe_interrupts && !msi_remap) {
		pr_warn("%s: No interrupt remapping support.  Use the module param \"allow_unsafe_interrupts\" to enable VFIO IOMMU support on this platform\n",
		       __func__);
		ret = -EPERM;
		goto out_detach;
	}

	if (iommu_capable(bus, IOMMU_CAP_CACHE_COHERENCY))
		domain->prot |= IOMMU_CACHE;

	/*
	 * Try to match an existing compatible domain.  We don't want to
	 * preclude an IOMMU driver supporting multiple bus_types and being
	 * able to include different bus_types in the same IOMMU domain, so
	 * we test whether the domains use the same iommu_ops rather than
	 * testing if they're on the same bus_type.
	 */
	list_for_each_entry(d, &iommu->domain_list, next) {
		if (d->domain->ops == domain->domain->ops &&
		    d->prot == domain->prot) {
			vfio_iommu_detach_group(domain, group);
			if (!vfio_iommu_attach_group(d, group)) {
				list_add(&group->next, &d->group_list);
				iommu_domain_free(domain->domain);
				kfree(domain);
				mutex_unlock(&iommu->lock);
				return 0;
			}

			ret = vfio_iommu_attach_group(domain, group);
			if (ret)
				goto out_domain;
		}
	}

	vfio_test_domain_fgsp(domain);

	/* replay mappings on new domains */
	ret = vfio_iommu_replay(iommu, domain);
	if (ret)
		goto out_detach;

	if (resv_msi) {
		ret = iommu_get_msi_cookie(domain->domain, resv_msi_base);
		if (ret)
			goto out_detach;
	}

	list_add(&domain->next, &iommu->domain_list);

	mutex_unlock(&iommu->lock);

	return 0;

out_detach:
	vfio_iommu_detach_group(domain, group);
out_domain:
	iommu_domain_free(domain->domain);
out_free:
	kfree(domain);
	kfree(group);
	mutex_unlock(&iommu->lock);
	return ret;
}
static void vfio_iommu_unmap_unpin_all(struct vfio_iommu *iommu)
{
	struct rb_node *node;

	while ((node = rb_first(&iommu->dma_list)))
		vfio_remove_dma(iommu, rb_entry(node, struct vfio_dma, node));
}
static void vfio_iommu_unmap_unpin_reaccount(struct vfio_iommu *iommu)
{
	struct rb_node *n, *p;

	n = rb_first(&iommu->dma_list);
	for (; n; n = rb_next(n)) {
		struct vfio_dma *dma;
		long locked = 0, unlocked = 0;

		dma = rb_entry(n, struct vfio_dma, node);
		unlocked += vfio_unmap_unpin(iommu, dma, false);
		p = rb_first(&dma->pfn_list);
		for (; p; p = rb_next(p)) {
			struct vfio_pfn *vpfn = rb_entry(p, struct vfio_pfn,
							 node);

			if (!is_invalid_reserved_pfn(vpfn->pfn))
				locked++;
		}
		vfio_lock_acct(dma, locked - unlocked, true);
	}
}
static void vfio_sanity_check_pfn_list(struct vfio_iommu *iommu)
{
	struct rb_node *n;

	n = rb_first(&iommu->dma_list);
	for (; n; n = rb_next(n)) {
		struct vfio_dma *dma;

		dma = rb_entry(n, struct vfio_dma, node);

		if (WARN_ON(!RB_EMPTY_ROOT(&dma->pfn_list)))
			break;
	}
	/* mdev vendor driver must unregister notifier */
	WARN_ON(iommu->notifier.head);
}
static void vfio_iommu_type1_detach_group(void *iommu_data,
					  struct iommu_group *iommu_group)
{
	struct vfio_iommu *iommu = iommu_data;
	struct vfio_domain *domain;
	struct vfio_group *group;

	mutex_lock(&iommu->lock);

	if (iommu->external_domain) {
		group = find_iommu_group(iommu->external_domain, iommu_group);
		if (group) {
			list_del(&group->next);
			kfree(group);

			if (list_empty(&iommu->external_domain->group_list)) {
				vfio_sanity_check_pfn_list(iommu);

				if (!IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu))
					vfio_iommu_unmap_unpin_all(iommu);

				kfree(iommu->external_domain);
				iommu->external_domain = NULL;
			}
			goto detach_group_done;
		}
	}

	list_for_each_entry(domain, &iommu->domain_list, next) {
		group = find_iommu_group(domain, iommu_group);
		if (!group)
			continue;

		vfio_iommu_detach_group(domain, group);
		list_del(&group->next);
		kfree(group);
		/*
		 * Group ownership provides privilege, if the group list is
		 * empty, the domain goes away. If it's the last domain with
		 * iommu and external domain doesn't exist, then all the
		 * mappings go away too. If it's the last domain with iommu
		 * and an external domain exists, update accounting.
		 */
		if (list_empty(&domain->group_list)) {
			if (list_is_singular(&iommu->domain_list)) {
				if (!iommu->external_domain)
					vfio_iommu_unmap_unpin_all(iommu);
				else
					vfio_iommu_unmap_unpin_reaccount(iommu);
			}
			iommu_domain_free(domain->domain);
			list_del(&domain->next);
			kfree(domain);
		}
		break;
	}

detach_group_done:
	mutex_unlock(&iommu->lock);
}
static void *vfio_iommu_type1_open(unsigned long arg)
{
	struct vfio_iommu *iommu;

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return ERR_PTR(-ENOMEM);

	switch (arg) {
	case VFIO_TYPE1_IOMMU:
		break;
	case VFIO_TYPE1_NESTING_IOMMU:
		iommu->nesting = true;
		/* fall through */
	case VFIO_TYPE1v2_IOMMU:
		iommu->v2 = true;
		break;
	default:
		kfree(iommu);
		return ERR_PTR(-EINVAL);
	}

	INIT_LIST_HEAD(&iommu->domain_list);
	iommu->dma_list = RB_ROOT;
	iommu->dma_avail = dma_entry_limit;
	mutex_init(&iommu->lock);
	BLOCKING_INIT_NOTIFIER_HEAD(&iommu->notifier);

	return iommu;
}
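/*
 * Illustrative userspace usage (not part of the original source): the
 * argument to the open callback above comes from the VFIO_SET_IOMMU ioctl
 * on a container fd, e.g.:
 *
 *	if (ioctl(container_fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1v2_IOMMU))
 *		ioctl(container_fd, VFIO_SET_IOMMU, VFIO_TYPE1v2_IOMMU);
 *
 * "container_fd" is a placeholder for an open /dev/vfio/vfio descriptor.
 */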
static void vfio_release_domain(struct vfio_domain *domain, bool external)
{
	struct vfio_group *group, *group_tmp;

	list_for_each_entry_safe(group, group_tmp,
				 &domain->group_list, next) {
		if (!external)
			vfio_iommu_detach_group(domain, group);
		list_del(&group->next);
		kfree(group);
	}

	if (!external)
		iommu_domain_free(domain->domain);
}
static void vfio_iommu_type1_release(void *iommu_data)
{
	struct vfio_iommu *iommu = iommu_data;
	struct vfio_domain *domain, *domain_tmp;

	if (iommu->external_domain) {
		vfio_release_domain(iommu->external_domain, true);
		vfio_sanity_check_pfn_list(iommu);
		kfree(iommu->external_domain);
	}

	vfio_iommu_unmap_unpin_all(iommu);

	list_for_each_entry_safe(domain, domain_tmp,
				 &iommu->domain_list, next) {
		vfio_release_domain(domain, false);
		list_del(&domain->next);
		kfree(domain);
	}
	kfree(iommu);
}
*iommu
)
1746 struct vfio_domain
*domain
;
1749 mutex_lock(&iommu
->lock
);
1750 list_for_each_entry(domain
, &iommu
->domain_list
, next
) {
1751 if (!(domain
->prot
& IOMMU_CACHE
)) {
1756 mutex_unlock(&iommu
->lock
);
static long vfio_iommu_type1_ioctl(void *iommu_data,
				   unsigned int cmd, unsigned long arg)
{
	struct vfio_iommu *iommu = iommu_data;
	unsigned long minsz;

	if (cmd == VFIO_CHECK_EXTENSION) {
		switch (arg) {
		case VFIO_TYPE1_IOMMU:
		case VFIO_TYPE1v2_IOMMU:
		case VFIO_TYPE1_NESTING_IOMMU:
			return 1;
		case VFIO_DMA_CC_IOMMU:
			if (!iommu)
				return 0;
			return vfio_domains_have_iommu_cache(iommu);
		default:
			return 0;
		}
	} else if (cmd == VFIO_IOMMU_GET_INFO) {
		struct vfio_iommu_type1_info info;

		minsz = offsetofend(struct vfio_iommu_type1_info, iova_pgsizes);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		info.flags = VFIO_IOMMU_INFO_PGSIZES;

		info.iova_pgsizes = vfio_pgsize_bitmap(iommu);

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_IOMMU_MAP_DMA) {
		struct vfio_iommu_type1_dma_map map;
		uint32_t mask = VFIO_DMA_MAP_FLAG_READ |
				VFIO_DMA_MAP_FLAG_WRITE;

		minsz = offsetofend(struct vfio_iommu_type1_dma_map, size);

		if (copy_from_user(&map, (void __user *)arg, minsz))
			return -EFAULT;

		if (map.argsz < minsz || map.flags & ~mask)
			return -EINVAL;

		return vfio_dma_do_map(iommu, &map);

	} else if (cmd == VFIO_IOMMU_UNMAP_DMA) {
		struct vfio_iommu_type1_dma_unmap unmap;
		long ret;

		minsz = offsetofend(struct vfio_iommu_type1_dma_unmap, size);

		if (copy_from_user(&unmap, (void __user *)arg, minsz))
			return -EFAULT;

		if (unmap.argsz < minsz || unmap.flags)
			return -EINVAL;

		ret = vfio_dma_do_unmap(iommu, &unmap);
		if (ret)
			return ret;

		return copy_to_user((void __user *)arg, &unmap, minsz) ?
			-EFAULT : 0;
	}

	return -ENOTTY;
}
static int vfio_iommu_type1_register_notifier(void *iommu_data,
					      unsigned long *events,
					      struct notifier_block *nb)
{
	struct vfio_iommu *iommu = iommu_data;

	/* clear known events */
	*events &= ~VFIO_IOMMU_NOTIFY_DMA_UNMAP;

	/* refuse to register if still events remaining */
	if (*events)
		return -EINVAL;

	return blocking_notifier_chain_register(&iommu->notifier, nb);
}

static int vfio_iommu_type1_unregister_notifier(void *iommu_data,
						struct notifier_block *nb)
{
	struct vfio_iommu *iommu = iommu_data;

	return blocking_notifier_chain_unregister(&iommu->notifier, nb);
}
static const struct vfio_iommu_driver_ops vfio_iommu_driver_ops_type1 = {
	.name			= "vfio-iommu-type1",
	.owner			= THIS_MODULE,
	.open			= vfio_iommu_type1_open,
	.release		= vfio_iommu_type1_release,
	.ioctl			= vfio_iommu_type1_ioctl,
	.attach_group		= vfio_iommu_type1_attach_group,
	.detach_group		= vfio_iommu_type1_detach_group,
	.pin_pages		= vfio_iommu_type1_pin_pages,
	.unpin_pages		= vfio_iommu_type1_unpin_pages,
	.register_notifier	= vfio_iommu_type1_register_notifier,
	.unregister_notifier	= vfio_iommu_type1_unregister_notifier,
};
static int __init vfio_iommu_type1_init(void)
{
	return vfio_register_iommu_driver(&vfio_iommu_driver_ops_type1);
}

static void __exit vfio_iommu_type1_cleanup(void)
{
	vfio_unregister_iommu_driver(&vfio_iommu_driver_ops_type1);
}

module_init(vfio_iommu_type1_init);
module_exit(vfio_iommu_type1_cleanup);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);