/*
 * VFIO: IOMMU DMA mapping support for Type1 IOMMU
 *
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 *
 * We arbitrarily define a Type1 IOMMU as one matching the below code.
 * It could be called the x86 IOMMU as it's designed for AMD-Vi & Intel
 * VT-d, but that makes it harder to re-use as theoretically anyone
 * implementing a similar IOMMU could make use of this.  We expect the
 * IOMMU to support the IOMMU API and have few to no restrictions around
 * the IOVA range that can be mapped.  The Type1 IOMMU is currently
 * optimized for relatively static mappings of a userspace process with
 * userspace pages pinned into memory.  We also assume devices and IOMMU
 * domains are PCI based as the IOMMU API is still centered around a
 * device/bus interface rather than a group interface.
 */
#include <linux/compat.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/workqueue.h>
#include <linux/pid_namespace.h>
#include <linux/mdev.h>
#define DRIVER_VERSION	"0.2"
#define DRIVER_AUTHOR	"Alex Williamson <alex.williamson@redhat.com>"
#define DRIVER_DESC	"Type1 IOMMU driver for VFIO"
static bool allow_unsafe_interrupts;
module_param_named(allow_unsafe_interrupts,
		   allow_unsafe_interrupts, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(allow_unsafe_interrupts,
		 "Enable VFIO IOMMU support on platforms without interrupt remapping support.");
static bool disable_hugepages;
module_param_named(disable_hugepages,
		   disable_hugepages, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(disable_hugepages,
		 "Disable VFIO IOMMU support for IOMMU hugepages.");
struct vfio_iommu {
	struct list_head	domain_list;
	struct vfio_domain	*external_domain; /* domain for external user */
	struct mutex		lock;
	struct rb_root		dma_list;
	bool			v2;
	bool			nesting;
};

struct vfio_domain {
	struct iommu_domain	*domain;
	struct list_head	next;
	struct list_head	group_list;
	int			prot;		/* IOMMU_CACHE */
	bool			fgsp;		/* Fine-grained super pages */
};

struct vfio_dma {
	struct rb_node		node;
	dma_addr_t		iova;		/* Device address */
	unsigned long		vaddr;		/* Process virtual addr */
	size_t			size;		/* Map size (bytes) */
	int			prot;		/* IOMMU_READ/WRITE */
	bool			iommu_mapped;
	struct task_struct	*task;
	struct rb_root		pfn_list;	/* Ex-user pinned pfn list */
};

struct vfio_group {
	struct iommu_group	*iommu_group;
	struct list_head	next;
};

/*
 * Guest RAM pinning working set or DMA target
 */
struct vfio_pfn {
	struct rb_node		node;
	dma_addr_t		iova;		/* Device address */
	unsigned long		pfn;		/* Host pfn */
	atomic_t		ref_count;
};

#define IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu)	\
					(!list_empty(&iommu->domain_list))
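/*
 * Bookkeeping overview: a vfio_iommu (one per container) tracks its DMA
 * mappings in the dma_list rb-tree, keyed by IOVA range, and its IOMMU API
 * domains in domain_list.  Each vfio_dma additionally keeps a pfn_list
 * rb-tree of pages pinned on behalf of an external (mdev vendor driver)
 * user, so those pins can be reference counted and released independently
 * of the IOMMU mappings.
 */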
static int put_pfn(unsigned long pfn, int prot);

/*
 * This code handles mapping and unmapping of user data buffers
 * into DMA'ble space using the IOMMU
 */
static struct vfio_dma *vfio_find_dma(struct vfio_iommu *iommu,
				      dma_addr_t start, size_t size)
{
	struct rb_node *node = iommu->dma_list.rb_node;

	while (node) {
		struct vfio_dma *dma = rb_entry(node, struct vfio_dma, node);

		if (start + size <= dma->iova)
			node = node->rb_left;
		else if (start >= dma->iova + dma->size)
			node = node->rb_right;
		else
			return dma;
	}

	return NULL;
}
static void vfio_link_dma(struct vfio_iommu *iommu, struct vfio_dma *new)
{
	struct rb_node **link = &iommu->dma_list.rb_node, *parent = NULL;
	struct vfio_dma *dma;

	while (*link) {
		parent = *link;
		dma = rb_entry(parent, struct vfio_dma, node);

		if (new->iova + new->size <= dma->iova)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, &iommu->dma_list);
}
static void vfio_unlink_dma(struct vfio_iommu *iommu, struct vfio_dma *old)
{
	rb_erase(&old->node, &iommu->dma_list);
}
/*
 * Helper Functions for host iova-pfn list
 */
static struct vfio_pfn *vfio_find_vpfn(struct vfio_dma *dma, dma_addr_t iova)
{
	struct vfio_pfn *vpfn;
	struct rb_node *node = dma->pfn_list.rb_node;

	while (node) {
		vpfn = rb_entry(node, struct vfio_pfn, node);

		if (iova < vpfn->iova)
			node = node->rb_left;
		else if (iova > vpfn->iova)
			node = node->rb_right;
		else
			return vpfn;
	}
	return NULL;
}
static void vfio_link_pfn(struct vfio_dma *dma,
			  struct vfio_pfn *new)
{
	struct rb_node **link, *parent = NULL;
	struct vfio_pfn *vpfn;

	link = &dma->pfn_list.rb_node;
	while (*link) {
		parent = *link;
		vpfn = rb_entry(parent, struct vfio_pfn, node);

		if (new->iova < vpfn->iova)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, &dma->pfn_list);
}
static void vfio_unlink_pfn(struct vfio_dma *dma, struct vfio_pfn *old)
{
	rb_erase(&old->node, &dma->pfn_list);
}
static int vfio_add_to_pfn_list(struct vfio_dma *dma, dma_addr_t iova,
				unsigned long pfn)
{
	struct vfio_pfn *vpfn;

	vpfn = kzalloc(sizeof(*vpfn), GFP_KERNEL);
	if (!vpfn)
		return -ENOMEM;

	vpfn->iova = iova;
	vpfn->pfn = pfn;
	atomic_set(&vpfn->ref_count, 1);
	vfio_link_pfn(dma, vpfn);
	return 0;
}
static void vfio_remove_from_pfn_list(struct vfio_dma *dma,
				      struct vfio_pfn *vpfn)
{
	vfio_unlink_pfn(dma, vpfn);
	kfree(vpfn);
}
static struct vfio_pfn *vfio_iova_get_vfio_pfn(struct vfio_dma *dma,
					       unsigned long iova)
{
	struct vfio_pfn *vpfn = vfio_find_vpfn(dma, iova);

	if (vpfn)
		atomic_inc(&vpfn->ref_count);
	return vpfn;
}
static int vfio_iova_put_vfio_pfn(struct vfio_dma *dma, struct vfio_pfn *vpfn)
{
	int ret = 0;

	if (atomic_dec_and_test(&vpfn->ref_count)) {
		ret = put_pfn(vpfn->pfn, dma->prot);
		vfio_remove_from_pfn_list(dma, vpfn);
	}
	return ret;
}
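/*
 * The return value of vfio_iova_put_vfio_pfn() is the number of pages
 * actually released (0 or 1): only when the last external reference is
 * dropped is the pfn unpinned and removed from the pfn_list, which the
 * callers use to adjust locked memory accounting.
 */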
struct vwork {
	struct mm_struct	*mm;
	long			npage;
	struct work_struct	work;
};
/* delayed decrement/increment for locked_vm */
static void vfio_lock_acct_bg(struct work_struct *work)
{
	struct vwork *vwork = container_of(work, struct vwork, work);
	struct mm_struct *mm;

	mm = vwork->mm;
	down_write(&mm->mmap_sem);
	mm->locked_vm += vwork->npage;
	up_write(&mm->mmap_sem);
	mmput(mm);
	kfree(vwork);
}
static void vfio_lock_acct(struct task_struct *task, long npage)
{
	struct vwork *vwork;
	struct mm_struct *mm;

	if (!npage)
		return;

	mm = get_task_mm(task);
	if (!mm)
		return; /* process exited or nothing to do */

	if (down_write_trylock(&mm->mmap_sem)) {
		mm->locked_vm += npage;
		up_write(&mm->mmap_sem);
		mmput(mm);
		return;
	}

	/*
	 * Couldn't get mmap_sem lock, so must setup to update
	 * mm->locked_vm later.  If locked_vm were atomic, we
	 * wouldn't need this silliness
	 */
	vwork = kmalloc(sizeof(struct vwork), GFP_KERNEL);
	if (!vwork) {
		mmput(mm);
		return;
	}
	INIT_WORK(&vwork->work, vfio_lock_acct_bg);
	vwork->mm = mm;
	vwork->npage = npage;
	schedule_work(&vwork->work);
}
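/*
 * Note that locked memory is always charged to the task that created the
 * mapping (dma->task), not necessarily to current, since external users
 * such as mdev vendor drivers may pin pages from a different context than
 * the one that issued VFIO_IOMMU_MAP_DMA.
 */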
/*
 * Some mappings aren't backed by a struct page, for example an mmap'd
 * MMIO range for our own or another device.  These use a different
 * pfn conversion and shouldn't be tracked as locked pages.
 */
static bool is_invalid_reserved_pfn(unsigned long pfn)
{
	if (pfn_valid(pfn)) {
		bool reserved;
		struct page *tail = pfn_to_page(pfn);
		struct page *head = compound_head(tail);

		reserved = !!(PageReserved(head));
		if (head != tail) {
			/*
			 * "head" is not a dangling pointer
			 * (compound_head takes care of that)
			 * but the hugepage may have been split
			 * from under us (and we may not hold a
			 * reference count on the head page so it can
			 * be reused before we run PageReferenced), so
			 * we've to check PageTail before returning
			 * what we just read.
			 */
			smp_rmb();
			if (PageTail(tail))
				return reserved;
		}
		return PageReserved(tail);
	}

	return true;
}
static int put_pfn(unsigned long pfn, int prot)
{
	if (!is_invalid_reserved_pfn(pfn)) {
		struct page *page = pfn_to_page(pfn);

		if (prot & IOMMU_WRITE)
			SetPageDirty(page);
		put_page(page);
		return 1;
	}
	return 0;
}
static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
			 int prot, unsigned long *pfn)
{
	struct page *page[1];
	struct vm_area_struct *vma;
	int ret;

	if (mm == current->mm) {
		ret = get_user_pages_fast(vaddr, 1, !!(prot & IOMMU_WRITE),
					  page);
	} else {
		unsigned int flags = 0;

		if (prot & IOMMU_WRITE)
			flags |= FOLL_WRITE;

		down_read(&mm->mmap_sem);
		ret = get_user_pages_remote(NULL, mm, vaddr, 1, flags, page,
					    NULL, NULL);
		up_read(&mm->mmap_sem);
	}

	if (ret == 1) {
		*pfn = page_to_pfn(page[0]);
		return 0;
	}

	down_read(&mm->mmap_sem);

	vma = find_vma_intersection(mm, vaddr, vaddr + 1);

	if (vma && vma->vm_flags & VM_PFNMAP) {
		*pfn = ((vaddr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
		if (is_invalid_reserved_pfn(*pfn))
			ret = 0;
	}

	up_read(&mm->mmap_sem);
	return ret;
}
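/*
 * When get_user_pages*() fails, vaddr_get_pfn() falls back to walking the
 * VMA: for VM_PFNMAP regions (e.g. an mmap'd MMIO range of another device)
 * there is no struct page to pin, so the pfn is computed directly from the
 * vma offset and accepted only if it is an invalid/reserved pfn.
 */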
/*
 * Attempt to pin pages.  We really don't want to track all the pfns and
 * the iommu can only map chunks of consecutive pfns anyway, so get the
 * first page and all consecutive pages with the same locking.
 */
static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
				  long npage, unsigned long *pfn_base)
{
	unsigned long limit;
	bool lock_cap = ns_capable(task_active_pid_ns(dma->task)->user_ns,
				   CAP_IPC_LOCK);
	struct mm_struct *mm;
	long ret, i = 0, lock_acct = 0;
	bool rsvd;
	dma_addr_t iova = vaddr - dma->vaddr + dma->iova;

	mm = get_task_mm(dma->task);
	if (!mm)
		return -ENODEV;

	ret = vaddr_get_pfn(mm, vaddr, dma->prot, pfn_base);
	if (ret)
		goto pin_pg_remote_exit;

	rsvd = is_invalid_reserved_pfn(*pfn_base);
	limit = task_rlimit(dma->task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	/*
	 * Reserved pages aren't counted against the user, externally pinned
	 * pages are already counted against the user.
	 */
	if (!rsvd && !vfio_find_vpfn(dma, iova)) {
		if (!lock_cap && mm->locked_vm + 1 > limit) {
			put_pfn(*pfn_base, dma->prot);
			pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", __func__,
				limit << PAGE_SHIFT);
			ret = -ENOMEM;
			goto pin_pg_remote_exit;
		}
		lock_acct++;
	}

	i++;
	if (likely(!disable_hugepages)) {
		/* Lock all the consecutive pages from pfn_base */
		for (vaddr += PAGE_SIZE, iova += PAGE_SIZE; i < npage;
		     i++, vaddr += PAGE_SIZE, iova += PAGE_SIZE) {
			unsigned long pfn = 0;

			ret = vaddr_get_pfn(mm, vaddr, dma->prot, &pfn);
			if (ret)
				break;

			if (pfn != *pfn_base + i ||
			    rsvd != is_invalid_reserved_pfn(pfn)) {
				put_pfn(pfn, dma->prot);
				break;
			}

			if (!rsvd && !vfio_find_vpfn(dma, iova)) {
				if (!lock_cap &&
				    mm->locked_vm + lock_acct + 1 > limit) {
					put_pfn(pfn, dma->prot);
					pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n",
						__func__, limit << PAGE_SHIFT);
					break;
				}
				lock_acct++;
			}
		}
	}

	vfio_lock_acct(dma->task, lock_acct);
	ret = i;

pin_pg_remote_exit:
	mmput(mm);
	return ret;
}
static long vfio_unpin_pages_remote(struct vfio_dma *dma, dma_addr_t iova,
				    unsigned long pfn, long npage,
				    bool do_accounting)
{
	long unlocked = 0, locked = 0;
	long i;

	for (i = 0; i < npage; i++) {
		if (put_pfn(pfn++, dma->prot)) {
			unlocked++;
			if (vfio_find_vpfn(dma, iova + (i << PAGE_SHIFT)))
				locked++;
		}
	}

	if (do_accounting)
		vfio_lock_acct(dma->task, locked - unlocked);

	return unlocked;
}
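/*
 * The locked counter above offsets pages that are still held on the
 * dma->pfn_list (i.e. pinned by an external user); those remain charged to
 * the task even though the IOMMU-driven pin has been dropped, so only the
 * difference is subtracted from locked_vm.
 */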
static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr,
				  unsigned long *pfn_base, bool do_accounting)
{
	unsigned long limit;
	bool lock_cap = ns_capable(task_active_pid_ns(dma->task)->user_ns,
				   CAP_IPC_LOCK);
	struct mm_struct *mm;
	int ret;
	bool rsvd;

	mm = get_task_mm(dma->task);
	if (!mm)
		return -ENODEV;

	ret = vaddr_get_pfn(mm, vaddr, dma->prot, pfn_base);
	if (ret)
		goto pin_page_exit;

	rsvd = is_invalid_reserved_pfn(*pfn_base);
	limit = task_rlimit(dma->task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	if (!rsvd && !lock_cap && mm->locked_vm + 1 > limit) {
		put_pfn(*pfn_base, dma->prot);
		pr_warn("%s: Task %s (%d) RLIMIT_MEMLOCK (%ld) exceeded\n",
			__func__, dma->task->comm, task_pid_nr(dma->task),
			limit << PAGE_SHIFT);
		ret = -ENOMEM;
		goto pin_page_exit;
	}

	if (!rsvd && do_accounting)
		vfio_lock_acct(dma->task, 1);

pin_page_exit:
	mmput(mm);
	return ret;
}
static int vfio_unpin_page_external(struct vfio_dma *dma, dma_addr_t iova,
				    bool do_accounting)
{
	int unlocked;
	struct vfio_pfn *vpfn = vfio_find_vpfn(dma, iova);

	if (!vpfn)
		return 0;

	unlocked = vfio_iova_put_vfio_pfn(dma, vpfn);

	if (do_accounting)
		vfio_lock_acct(dma->task, -unlocked);

	return unlocked;
}
static int vfio_iommu_type1_pin_pages(void *iommu_data,
				      unsigned long *user_pfn,
				      int npage, int prot,
				      unsigned long *phys_pfn)
{
	struct vfio_iommu *iommu = iommu_data;
	int i, j, ret;
	unsigned long remote_vaddr;
	struct vfio_dma *dma;
	bool do_accounting;

	if (!iommu || !user_pfn || !phys_pfn)
		return -EINVAL;

	/* Supported for v2 version only */
	if (!iommu->v2)
		return -EACCES;

	mutex_lock(&iommu->lock);

	if (!iommu->external_domain) {
		ret = -EINVAL;
		goto pin_done;
	}

	/*
	 * If an iommu-capable domain exists in the container then all pages
	 * are already pinned and accounted for.  Accounting should only be
	 * done if there is no iommu-capable domain in the container.
	 */
	do_accounting = !IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu);

	for (i = 0; i < npage; i++) {
		dma_addr_t iova;
		struct vfio_pfn *vpfn;

		iova = user_pfn[i] << PAGE_SHIFT;
		dma = vfio_find_dma(iommu, iova, 0);
		if (!dma) {
			ret = -EINVAL;
			goto pin_unwind;
		}

		if ((dma->prot & prot) != prot) {
			ret = -EPERM;
			goto pin_unwind;
		}

		vpfn = vfio_iova_get_vfio_pfn(dma, iova);
		if (vpfn) {
			phys_pfn[i] = vpfn->pfn;
			continue;
		}

		remote_vaddr = dma->vaddr + iova - dma->iova;
		ret = vfio_pin_page_external(dma, remote_vaddr, &phys_pfn[i],
					     do_accounting);
		if (ret)
			goto pin_unwind;

		ret = vfio_add_to_pfn_list(dma, iova, phys_pfn[i]);
		if (ret) {
			vfio_unpin_page_external(dma, iova, do_accounting);
			goto pin_unwind;
		}
	}

	ret = i;
	goto pin_done;

pin_unwind:
	phys_pfn[i] = 0;
	for (j = 0; j < i; j++) {
		dma_addr_t iova;

		iova = user_pfn[j] << PAGE_SHIFT;
		dma = vfio_find_dma(iommu, iova, 0);
		vfio_unpin_page_external(dma, iova, do_accounting);
		phys_pfn[j] = 0;
	}
pin_done:
	mutex_unlock(&iommu->lock);
	return ret;
}
static int vfio_iommu_type1_unpin_pages(void *iommu_data,
					unsigned long *user_pfn,
					int npage)
{
	struct vfio_iommu *iommu = iommu_data;
	bool do_accounting;
	int i;

	if (!iommu || !user_pfn)
		return -EINVAL;

	/* Supported for v2 version only */
	if (!iommu->v2)
		return -EACCES;

	mutex_lock(&iommu->lock);

	if (!iommu->external_domain) {
		mutex_unlock(&iommu->lock);
		return -EINVAL;
	}

	do_accounting = !IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu);
	for (i = 0; i < npage; i++) {
		struct vfio_dma *dma;
		dma_addr_t iova;

		iova = user_pfn[i] << PAGE_SHIFT;
		dma = vfio_find_dma(iommu, iova, 0);
		if (!dma)
			goto unpin_exit;
		vfio_unpin_page_external(dma, iova, do_accounting);
	}

unpin_exit:
	mutex_unlock(&iommu->lock);
	return i > npage ? npage : (i > 0 ? i : -EINVAL);
}
static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
			     bool do_accounting)
{
	dma_addr_t iova = dma->iova, end = dma->iova + dma->size;
	struct vfio_domain *domain, *d;
	long unlocked = 0;

	if (!dma->size)
		return 0;

	if (!IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu))
		return 0;

	/*
	 * We use the IOMMU to track the physical addresses, otherwise we'd
	 * need a much more complicated tracking system.  Unfortunately that
	 * means we need to use one of the iommu domains to figure out the
	 * pfns to unpin.  The rest need to be unmapped in advance so we have
	 * no iommu translations remaining when the pages are unpinned.
	 */
	domain = d = list_first_entry(&iommu->domain_list,
				      struct vfio_domain, next);

	list_for_each_entry_continue(d, &iommu->domain_list, next) {
		iommu_unmap(d->domain, dma->iova, dma->size);
		cond_resched();
	}

	while (iova < end) {
		size_t unmapped, len;
		phys_addr_t phys, next;

		phys = iommu_iova_to_phys(domain->domain, iova);
		if (WARN_ON(!phys)) {
			iova += PAGE_SIZE;
			continue;
		}

		/*
		 * To optimize for fewer iommu_unmap() calls, each of which
		 * may require hardware cache flushing, try to find the
		 * largest contiguous physical memory chunk to unmap.
		 */
		for (len = PAGE_SIZE;
		     !domain->fgsp && iova + len < end; len += PAGE_SIZE) {
			next = iommu_iova_to_phys(domain->domain, iova + len);
			if (next != phys + len)
				break;
		}

		unmapped = iommu_unmap(domain->domain, iova, len);
		if (WARN_ON(!unmapped))
			break;

		unlocked += vfio_unpin_pages_remote(dma, iova,
						    phys >> PAGE_SHIFT,
						    unmapped >> PAGE_SHIFT,
						    false);
		iova += unmapped;

		cond_resched();
	}

	dma->iommu_mapped = false;
	if (do_accounting) {
		vfio_lock_acct(dma->task, -unlocked);
		return 0;
	}
	return unlocked;
}
static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma)
{
	vfio_unmap_unpin(iommu, dma, true);
	vfio_unlink_dma(iommu, dma);
	put_task_struct(dma->task);
	kfree(dma);
}
static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu)
{
	struct vfio_domain *domain;
	unsigned long bitmap = ULONG_MAX;

	mutex_lock(&iommu->lock);
	list_for_each_entry(domain, &iommu->domain_list, next)
		bitmap &= domain->domain->pgsize_bitmap;
	mutex_unlock(&iommu->lock);

	/*
	 * In case the IOMMU supports page sizes smaller than PAGE_SIZE
	 * we pretend PAGE_SIZE is supported and hide sub-PAGE_SIZE sizes.
	 * That way the user will be able to map/unmap buffers whose size/
	 * start address is aligned with PAGE_SIZE.  Pinning code uses that
	 * granularity while iommu driver can use the sub-PAGE_SIZE size
	 * directly.
	 */
	if (bitmap & ~PAGE_MASK) {
		bitmap &= PAGE_MASK;
		bitmap |= PAGE_SIZE;
	}

	return bitmap;
}
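/*
 * For example, an IOMMU advertising 4K | 2M | 1G page support leaves the
 * bitmap untouched, while a hypothetical IOMMU exposing sub-PAGE_SIZE
 * granules would have those low bits masked off and PAGE_SIZE substituted,
 * so userspace always sees at least PAGE_SIZE granularity.
 */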
static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
			     struct vfio_iommu_type1_dma_unmap *unmap)
{
	uint64_t mask;
	struct vfio_dma *dma;
	size_t unmapped = 0;
	int ret = 0;

	mask = ((uint64_t)1 << __ffs(vfio_pgsize_bitmap(iommu))) - 1;

	if (unmap->iova & mask)
		return -EINVAL;
	if (!unmap->size || unmap->size & mask)
		return -EINVAL;

	WARN_ON(mask & PAGE_MASK);

	mutex_lock(&iommu->lock);

	/*
	 * vfio-iommu-type1 (v1) - User mappings were coalesced together to
	 * avoid tracking individual mappings.  This means that the granularity
	 * of the original mapping was lost and the user was allowed to attempt
	 * to unmap any range.  Depending on the contiguousness of physical
	 * memory and page sizes supported by the IOMMU, arbitrary unmaps may
	 * or may not have worked.  We only guaranteed unmap granularity
	 * matching the original mapping; even though it was untracked here,
	 * the original mappings are reflected in IOMMU mappings.  This
	 * resulted in a couple unusual behaviors.  First, if a range is not
	 * able to be unmapped, ex. a set of 4k pages that was mapped as a
	 * 2M hugepage into the IOMMU, the unmap ioctl returns success but with
	 * a zero sized unmap.  Also, if an unmap request overlaps the first
	 * address of a hugepage, the IOMMU will unmap the entire hugepage.
	 * This also returns success and the returned unmap size reflects the
	 * actual size unmapped.
	 *
	 * We attempt to maintain compatibility with this "v1" interface, but
	 * we take control out of the hands of the IOMMU.  Therefore, an unmap
	 * request offset from the beginning of the original mapping will
	 * return success with zero sized unmap.  And an unmap request covering
	 * the first iova of mapping will unmap the entire range.
	 *
	 * The v2 version of this interface intends to be more deterministic.
	 * Unmap requests must fully cover previous mappings.  Multiple
	 * mappings may still be unmapped by specifying large ranges, but there
	 * must not be any previous mappings bisected by the range.  An error
	 * will be returned if these conditions are not met.  The v2 interface
	 * will only return success and a size of zero if there were no
	 * mappings within the range.
	 */
	if (iommu->v2) {
		dma = vfio_find_dma(iommu, unmap->iova, 0);
		if (dma && dma->iova != unmap->iova) {
			ret = -EINVAL;
			goto unlock;
		}
		dma = vfio_find_dma(iommu, unmap->iova + unmap->size - 1, 0);
		if (dma && dma->iova + dma->size != unmap->iova + unmap->size) {
			ret = -EINVAL;
			goto unlock;
		}
	}

	while ((dma = vfio_find_dma(iommu, unmap->iova, unmap->size))) {
		if (!iommu->v2 && unmap->iova > dma->iova)
			break;
		/*
		 * Task with same address space who mapped this iova range is
		 * allowed to unmap the iova range.
		 */
		if (dma->task->mm != current->mm)
			break;
		unmapped += dma->size;
		vfio_remove_dma(iommu, dma);
	}

unlock:
	mutex_unlock(&iommu->lock);

	/* Report how much was unmapped */
	unmap->size = unmapped;

	return ret;
}
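/*
 * Concretely, under the v2 semantics above: if userspace mapped
 * [0x100000, 0x200000) with a single VFIO_IOMMU_MAP_DMA call, an unmap
 * request for [0x100000, 0x180000) fails with -EINVAL because it would
 * bisect the mapping, while a request for [0x100000, 0x200000) (or any
 * larger range that only covers whole mappings) succeeds and reports the
 * total size removed.
 */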
/*
 * Turns out AMD IOMMU has a page table bug where it won't map large pages
 * to a region that previously mapped smaller pages.  This should be fixed
 * soon, so this is just a temporary workaround to break mappings down into
 * PAGE_SIZE.  Better to map smaller pages than nothing.
 */
static int map_try_harder(struct vfio_domain *domain, dma_addr_t iova,
			  unsigned long pfn, long npage, int prot)
{
	long i;
	int ret = 0;

	for (i = 0; i < npage; i++, pfn++, iova += PAGE_SIZE) {
		ret = iommu_map(domain->domain, iova,
				(phys_addr_t)pfn << PAGE_SHIFT,
				PAGE_SIZE, prot | domain->prot);
		if (ret)
			break;
	}

	for (; i < npage && i > 0; i--, iova -= PAGE_SIZE)
		iommu_unmap(domain->domain, iova, PAGE_SIZE);

	return ret;
}
static int vfio_iommu_map(struct vfio_iommu *iommu, dma_addr_t iova,
			  unsigned long pfn, long npage, int prot)
{
	struct vfio_domain *d;
	int ret;

	list_for_each_entry(d, &iommu->domain_list, next) {
		ret = iommu_map(d->domain, iova, (phys_addr_t)pfn << PAGE_SHIFT,
				npage << PAGE_SHIFT, prot | d->prot);
		if (ret) {
			if (ret != -EBUSY ||
			    map_try_harder(d, iova, pfn, npage, prot))
				goto unwind;
		}

		cond_resched();
	}

	return 0;

unwind:
	list_for_each_entry_continue_reverse(d, &iommu->domain_list, next)
		iommu_unmap(d->domain, iova, npage << PAGE_SHIFT);

	return ret;
}
static int vfio_pin_map_dma(struct vfio_iommu *iommu, struct vfio_dma *dma,
			    size_t map_size)
{
	dma_addr_t iova = dma->iova;
	unsigned long vaddr = dma->vaddr;
	size_t size = map_size;
	long npage;
	unsigned long pfn;
	int ret = 0;

	while (size) {
		/* Pin a contiguous chunk of memory */
		npage = vfio_pin_pages_remote(dma, vaddr + dma->size,
					      size >> PAGE_SHIFT, &pfn);
		if (npage <= 0) {
			WARN_ON(!npage);
			ret = (int)npage;
			break;
		}

		/* Map it! */
		ret = vfio_iommu_map(iommu, iova + dma->size, pfn, npage,
				     dma->prot);
		if (ret) {
			vfio_unpin_pages_remote(dma, iova + dma->size, pfn,
						npage, true);
			break;
		}

		size -= npage << PAGE_SHIFT;
		dma->size += npage << PAGE_SHIFT;
	}

	dma->iommu_mapped = true;

	if (ret)
		vfio_remove_dma(iommu, dma);

	return ret;
}
static int vfio_dma_do_map(struct vfio_iommu *iommu,
			   struct vfio_iommu_type1_dma_map *map)
{
	dma_addr_t iova = map->iova;
	unsigned long vaddr = map->vaddr;
	size_t size = map->size;
	int ret = 0, prot = 0;
	uint64_t mask;
	struct vfio_dma *dma;

	/* Verify that none of our __u64 fields overflow */
	if (map->size != size || map->vaddr != vaddr || map->iova != iova)
		return -EINVAL;

	mask = ((uint64_t)1 << __ffs(vfio_pgsize_bitmap(iommu))) - 1;

	WARN_ON(mask & PAGE_MASK);

	/* READ/WRITE from device perspective */
	if (map->flags & VFIO_DMA_MAP_FLAG_WRITE)
		prot |= IOMMU_WRITE;
	if (map->flags & VFIO_DMA_MAP_FLAG_READ)
		prot |= IOMMU_READ;

	if (!prot || !size || (size | iova | vaddr) & mask)
		return -EINVAL;

	/* Don't allow IOVA or virtual address wrap */
	if (iova + size - 1 < iova || vaddr + size - 1 < vaddr)
		return -EINVAL;

	mutex_lock(&iommu->lock);

	if (vfio_find_dma(iommu, iova, size)) {
		ret = -EEXIST;
		goto out_unlock;
	}

	dma = kzalloc(sizeof(*dma), GFP_KERNEL);
	if (!dma) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	dma->iova = iova;
	dma->vaddr = vaddr;
	dma->prot = prot;
	get_task_struct(current);
	dma->task = current;
	dma->pfn_list = RB_ROOT;

	/* Insert zero-sized and grow as we map chunks of it */
	vfio_link_dma(iommu, dma);

	/* Don't pin and map if container doesn't contain IOMMU capable domain*/
	if (!IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu))
		dma->size = size;
	else
		ret = vfio_pin_map_dma(iommu, dma, size);

out_unlock:
	mutex_unlock(&iommu->lock);
	return ret;
}
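/*
 * A minimal userspace sketch of driving the map/unmap paths above
 * (assuming a container fd already set to VFIO_TYPE1v2_IOMMU and a
 * page-aligned buffer; error handling omitted):
 *
 *	struct vfio_iommu_type1_dma_map map = {
 *		.argsz = sizeof(map),
 *		.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
 *		.vaddr = (__u64)(uintptr_t)buf,
 *		.iova  = 0x100000,
 *		.size  = 0x100000,
 *	};
 *	ioctl(container_fd, VFIO_IOMMU_MAP_DMA, &map);
 *
 *	struct vfio_iommu_type1_dma_unmap unmap = {
 *		.argsz = sizeof(unmap),
 *		.iova  = 0x100000,
 *		.size  = 0x100000,
 *	};
 *	ioctl(container_fd, VFIO_IOMMU_UNMAP_DMA, &unmap);
 */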
static int vfio_bus_type(struct device *dev, void *data)
{
	struct bus_type **bus = data;

	if (*bus && *bus != dev->bus)
		return -EINVAL;

	*bus = dev->bus;

	return 0;
}
static int vfio_iommu_replay(struct vfio_iommu *iommu,
			     struct vfio_domain *domain)
{
	struct vfio_domain *d;
	struct rb_node *n;
	int ret;

	/* Arbitrarily pick the first domain in the list for lookups */
	d = list_first_entry(&iommu->domain_list, struct vfio_domain, next);
	n = rb_first(&iommu->dma_list);

	for (; n; n = rb_next(n)) {
		struct vfio_dma *dma;
		dma_addr_t iova;

		dma = rb_entry(n, struct vfio_dma, node);
		iova = dma->iova;

		while (iova < dma->iova + dma->size) {
			phys_addr_t phys;
			size_t size;

			if (dma->iommu_mapped) {
				phys_addr_t p;
				dma_addr_t i;

				phys = iommu_iova_to_phys(d->domain, iova);

				if (WARN_ON(!phys)) {
					iova += PAGE_SIZE;
					continue;
				}

				size = PAGE_SIZE;
				p = phys + size;
				i = iova + size;
				while (i < dma->iova + dma->size &&
				       p == iommu_iova_to_phys(d->domain, i)) {
					size += PAGE_SIZE;
					p += PAGE_SIZE;
					i += PAGE_SIZE;
				}
			} else {
				unsigned long pfn;
				unsigned long vaddr = dma->vaddr +
						      (iova - dma->iova);
				size_t n = dma->iova + dma->size - iova;
				long npage;

				npage = vfio_pin_pages_remote(dma, vaddr,
							      n >> PAGE_SHIFT,
							      &pfn);
				if (npage <= 0) {
					WARN_ON(!npage);
					ret = (int)npage;
					return ret;
				}

				phys = pfn << PAGE_SHIFT;
				size = npage << PAGE_SHIFT;
			}

			ret = iommu_map(domain->domain, iova, phys,
					size, dma->prot | domain->prot);
			if (ret)
				return ret;

			iova += size;
		}
		dma->iommu_mapped = true;
	}
	return 0;
}
/*
 * We change our unmap behavior slightly depending on whether the IOMMU
 * supports fine-grained superpages.  IOMMUs like AMD-Vi will use a superpage
 * for practically any contiguous power-of-two mapping we give it.  This means
 * we don't need to look for contiguous chunks ourselves to make unmapping
 * more efficient.  On IOMMUs with coarse-grained super pages, like Intel VT-d
 * with discrete 2M/1G/512G/1T superpages, identifying contiguous chunks
 * significantly boosts non-hugetlbfs mappings and doesn't seem to hurt when
 * hugetlbfs is in use.
 */
static void vfio_test_domain_fgsp(struct vfio_domain *domain)
{
	struct page *pages;
	int ret, order = get_order(PAGE_SIZE * 2);

	pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!pages)
		return;

	ret = iommu_map(domain->domain, 0, page_to_phys(pages), PAGE_SIZE * 2,
			IOMMU_READ | IOMMU_WRITE | domain->prot);
	if (!ret) {
		size_t unmapped = iommu_unmap(domain->domain, 0, PAGE_SIZE);

		if (unmapped == PAGE_SIZE)
			iommu_unmap(domain->domain, PAGE_SIZE, PAGE_SIZE);
		else
			domain->fgsp = true;
	}

	__free_pages(pages, order);
}
static struct vfio_group *find_iommu_group(struct vfio_domain *domain,
					   struct iommu_group *iommu_group)
{
	struct vfio_group *g;

	list_for_each_entry(g, &domain->group_list, next) {
		if (g->iommu_group == iommu_group)
			return g;
	}

	return NULL;
}
static int vfio_iommu_type1_attach_group(void *iommu_data,
					 struct iommu_group *iommu_group)
{
	struct vfio_iommu *iommu = iommu_data;
	struct vfio_group *group;
	struct vfio_domain *domain, *d;
	struct bus_type *bus = NULL, *mdev_bus;
	int ret;

	mutex_lock(&iommu->lock);

	list_for_each_entry(d, &iommu->domain_list, next) {
		if (find_iommu_group(d, iommu_group)) {
			mutex_unlock(&iommu->lock);
			return -EINVAL;
		}
	}

	if (iommu->external_domain) {
		if (find_iommu_group(iommu->external_domain, iommu_group)) {
			mutex_unlock(&iommu->lock);
			return -EINVAL;
		}
	}

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!group || !domain) {
		ret = -ENOMEM;
		goto out_free;
	}

	group->iommu_group = iommu_group;

	/* Determine bus_type in order to allocate a domain */
	ret = iommu_group_for_each_dev(iommu_group, &bus, vfio_bus_type);
	if (ret)
		goto out_free;

	mdev_bus = symbol_get(mdev_bus_type);

	if (mdev_bus) {
		if ((bus == mdev_bus) && !iommu_present(bus)) {
			symbol_put(mdev_bus_type);
			if (!iommu->external_domain) {
				INIT_LIST_HEAD(&domain->group_list);
				iommu->external_domain = domain;
			} else
				kfree(domain);

			list_add(&group->next,
				 &iommu->external_domain->group_list);
			mutex_unlock(&iommu->lock);
			return 0;
		}
		symbol_put(mdev_bus_type);
	}

	domain->domain = iommu_domain_alloc(bus);
	if (!domain->domain) {
		ret = -EIO;
		goto out_free;
	}

	if (iommu->nesting) {
		int attr = 1;

		ret = iommu_domain_set_attr(domain->domain, DOMAIN_ATTR_NESTING,
					    &attr);
		if (ret)
			goto out_domain;
	}

	ret = iommu_attach_group(domain->domain, iommu_group);
	if (ret)
		goto out_domain;

	INIT_LIST_HEAD(&domain->group_list);
	list_add(&group->next, &domain->group_list);

	if (!allow_unsafe_interrupts &&
	    !iommu_capable(bus, IOMMU_CAP_INTR_REMAP)) {
		pr_warn("%s: No interrupt remapping support.  Use the module param \"allow_unsafe_interrupts\" to enable VFIO IOMMU support on this platform\n",
		       __func__);
		ret = -EPERM;
		goto out_detach;
	}

	if (iommu_capable(bus, IOMMU_CAP_CACHE_COHERENCY))
		domain->prot |= IOMMU_CACHE;

	/*
	 * Try to match an existing compatible domain.  We don't want to
	 * preclude an IOMMU driver supporting multiple bus_types and being
	 * able to include different bus_types in the same IOMMU domain, so
	 * we test whether the domains use the same iommu_ops rather than
	 * testing if they're on the same bus_type.
	 */
	list_for_each_entry(d, &iommu->domain_list, next) {
		if (d->domain->ops == domain->domain->ops &&
		    d->prot == domain->prot) {
			iommu_detach_group(domain->domain, iommu_group);
			if (!iommu_attach_group(d->domain, iommu_group)) {
				list_add(&group->next, &d->group_list);
				iommu_domain_free(domain->domain);
				kfree(domain);
				mutex_unlock(&iommu->lock);
				return 0;
			}

			ret = iommu_attach_group(domain->domain, iommu_group);
			if (ret)
				goto out_domain;
		}
	}

	vfio_test_domain_fgsp(domain);

	/* replay mappings on new domains */
	ret = vfio_iommu_replay(iommu, domain);
	if (ret)
		goto out_detach;

	list_add(&domain->next, &iommu->domain_list);

	mutex_unlock(&iommu->lock);

	return 0;

out_detach:
	iommu_detach_group(domain->domain, iommu_group);
out_domain:
	iommu_domain_free(domain->domain);
out_free:
	kfree(domain);
	kfree(group);
	mutex_unlock(&iommu->lock);
	return ret;
}
static void vfio_iommu_unmap_unpin_all(struct vfio_iommu *iommu)
{
	struct rb_node *node;

	while ((node = rb_first(&iommu->dma_list)))
		vfio_remove_dma(iommu, rb_entry(node, struct vfio_dma, node));
}
static void vfio_iommu_unmap_unpin_reaccount(struct vfio_iommu *iommu)
{
	struct rb_node *n, *p;

	n = rb_first(&iommu->dma_list);
	for (; n; n = rb_next(n)) {
		struct vfio_dma *dma;
		long locked = 0, unlocked = 0;

		dma = rb_entry(n, struct vfio_dma, node);
		unlocked += vfio_unmap_unpin(iommu, dma, false);
		p = rb_first(&dma->pfn_list);
		for (; p; p = rb_next(p)) {
			struct vfio_pfn *vpfn = rb_entry(p, struct vfio_pfn,
							 node);

			if (!is_invalid_reserved_pfn(vpfn->pfn))
				locked++;
		}
		vfio_lock_acct(dma->task, locked - unlocked);
	}
}
static void vfio_sanity_check_pfn_list(struct vfio_iommu *iommu)
{
	struct rb_node *n;

	n = rb_first(&iommu->dma_list);
	for (; n; n = rb_next(n)) {
		struct vfio_dma *dma;

		dma = rb_entry(n, struct vfio_dma, node);

		if (WARN_ON(!RB_EMPTY_ROOT(&dma->pfn_list)))
			break;
	}
}
static void vfio_iommu_type1_detach_group(void *iommu_data,
					  struct iommu_group *iommu_group)
{
	struct vfio_iommu *iommu = iommu_data;
	struct vfio_domain *domain;
	struct vfio_group *group;

	mutex_lock(&iommu->lock);

	if (iommu->external_domain) {
		group = find_iommu_group(iommu->external_domain, iommu_group);
		if (group) {
			list_del(&group->next);
			kfree(group);

			if (list_empty(&iommu->external_domain->group_list)) {
				vfio_sanity_check_pfn_list(iommu);

				if (!IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu))
					vfio_iommu_unmap_unpin_all(iommu);

				kfree(iommu->external_domain);
				iommu->external_domain = NULL;
			}
			goto detach_group_done;
		}
	}

	list_for_each_entry(domain, &iommu->domain_list, next) {
		group = find_iommu_group(domain, iommu_group);
		if (!group)
			continue;

		iommu_detach_group(domain->domain, iommu_group);
		list_del(&group->next);
		kfree(group);
		/*
		 * Group ownership provides privilege, if the group list is
		 * empty, the domain goes away.  If it's the last domain with
		 * iommu and external domain doesn't exist, then all the
		 * mappings go away too.  If it's the last domain with iommu
		 * and external domain exist, update accounting
		 */
		if (list_empty(&domain->group_list)) {
			if (list_is_singular(&iommu->domain_list)) {
				if (!iommu->external_domain)
					vfio_iommu_unmap_unpin_all(iommu);
				else
					vfio_iommu_unmap_unpin_reaccount(iommu);
			}
			iommu_domain_free(domain->domain);
			list_del(&domain->next);
			kfree(domain);
		}
		break;
	}

detach_group_done:
	mutex_unlock(&iommu->lock);
}
static void *vfio_iommu_type1_open(unsigned long arg)
{
	struct vfio_iommu *iommu;

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return ERR_PTR(-ENOMEM);

	switch (arg) {
	case VFIO_TYPE1_IOMMU:
		break;
	case VFIO_TYPE1_NESTING_IOMMU:
		iommu->nesting = true;
		/* nesting implies v2, fall through */
	case VFIO_TYPE1v2_IOMMU:
		iommu->v2 = true;
		break;
	default:
		kfree(iommu);
		return ERR_PTR(-EINVAL);
	}

	INIT_LIST_HEAD(&iommu->domain_list);
	iommu->dma_list = RB_ROOT;
	mutex_init(&iommu->lock);

	return iommu;
}
static void vfio_release_domain(struct vfio_domain *domain, bool external)
{
	struct vfio_group *group, *group_tmp;

	list_for_each_entry_safe(group, group_tmp,
				 &domain->group_list, next) {
		if (!external)
			iommu_detach_group(domain->domain, group->iommu_group);
		list_del(&group->next);
		kfree(group);
	}

	if (!external)
		iommu_domain_free(domain->domain);
}
static void vfio_iommu_type1_release(void *iommu_data)
{
	struct vfio_iommu *iommu = iommu_data;
	struct vfio_domain *domain, *domain_tmp;

	if (iommu->external_domain) {
		vfio_release_domain(iommu->external_domain, true);
		vfio_sanity_check_pfn_list(iommu);
		kfree(iommu->external_domain);
	}

	vfio_iommu_unmap_unpin_all(iommu);

	list_for_each_entry_safe(domain, domain_tmp,
				 &iommu->domain_list, next) {
		vfio_release_domain(domain, false);
		list_del(&domain->next);
		kfree(domain);
	}
	kfree(iommu);
}
static int vfio_domains_have_iommu_cache(struct vfio_iommu *iommu)
{
	struct vfio_domain *domain;
	int ret = 1;

	mutex_lock(&iommu->lock);
	list_for_each_entry(domain, &iommu->domain_list, next) {
		if (!(domain->prot & IOMMU_CACHE)) {
			ret = 0;
			break;
		}
	}
	mutex_unlock(&iommu->lock);

	return ret;
}
static long vfio_iommu_type1_ioctl(void *iommu_data,
				   unsigned int cmd, unsigned long arg)
{
	struct vfio_iommu *iommu = iommu_data;
	unsigned long minsz;

	if (cmd == VFIO_CHECK_EXTENSION) {
		switch (arg) {
		case VFIO_TYPE1_IOMMU:
		case VFIO_TYPE1v2_IOMMU:
		case VFIO_TYPE1_NESTING_IOMMU:
			return 1;
		case VFIO_DMA_CC_IOMMU:
			if (!iommu)
				return 0;
			return vfio_domains_have_iommu_cache(iommu);
		default:
			return 0;
		}
	} else if (cmd == VFIO_IOMMU_GET_INFO) {
		struct vfio_iommu_type1_info info;

		minsz = offsetofend(struct vfio_iommu_type1_info, iova_pgsizes);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		info.flags = VFIO_IOMMU_INFO_PGSIZES;

		info.iova_pgsizes = vfio_pgsize_bitmap(iommu);

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_IOMMU_MAP_DMA) {
		struct vfio_iommu_type1_dma_map map;
		uint32_t mask = VFIO_DMA_MAP_FLAG_READ |
				VFIO_DMA_MAP_FLAG_WRITE;

		minsz = offsetofend(struct vfio_iommu_type1_dma_map, size);

		if (copy_from_user(&map, (void __user *)arg, minsz))
			return -EFAULT;

		if (map.argsz < minsz || map.flags & ~mask)
			return -EINVAL;

		return vfio_dma_do_map(iommu, &map);

	} else if (cmd == VFIO_IOMMU_UNMAP_DMA) {
		struct vfio_iommu_type1_dma_unmap unmap;
		long ret;

		minsz = offsetofend(struct vfio_iommu_type1_dma_unmap, size);

		if (copy_from_user(&unmap, (void __user *)arg, minsz))
			return -EFAULT;

		if (unmap.argsz < minsz || unmap.flags)
			return -EINVAL;

		ret = vfio_dma_do_unmap(iommu, &unmap);
		if (ret)
			return ret;

		return copy_to_user((void __user *)arg, &unmap, minsz) ?
			-EFAULT : 0;
	}

	return -ENOTTY;
}
static const struct vfio_iommu_driver_ops vfio_iommu_driver_ops_type1 = {
	.name			= "vfio-iommu-type1",
	.owner			= THIS_MODULE,
	.open			= vfio_iommu_type1_open,
	.release		= vfio_iommu_type1_release,
	.ioctl			= vfio_iommu_type1_ioctl,
	.attach_group		= vfio_iommu_type1_attach_group,
	.detach_group		= vfio_iommu_type1_detach_group,
	.pin_pages		= vfio_iommu_type1_pin_pages,
	.unpin_pages		= vfio_iommu_type1_unpin_pages,
};
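/*
 * The pin_pages/unpin_pages callbacks are not driven by the Type1 ioctls;
 * they are reached through the vfio core (vfio_pin_pages()/
 * vfio_unpin_pages()) when an mdev vendor driver needs to pin guest pages
 * on behalf of a device that has no IOMMU-backed domain of its own.
 */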
static int __init vfio_iommu_type1_init(void)
{
	return vfio_register_iommu_driver(&vfio_iommu_driver_ops_type1);
}

static void __exit vfio_iommu_type1_cleanup(void)
{
	vfio_unregister_iommu_driver(&vfio_iommu_driver_ops_type1);
}

module_init(vfio_iommu_type1_init);
module_exit(vfio_iommu_type1_cleanup);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);