/*
 * VFIO: IOMMU DMA mapping support for Type1 IOMMU
 *
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 *
 * We arbitrarily define a Type1 IOMMU as one matching the below code.
 * It could be called the x86 IOMMU as it's designed for AMD-Vi & Intel
 * VT-d, but that makes it harder to re-use as theoretically anyone
 * implementing a similar IOMMU could make use of this.  We expect the
 * IOMMU to support the IOMMU API and have few to no restrictions around
 * the IOVA range that can be mapped.  The Type1 IOMMU is currently
 * optimized for relatively static mappings of a userspace process with
 * userspace pages pinned into memory.  We also assume devices and IOMMU
 * domains are PCI based as the IOMMU API is still centered around a
 * device/bus interface rather than a group interface.
 */

#include <linux/compat.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/workqueue.h>
#include <linux/mdev.h>
#include <linux/notifier.h>
#include <linux/dma-iommu.h>
#include <linux/irqdomain.h>

#define DRIVER_VERSION  "0.2"
#define DRIVER_AUTHOR   "Alex Williamson <alex.williamson@redhat.com>"
#define DRIVER_DESC     "Type1 IOMMU driver for VFIO"

static bool allow_unsafe_interrupts;
module_param_named(allow_unsafe_interrupts,
		   allow_unsafe_interrupts, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(allow_unsafe_interrupts,
		 "Enable VFIO IOMMU support on platforms without interrupt remapping support.");

static bool disable_hugepages;
module_param_named(disable_hugepages,
		   disable_hugepages, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(disable_hugepages,
		 "Disable VFIO IOMMU support for IOMMU hugepages.");

struct vfio_iommu {
	struct list_head	domain_list;
	struct vfio_domain	*external_domain; /* domain for external user */
	struct mutex		lock;
	struct rb_root		dma_list;
	struct blocking_notifier_head notifier;
	bool			v2;
	bool			nesting;
};

struct vfio_domain {
	struct iommu_domain	*domain;
	struct list_head	next;
	struct list_head	group_list;
	int			prot;		/* IOMMU_CACHE */
	bool			fgsp;		/* Fine-grained super pages */
};

struct vfio_dma {
	struct rb_node		node;
	dma_addr_t		iova;		/* Device address */
	unsigned long		vaddr;		/* Process virtual addr */
	size_t			size;		/* Map size (bytes) */
	int			prot;		/* IOMMU_READ/WRITE */
	bool			iommu_mapped;
	bool			lock_cap;	/* capable(CAP_IPC_LOCK) */
	struct task_struct	*task;
	struct rb_root		pfn_list;	/* Ex-user pinned pfn list */
};

struct vfio_group {
	struct iommu_group	*iommu_group;
	struct list_head	next;
};

/*
 * Guest RAM pinning working set or DMA target
 */
struct vfio_pfn {
	struct rb_node		node;
	dma_addr_t		iova;		/* Device address */
	unsigned long		pfn;		/* Host pfn */
	atomic_t		ref_count;
};

struct vfio_regions {
	struct list_head list;
	dma_addr_t iova;
	phys_addr_t phys;
	size_t len;
};

#define IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu)	\
					(!list_empty(&iommu->domain_list))

static int put_pfn(unsigned long pfn, int prot);

/*
 * This code handles mapping and unmapping of user data buffers
 * into DMA'ble space using the IOMMU
 */

static struct vfio_dma *vfio_find_dma(struct vfio_iommu *iommu,
				      dma_addr_t start, size_t size)
{
	struct rb_node *node = iommu->dma_list.rb_node;

	while (node) {
		struct vfio_dma *dma = rb_entry(node, struct vfio_dma, node);

		if (start + size <= dma->iova)
			node = node->rb_left;
		else if (start >= dma->iova + dma->size)
			node = node->rb_right;
		else
			return dma;
	}

	return NULL;
}

static void vfio_link_dma(struct vfio_iommu *iommu, struct vfio_dma *new)
{
	struct rb_node **link = &iommu->dma_list.rb_node, *parent = NULL;
	struct vfio_dma *dma;

	while (*link) {
		parent = *link;
		dma = rb_entry(parent, struct vfio_dma, node);

		if (new->iova + new->size <= dma->iova)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, &iommu->dma_list);
}

static void vfio_unlink_dma(struct vfio_iommu *iommu, struct vfio_dma *old)
{
	rb_erase(&old->node, &iommu->dma_list);
}

/*
 * Helper functions for the host iova-pfn list
 */
static struct vfio_pfn *vfio_find_vpfn(struct vfio_dma *dma, dma_addr_t iova)
{
	struct vfio_pfn *vpfn;
	struct rb_node *node = dma->pfn_list.rb_node;

	while (node) {
		vpfn = rb_entry(node, struct vfio_pfn, node);

		if (iova < vpfn->iova)
			node = node->rb_left;
		else if (iova > vpfn->iova)
			node = node->rb_right;
		else
			return vpfn;
	}
	return NULL;
}

static void vfio_link_pfn(struct vfio_dma *dma,
			  struct vfio_pfn *new)
{
	struct rb_node **link, *parent = NULL;
	struct vfio_pfn *vpfn;

	link = &dma->pfn_list.rb_node;
	while (*link) {
		parent = *link;
		vpfn = rb_entry(parent, struct vfio_pfn, node);

		if (new->iova < vpfn->iova)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, &dma->pfn_list);
}

static void vfio_unlink_pfn(struct vfio_dma *dma, struct vfio_pfn *old)
{
	rb_erase(&old->node, &dma->pfn_list);
}

static int vfio_add_to_pfn_list(struct vfio_dma *dma, dma_addr_t iova,
				unsigned long pfn)
{
	struct vfio_pfn *vpfn;

	vpfn = kzalloc(sizeof(*vpfn), GFP_KERNEL);
	if (!vpfn)
		return -ENOMEM;

	vpfn->iova = iova;
	vpfn->pfn = pfn;
	atomic_set(&vpfn->ref_count, 1);
	vfio_link_pfn(dma, vpfn);
	return 0;
}

static void vfio_remove_from_pfn_list(struct vfio_dma *dma,
				      struct vfio_pfn *vpfn)
{
	vfio_unlink_pfn(dma, vpfn);
	kfree(vpfn);
}

static struct vfio_pfn *vfio_iova_get_vfio_pfn(struct vfio_dma *dma,
					       unsigned long iova)
{
	struct vfio_pfn *vpfn = vfio_find_vpfn(dma, iova);

	if (vpfn)
		atomic_inc(&vpfn->ref_count);
	return vpfn;
}

static int vfio_iova_put_vfio_pfn(struct vfio_dma *dma, struct vfio_pfn *vpfn)
{
	int ret = 0;

	if (atomic_dec_and_test(&vpfn->ref_count)) {
		ret = put_pfn(vpfn->pfn, dma->prot);
		vfio_remove_from_pfn_list(dma, vpfn);
	}
	return ret;
}

static int vfio_lock_acct(struct vfio_dma *dma, long npage, bool async)
{
	struct mm_struct *mm;
	int ret;

	if (!npage)
		return 0;

	mm = async ? get_task_mm(dma->task) : dma->task->mm;
	if (!mm)
		return -ESRCH; /* process exited */

	ret = down_write_killable(&mm->mmap_sem);
	if (!ret) {
		if (npage > 0) {
			if (!dma->lock_cap) {
				unsigned long limit;

				limit = task_rlimit(dma->task,
						RLIMIT_MEMLOCK) >> PAGE_SHIFT;

				if (mm->locked_vm + npage > limit)
					ret = -ENOMEM;
			}
		}

		if (!ret)
			mm->locked_vm += npage;

		up_write(&mm->mmap_sem);
	}

	if (async)
		mmput(mm);

	return ret;
}

/*
 * Some mappings aren't backed by a struct page, for example an mmap'd
 * MMIO range for our own or another device.  These use a different
 * pfn conversion and shouldn't be tracked as locked pages.
 */
static bool is_invalid_reserved_pfn(unsigned long pfn)
{
	if (pfn_valid(pfn)) {
		bool reserved;
		struct page *tail = pfn_to_page(pfn);
		struct page *head = compound_head(tail);
		reserved = !!(PageReserved(head));
		if (head != tail) {
			/*
			 * "head" is not a dangling pointer
			 * (compound_head takes care of that)
			 * but the hugepage may have been split
			 * from under us (and we may not hold a
			 * reference count on the head page so it can
			 * be reused before we run PageReferenced), so
			 * we've to check PageTail before returning
			 * what we just read.
			 */
			smp_rmb();
			if (PageTail(tail))
				return reserved;
		}
		return PageReserved(tail);
	}

	return true;
}

static int put_pfn(unsigned long pfn, int prot)
{
	if (!is_invalid_reserved_pfn(pfn)) {
		struct page *page = pfn_to_page(pfn);
		if (prot & IOMMU_WRITE)
			SetPageDirty(page);
		put_page(page);
		return 1;
	}
	return 0;
}

static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
			 int prot, unsigned long *pfn)
{
	struct page *page[1];
	struct vm_area_struct *vma;
	struct vm_area_struct *vmas[1];
	unsigned int flags = 0;
	int ret;

	if (prot & IOMMU_WRITE)
		flags |= FOLL_WRITE;

	down_read(&mm->mmap_sem);
	if (mm == current->mm) {
		ret = get_user_pages_longterm(vaddr, 1, flags, page, vmas);
	} else {
		ret = get_user_pages_remote(NULL, mm, vaddr, 1, flags, page,
					    vmas, NULL);
		/*
		 * The lifetime of a vaddr_get_pfn() page pin is
		 * userspace-controlled. In the fs-dax case this could
		 * lead to indefinite stalls in filesystem operations.
		 * Disallow attempts to pin fs-dax pages via this
		 * interface.
		 */
		if (ret > 0 && vma_is_fsdax(vmas[0])) {
			ret = -EOPNOTSUPP;
			put_page(page[0]);
		}
	}
	up_read(&mm->mmap_sem);

	if (ret == 1) {
		*pfn = page_to_pfn(page[0]);
		return 0;
	}

	down_read(&mm->mmap_sem);

	vma = find_vma_intersection(mm, vaddr, vaddr + 1);

	if (vma && vma->vm_flags & VM_PFNMAP) {
		*pfn = ((vaddr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
		if (is_invalid_reserved_pfn(*pfn))
			ret = 0;
	}

	up_read(&mm->mmap_sem);
	return ret;
}

/*
 * Attempt to pin pages.  We really don't want to track all the pfns and
 * the iommu can only map chunks of consecutive pfns anyway, so get the
 * first page and all consecutive pages with the same locking.
 */
static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
				  long npage, unsigned long *pfn_base,
				  unsigned long limit)
{
	unsigned long pfn = 0;
	long ret, pinned = 0, lock_acct = 0;
	bool rsvd;
	dma_addr_t iova = vaddr - dma->vaddr + dma->iova;

	/* This code path is only user initiated */
	if (!current->mm)
		return -ENODEV;

	ret = vaddr_get_pfn(current->mm, vaddr, dma->prot, pfn_base);
	if (ret)
		return ret;

	pinned++;
	rsvd = is_invalid_reserved_pfn(*pfn_base);

	/*
	 * Reserved pages aren't counted against the user, externally pinned
	 * pages are already counted against the user.
	 */
	if (!rsvd && !vfio_find_vpfn(dma, iova)) {
		if (!dma->lock_cap && current->mm->locked_vm + 1 > limit) {
			put_pfn(*pfn_base, dma->prot);
			pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", __func__,
					limit << PAGE_SHIFT);
			return -ENOMEM;
		}
		lock_acct++;
	}

	if (unlikely(disable_hugepages))
		goto out;

	/* Lock all the consecutive pages from pfn_base */
	for (vaddr += PAGE_SIZE, iova += PAGE_SIZE; pinned < npage;
	     pinned++, vaddr += PAGE_SIZE, iova += PAGE_SIZE) {
		ret = vaddr_get_pfn(current->mm, vaddr, dma->prot, &pfn);
		if (ret)
			break;

		if (pfn != *pfn_base + pinned ||
		    rsvd != is_invalid_reserved_pfn(pfn)) {
			put_pfn(pfn, dma->prot);
			break;
		}

		if (!rsvd && !vfio_find_vpfn(dma, iova)) {
			if (!dma->lock_cap &&
			    current->mm->locked_vm + lock_acct + 1 > limit) {
				put_pfn(pfn, dma->prot);
				pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n",
					__func__, limit << PAGE_SHIFT);
				ret = -ENOMEM;
				goto unpin_out;
			}
			lock_acct++;
		}
	}

out:
	ret = vfio_lock_acct(dma, lock_acct, false);

unpin_out:
	if (ret) {
		if (!rsvd) {
			for (pfn = *pfn_base ; pinned ; pfn++, pinned--)
				put_pfn(pfn, dma->prot);
		}

		return ret;
	}

	return pinned;
}

static long vfio_unpin_pages_remote(struct vfio_dma *dma, dma_addr_t iova,
				    unsigned long pfn, long npage,
				    bool do_accounting)
{
	long unlocked = 0, locked = 0;
	long i;

	for (i = 0; i < npage; i++, iova += PAGE_SIZE) {
		if (put_pfn(pfn++, dma->prot)) {
			unlocked++;
			if (vfio_find_vpfn(dma, iova))
				locked++;
		}
	}

	if (do_accounting)
		vfio_lock_acct(dma, locked - unlocked, true);

	return unlocked;
}

static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr,
				  unsigned long *pfn_base, bool do_accounting)
{
	struct mm_struct *mm;
	int ret;

	mm = get_task_mm(dma->task);
	if (!mm)
		return -ENODEV;

	ret = vaddr_get_pfn(mm, vaddr, dma->prot, pfn_base);
	if (!ret && do_accounting && !is_invalid_reserved_pfn(*pfn_base)) {
		ret = vfio_lock_acct(dma, 1, true);
		if (ret) {
			put_pfn(*pfn_base, dma->prot);
			if (ret == -ENOMEM)
				pr_warn("%s: Task %s (%d) RLIMIT_MEMLOCK "
					"(%ld) exceeded\n", __func__,
					dma->task->comm, task_pid_nr(dma->task),
					task_rlimit(dma->task, RLIMIT_MEMLOCK));
		}
	}

	mmput(mm);
	return ret;
}

static int vfio_unpin_page_external(struct vfio_dma *dma, dma_addr_t iova,
				    bool do_accounting)
{
	int unlocked;
	struct vfio_pfn *vpfn = vfio_find_vpfn(dma, iova);

	if (!vpfn)
		return 0;

	unlocked = vfio_iova_put_vfio_pfn(dma, vpfn);

	if (do_accounting)
		vfio_lock_acct(dma, -unlocked, true);

	return unlocked;
}

static int vfio_iommu_type1_pin_pages(void *iommu_data,
				      unsigned long *user_pfn,
				      int npage, int prot,
				      unsigned long *phys_pfn)
{
	struct vfio_iommu *iommu = iommu_data;
	int i, j, ret;
	unsigned long remote_vaddr;
	struct vfio_dma *dma;
	bool do_accounting;

	if (!iommu || !user_pfn || !phys_pfn)
		return -EINVAL;

	/* Supported for v2 version only */
	if (!iommu->v2)
		return -EACCES;

	mutex_lock(&iommu->lock);

	/* Fail if notifier list is empty */
	if ((!iommu->external_domain) || (!iommu->notifier.head)) {
		ret = -EINVAL;
		goto pin_done;
	}

	/*
	 * If an iommu capable domain exists in the container then all pages
	 * are already pinned and accounted.  Accounting should be done if
	 * there is no iommu capable domain in the container.
	 */
	do_accounting = !IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu);

	for (i = 0; i < npage; i++) {
		dma_addr_t iova;
		struct vfio_pfn *vpfn;

		iova = user_pfn[i] << PAGE_SHIFT;
		dma = vfio_find_dma(iommu, iova, PAGE_SIZE);
		if (!dma) {
			ret = -EINVAL;
			goto pin_unwind;
		}

		if ((dma->prot & prot) != prot) {
			ret = -EPERM;
			goto pin_unwind;
		}

		vpfn = vfio_iova_get_vfio_pfn(dma, iova);
		if (vpfn) {
			phys_pfn[i] = vpfn->pfn;
			continue;
		}

		remote_vaddr = dma->vaddr + iova - dma->iova;
		ret = vfio_pin_page_external(dma, remote_vaddr, &phys_pfn[i],
					     do_accounting);
		if (ret)
			goto pin_unwind;

		ret = vfio_add_to_pfn_list(dma, iova, phys_pfn[i]);
		if (ret) {
			vfio_unpin_page_external(dma, iova, do_accounting);
			goto pin_unwind;
		}
	}

	ret = i;
	goto pin_done;

pin_unwind:
	phys_pfn[i] = 0;
	for (j = 0; j < i; j++) {
		dma_addr_t iova;

		iova = user_pfn[j] << PAGE_SHIFT;
		dma = vfio_find_dma(iommu, iova, PAGE_SIZE);
		vfio_unpin_page_external(dma, iova, do_accounting);
		phys_pfn[j] = 0;
	}
pin_done:
	mutex_unlock(&iommu->lock);
	return ret;
}

static int vfio_iommu_type1_unpin_pages(void *iommu_data,
					unsigned long *user_pfn,
					int npage)
{
	struct vfio_iommu *iommu = iommu_data;
	bool do_accounting;
	int i;

	if (!iommu || !user_pfn)
		return -EINVAL;

	/* Supported for v2 version only */
	if (!iommu->v2)
		return -EACCES;

	mutex_lock(&iommu->lock);

	if (!iommu->external_domain) {
		mutex_unlock(&iommu->lock);
		return -EINVAL;
	}

	do_accounting = !IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu);
	for (i = 0; i < npage; i++) {
		struct vfio_dma *dma;
		dma_addr_t iova;

		iova = user_pfn[i] << PAGE_SHIFT;
		dma = vfio_find_dma(iommu, iova, PAGE_SIZE);
		if (!dma)
			goto unpin_exit;
		vfio_unpin_page_external(dma, iova, do_accounting);
	}

unpin_exit:
	mutex_unlock(&iommu->lock);
	return i > npage ? npage : (i > 0 ? i : -EINVAL);
}
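/*
 * Illustrative sketch, not part of this driver: the two callbacks above
 * are reached from an mdev vendor driver through the vfio core's
 * vfio_pin_pages()/vfio_unpin_pages() exports.  The helper below and its
 * device/iova parameters are hypothetical, shown only to make the
 * calling convention concrete.
 */
#if 0	/* example only */
static int example_mdev_pin_one(struct device *mdev_dev, dma_addr_t iova,
				unsigned long *host_pfn)
{
	unsigned long user_pfn = iova >> PAGE_SHIFT;
	int ret;

	/* Pin the one page of the user mapping backing this iova */
	ret = vfio_pin_pages(mdev_dev, &user_pfn, 1,
			     IOMMU_READ | IOMMU_WRITE, host_pfn);
	if (ret != 1)
		return ret < 0 ? ret : -EFAULT;

	/* ... program the device with *host_pfn ... */

	/* Drop the pin once the device is done with the page */
	vfio_unpin_pages(mdev_dev, &user_pfn, 1);
	return 0;
}
#endif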
static long vfio_sync_unpin(struct vfio_dma *dma, struct vfio_domain *domain,
			    struct list_head *regions)
{
	long unlocked = 0;
	struct vfio_regions *entry, *next;

	iommu_tlb_sync(domain->domain);

	list_for_each_entry_safe(entry, next, regions, list) {
		unlocked += vfio_unpin_pages_remote(dma,
						    entry->iova,
						    entry->phys >> PAGE_SHIFT,
						    entry->len >> PAGE_SHIFT,
						    false);
		list_del(&entry->list);
		kfree(entry);
	}

	cond_resched();

	return unlocked;
}

/*
 * Generally, VFIO needs to unpin remote pages after each IOTLB flush.
 * Therefore, when using the IOTLB flush sync interface, VFIO needs to keep
 * track of these regions (currently using a list).
 *
 * This value specifies the maximum number of regions for each IOTLB flush
 * sync.
 */
#define VFIO_IOMMU_TLB_SYNC_MAX		512

static size_t unmap_unpin_fast(struct vfio_domain *domain,
			       struct vfio_dma *dma, dma_addr_t *iova,
			       size_t len, phys_addr_t phys, long *unlocked,
			       struct list_head *unmapped_list,
			       int *unmapped_cnt)
{
	size_t unmapped = 0;
	struct vfio_regions *entry = kzalloc(sizeof(*entry), GFP_KERNEL);

	if (entry) {
		unmapped = iommu_unmap_fast(domain->domain, *iova, len);

		if (!unmapped) {
			kfree(entry);
		} else {
			iommu_tlb_range_add(domain->domain, *iova, unmapped);
			entry->iova = *iova;
			entry->phys = phys;
			entry->len = unmapped;
			list_add_tail(&entry->list, unmapped_list);

			*iova += unmapped;
			(*unmapped_cnt)++;
		}
	}

	/*
	 * Sync if the number of fast-unmap regions hits the limit
	 * or in case of errors.
	 */
	if (*unmapped_cnt >= VFIO_IOMMU_TLB_SYNC_MAX || !unmapped) {
		*unlocked += vfio_sync_unpin(dma, domain,
					     unmapped_list);
		*unmapped_cnt = 0;
	}

	return unmapped;
}

static size_t unmap_unpin_slow(struct vfio_domain *domain,
			       struct vfio_dma *dma, dma_addr_t *iova,
			       size_t len, phys_addr_t phys,
			       long *unlocked)
{
	size_t unmapped = iommu_unmap(domain->domain, *iova, len);

	if (unmapped) {
		*unlocked += vfio_unpin_pages_remote(dma, *iova,
						     phys >> PAGE_SHIFT,
						     unmapped >> PAGE_SHIFT,
						     false);
		*iova += unmapped;
		cond_resched();
	}
	return unmapped;
}

static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
			     bool do_accounting)
{
	dma_addr_t iova = dma->iova, end = dma->iova + dma->size;
	struct vfio_domain *domain, *d;
	LIST_HEAD(unmapped_region_list);
	int unmapped_region_cnt = 0;
	long unlocked = 0;

	if (!dma->size)
		return 0;

	if (!IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu))
		return 0;

	/*
	 * We use the IOMMU to track the physical addresses, otherwise we'd
	 * need a much more complicated tracking system.  Unfortunately that
	 * means we need to use one of the iommu domains to figure out the
	 * pfns to unpin.  The rest need to be unmapped in advance so we have
	 * no iommu translations remaining when the pages are unpinned.
	 */
	domain = d = list_first_entry(&iommu->domain_list,
				      struct vfio_domain, next);

	list_for_each_entry_continue(d, &iommu->domain_list, next) {
		iommu_unmap(d->domain, dma->iova, dma->size);
		cond_resched();
	}

	while (iova < end) {
		size_t unmapped, len;
		phys_addr_t phys, next;

		phys = iommu_iova_to_phys(domain->domain, iova);
		if (WARN_ON(!phys)) {
			iova += PAGE_SIZE;
			continue;
		}

		/*
		 * To optimize for fewer iommu_unmap() calls, each of which
		 * may require hardware cache flushing, try to find the
		 * largest contiguous physical memory chunk to unmap.
		 */
		for (len = PAGE_SIZE;
		     !domain->fgsp && iova + len < end; len += PAGE_SIZE) {
			next = iommu_iova_to_phys(domain->domain, iova + len);
			if (next != phys + len)
				break;
		}

		/*
		 * First, try to use fast unmap/unpin. In case of failure,
		 * switch to slow unmap/unpin path.
		 */
		unmapped = unmap_unpin_fast(domain, dma, &iova, len, phys,
					    &unlocked, &unmapped_region_list,
					    &unmapped_region_cnt);
		if (!unmapped) {
			unmapped = unmap_unpin_slow(domain, dma, &iova, len,
						    phys, &unlocked);
			if (WARN_ON(!unmapped))
				break;
		}
	}

	dma->iommu_mapped = false;

	if (unmapped_region_cnt)
		unlocked += vfio_sync_unpin(dma, domain, &unmapped_region_list);

	if (do_accounting) {
		vfio_lock_acct(dma, -unlocked, true);
		return 0;
	}
	return unlocked;
}

static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma)
{
	vfio_unmap_unpin(iommu, dma, true);
	vfio_unlink_dma(iommu, dma);
	put_task_struct(dma->task);
	kfree(dma);
}

static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu)
{
	struct vfio_domain *domain;
	unsigned long bitmap = ULONG_MAX;

	mutex_lock(&iommu->lock);
	list_for_each_entry(domain, &iommu->domain_list, next)
		bitmap &= domain->domain->pgsize_bitmap;
	mutex_unlock(&iommu->lock);

	/*
	 * In case the IOMMU supports page sizes smaller than PAGE_SIZE
	 * we pretend PAGE_SIZE is supported and hide sub-PAGE_SIZE sizes.
	 * That way the user will be able to map/unmap buffers whose size/
	 * start address is aligned with PAGE_SIZE.  Pinning code uses that
	 * granularity while iommu driver can use the sub-PAGE_SIZE size
	 * to map the buffer.
	 */
	if (bitmap & ~PAGE_MASK) {
		bitmap &= PAGE_MASK;
		bitmap |= PAGE_SIZE;
	}

	return bitmap;
}
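/*
 * Worked example, not part of this driver: how the map/unmap paths below
 * derive a minimum granularity from this bitmap.  An IOMMU advertising
 * 4K/2M/1G pages yields bitmap 0x40201000; __ffs() picks bit 12, so the
 * alignment mask is 0xfff and requests must be 4K aligned.  The helper
 * name below is hypothetical.
 */
#if 0	/* example only */
static bool example_request_aligned(struct vfio_iommu *iommu,
				    dma_addr_t iova, size_t size)
{
	uint64_t mask = ((uint64_t)1 << __ffs(vfio_pgsize_bitmap(iommu))) - 1;

	/* A request is usable only if no sub-granularity bits are set */
	return !((iova | size) & mask);
}
#endif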
static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
			     struct vfio_iommu_type1_dma_unmap *unmap)
{
	uint64_t mask;
	struct vfio_dma *dma, *dma_last = NULL;
	size_t unmapped = 0;
	int ret = 0, retries = 0;

	mask = ((uint64_t)1 << __ffs(vfio_pgsize_bitmap(iommu))) - 1;

	if (unmap->iova & mask)
		return -EINVAL;
	if (!unmap->size || unmap->size & mask)
		return -EINVAL;
	if (unmap->iova + unmap->size - 1 < unmap->iova ||
	    unmap->size > SIZE_MAX)
		return -EINVAL;

	WARN_ON(mask & PAGE_MASK);
again:
	mutex_lock(&iommu->lock);

	/*
	 * vfio-iommu-type1 (v1) - User mappings were coalesced together to
	 * avoid tracking individual mappings.  This means that the granularity
	 * of the original mapping was lost and the user was allowed to attempt
	 * to unmap any range.  Depending on the contiguousness of physical
	 * memory and page sizes supported by the IOMMU, arbitrary unmaps may
	 * or may not have worked.  We only guaranteed unmap granularity
	 * matching the original mapping; even though it was untracked here,
	 * the original mappings are reflected in IOMMU mappings.  This
	 * resulted in a couple unusual behaviors.  First, if a range is not
	 * able to be unmapped, ex. a set of 4k pages that was mapped as a
	 * 2M hugepage into the IOMMU, the unmap ioctl returns success but with
	 * a zero sized unmap.  Also, if an unmap request overlaps the first
	 * address of a hugepage, the IOMMU will unmap the entire hugepage.
	 * This also returns success and the returned unmap size reflects the
	 * actual size unmapped.
	 *
	 * We attempt to maintain compatibility with this "v1" interface, but
	 * we take control out of the hands of the IOMMU.  Therefore, an unmap
	 * request offset from the beginning of the original mapping will
	 * return success with zero sized unmap.  And an unmap request covering
	 * the first iova of mapping will unmap the entire range.
	 *
	 * The v2 version of this interface intends to be more deterministic.
	 * Unmap requests must fully cover previous mappings.  Multiple
	 * mappings may still be unmapped by specifying large ranges, but there
	 * must not be any previous mappings bisected by the range.  An error
	 * will be returned if these conditions are not met.  The v2 interface
	 * will only return success and a size of zero if there were no
	 * mappings within the range.
	 */
	if (iommu->v2) {
		dma = vfio_find_dma(iommu, unmap->iova, 1);
		if (dma && dma->iova != unmap->iova) {
			ret = -EINVAL;
			goto unlock;
		}
		dma = vfio_find_dma(iommu, unmap->iova + unmap->size - 1, 0);
		if (dma && dma->iova + dma->size != unmap->iova + unmap->size) {
			ret = -EINVAL;
			goto unlock;
		}
	}

	while ((dma = vfio_find_dma(iommu, unmap->iova, unmap->size))) {
		if (!iommu->v2 && unmap->iova > dma->iova)
			break;
		/*
		 * Task with same address space who mapped this iova range is
		 * allowed to unmap the iova range.
		 */
		if (dma->task->mm != current->mm)
			break;

		if (!RB_EMPTY_ROOT(&dma->pfn_list)) {
			struct vfio_iommu_type1_dma_unmap nb_unmap;

			if (dma_last == dma) {
				BUG_ON(++retries > 10);
			} else {
				dma_last = dma;
				retries = 0;
			}

			nb_unmap.iova = dma->iova;
			nb_unmap.size = dma->size;

			/*
			 * Notify anyone (mdev vendor drivers) to invalidate and
			 * unmap iovas within the range we're about to unmap.
			 * Vendor drivers MUST unpin pages in response to an
			 * invalidation.
			 */
			mutex_unlock(&iommu->lock);
			blocking_notifier_call_chain(&iommu->notifier,
						    VFIO_IOMMU_NOTIFY_DMA_UNMAP,
						    &nb_unmap);
			goto again;
		}
		unmapped += dma->size;
		vfio_remove_dma(iommu, dma);
	}

unlock:
	mutex_unlock(&iommu->lock);

	/* Report how much was unmapped */
	unmap->size = unmapped;

	return ret;
}
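/*
 * Illustrative userspace sketch, not part of this driver: exercising the
 * v2 unmap semantics described above through the UAPI.  "container_fd"
 * is a hypothetical fd on /dev/vfio/vfio with a Type1 v2 IOMMU set; the
 * range must exactly cover whole prior mappings or the ioctl fails.
 */
#if 0	/* example only, built against the <linux/vfio.h> UAPI */
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int example_dma_unmap(int container_fd, __u64 iova, __u64 size)
{
	struct vfio_iommu_type1_dma_unmap unmap = {
		.argsz = sizeof(unmap),
		.iova = iova,
		.size = size,
	};

	if (ioctl(container_fd, VFIO_IOMMU_UNMAP_DMA, &unmap))
		return -1;

	/* On return, unmap.size reports how many bytes were unmapped */
	return 0;
}
#endif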
static int vfio_iommu_map(struct vfio_iommu *iommu, dma_addr_t iova,
			  unsigned long pfn, long npage, int prot)
{
	struct vfio_domain *d;
	int ret;

	list_for_each_entry(d, &iommu->domain_list, next) {
		ret = iommu_map(d->domain, iova, (phys_addr_t)pfn << PAGE_SHIFT,
				npage << PAGE_SHIFT, prot | d->prot);
		if (ret)
			goto unwind;

		cond_resched();
	}

	return 0;

unwind:
	list_for_each_entry_continue_reverse(d, &iommu->domain_list, next)
		iommu_unmap(d->domain, iova, npage << PAGE_SHIFT);

	return ret;
}

static int vfio_pin_map_dma(struct vfio_iommu *iommu, struct vfio_dma *dma,
			    size_t map_size)
{
	dma_addr_t iova = dma->iova;
	unsigned long vaddr = dma->vaddr;
	size_t size = map_size;
	long npage;
	unsigned long pfn, limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	int ret = 0;

	while (size) {
		/* Pin a contiguous chunk of memory */
		npage = vfio_pin_pages_remote(dma, vaddr + dma->size,
					      size >> PAGE_SHIFT, &pfn, limit);
		if (npage <= 0) {
			WARN_ON(!npage);
			ret = (int)npage;
			break;
		}

		/* Map it! */
		ret = vfio_iommu_map(iommu, iova + dma->size, pfn, npage,
				     dma->prot);
		if (ret) {
			vfio_unpin_pages_remote(dma, iova + dma->size, pfn,
						npage, true);
			break;
		}

		size -= npage << PAGE_SHIFT;
		dma->size += npage << PAGE_SHIFT;
	}

	dma->iommu_mapped = true;

	if (ret)
		vfio_remove_dma(iommu, dma);

	return ret;
}

static int vfio_dma_do_map(struct vfio_iommu *iommu,
			   struct vfio_iommu_type1_dma_map *map)
{
	dma_addr_t iova = map->iova;
	unsigned long vaddr = map->vaddr;
	size_t size = map->size;
	int ret = 0, prot = 0;
	uint64_t mask;
	struct vfio_dma *dma;

	/* Verify that none of our __u64 fields overflow */
	if (map->size != size || map->vaddr != vaddr || map->iova != iova)
		return -EINVAL;

	mask = ((uint64_t)1 << __ffs(vfio_pgsize_bitmap(iommu))) - 1;

	WARN_ON(mask & PAGE_MASK);

	/* READ/WRITE from device perspective */
	if (map->flags & VFIO_DMA_MAP_FLAG_WRITE)
		prot |= IOMMU_WRITE;
	if (map->flags & VFIO_DMA_MAP_FLAG_READ)
		prot |= IOMMU_READ;

	if (!prot || !size || (size | iova | vaddr) & mask)
		return -EINVAL;

	/* Don't allow IOVA or virtual address wrap */
	if (iova + size - 1 < iova || vaddr + size - 1 < vaddr)
		return -EINVAL;

	mutex_lock(&iommu->lock);

	if (vfio_find_dma(iommu, iova, size)) {
		ret = -EEXIST;
		goto out_unlock;
	}

	dma = kzalloc(sizeof(*dma), GFP_KERNEL);
	if (!dma) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	dma->iova = iova;
	dma->vaddr = vaddr;
	dma->prot = prot;

	/*
	 * We need to be able to both add to a task's locked memory and test
	 * against the locked memory limit and we need to be able to do both
	 * outside of this call path as pinning can be asynchronous via the
	 * external interfaces for mdev devices.  RLIMIT_MEMLOCK requires a
	 * task_struct and VM locked pages requires an mm_struct, however
	 * holding an indefinite mm reference is not recommended, therefore we
	 * only hold a reference to a task.  We could hold a reference to
	 * current, however QEMU uses this call path through vCPU threads,
	 * which can be killed resulting in a NULL mm and failure in the unmap
	 * path when called via a different thread.  Avoid this problem by
	 * using the group_leader as threads within the same group require
	 * both CLONE_THREAD and CLONE_VM and will therefore use the same
	 * mm_struct.
	 *
	 * Previously we also used the task for testing CAP_IPC_LOCK at the
	 * time of pinning and accounting, however has_capability() makes use
	 * of real_cred, a copy-on-write field, so we can't guarantee that it
	 * matches group_leader, or in fact that it might not change by the
	 * time it's evaluated.  If a process were to call MAP_DMA with
	 * CAP_IPC_LOCK but later drop it, it doesn't make sense that they
	 * possibly see different results for an iommu_mapped vfio_dma vs
	 * externally mapped.  Therefore track CAP_IPC_LOCK in vfio_dma at the
	 * time of calling MAP_DMA.
	 */
	get_task_struct(current->group_leader);
	dma->task = current->group_leader;
	dma->lock_cap = capable(CAP_IPC_LOCK);

	dma->pfn_list = RB_ROOT;

	/* Insert zero-sized and grow as we map chunks of it */
	vfio_link_dma(iommu, dma);

	/* Don't pin and map if container doesn't contain IOMMU capable domain */
	if (!IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu))
		dma->size = size;
	else
		ret = vfio_pin_map_dma(iommu, dma, size);

out_unlock:
	mutex_unlock(&iommu->lock);
	return ret;
}
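/*
 * Illustrative userspace sketch, not part of this driver: the MAP_DMA
 * request serviced above.  "container_fd" and the buffer are
 * hypothetical; vaddr, iova and size must be aligned per
 * vfio_pgsize_bitmap(), and the pinned pages count against
 * RLIMIT_MEMLOCK unless the caller holds CAP_IPC_LOCK.
 */
#if 0	/* example only, built against the <linux/vfio.h> UAPI */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int example_dma_map(int container_fd, void *buf, __u64 iova,
			   __u64 size)
{
	struct vfio_iommu_type1_dma_map map = {
		.argsz = sizeof(map),
		.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
		.vaddr = (uintptr_t)buf,
		.iova = iova,
		.size = size,
	};

	/* The device gets read+write access to buf through iova */
	return ioctl(container_fd, VFIO_IOMMU_MAP_DMA, &map);
}
#endif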
static int vfio_bus_type(struct device *dev, void *data)
{
	struct bus_type **bus = data;

	if (*bus && *bus != dev->bus)
		return -EINVAL;

	*bus = dev->bus;

	return 0;
}

static int vfio_iommu_replay(struct vfio_iommu *iommu,
			     struct vfio_domain *domain)
{
	struct vfio_domain *d;
	struct rb_node *n;
	unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	int ret;

	/* Arbitrarily pick the first domain in the list for lookups */
	d = list_first_entry(&iommu->domain_list, struct vfio_domain, next);
	n = rb_first(&iommu->dma_list);

	for (; n; n = rb_next(n)) {
		struct vfio_dma *dma;
		dma_addr_t iova;

		dma = rb_entry(n, struct vfio_dma, node);
		iova = dma->iova;

		while (iova < dma->iova + dma->size) {
			phys_addr_t phys;
			size_t size;

			if (dma->iommu_mapped) {
				phys_addr_t p;
				dma_addr_t i;

				phys = iommu_iova_to_phys(d->domain, iova);

				if (WARN_ON(!phys)) {
					iova += PAGE_SIZE;
					continue;
				}

				size = PAGE_SIZE;
				p = phys + size;
				i = iova + size;
				while (i < dma->iova + dma->size &&
				       p == iommu_iova_to_phys(d->domain, i)) {
					size += PAGE_SIZE;
					p += PAGE_SIZE;
					i += PAGE_SIZE;
				}
			} else {
				unsigned long pfn;
				unsigned long vaddr = dma->vaddr +
						     (iova - dma->iova);
				size_t n = dma->iova + dma->size - iova;
				long npage;

				npage = vfio_pin_pages_remote(dma, vaddr,
							      n >> PAGE_SHIFT,
							      &pfn, limit);
				if (npage <= 0) {
					WARN_ON(!npage);
					ret = (int)npage;
					return ret;
				}

				phys = pfn << PAGE_SHIFT;
				size = npage << PAGE_SHIFT;
			}

			ret = iommu_map(domain->domain, iova, phys,
					size, dma->prot | domain->prot);
			if (ret)
				return ret;

			iova += size;
		}
		dma->iommu_mapped = true;
	}
	return 0;
}

/*
 * We change our unmap behavior slightly depending on whether the IOMMU
 * supports fine-grained superpages.  IOMMUs like AMD-Vi will use a superpage
 * for practically any contiguous power-of-two mapping we give it.  This means
 * we don't need to look for contiguous chunks ourselves to make unmapping
 * more efficient.  On IOMMUs with coarse-grained super pages, like Intel VT-d
 * with discrete 2M/1G/512G/1T superpages, identifying contiguous chunks
 * significantly boosts non-hugetlbfs mappings and doesn't seem to hurt when
 * hugetlbfs is in use.
 */
static void vfio_test_domain_fgsp(struct vfio_domain *domain)
{
	struct page *pages;
	int ret, order = get_order(PAGE_SIZE * 2);

	pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!pages)
		return;

	ret = iommu_map(domain->domain, 0, page_to_phys(pages), PAGE_SIZE * 2,
			IOMMU_READ | IOMMU_WRITE | domain->prot);
	if (!ret) {
		size_t unmapped = iommu_unmap(domain->domain, 0, PAGE_SIZE);

		if (unmapped == PAGE_SIZE)
			iommu_unmap(domain->domain, PAGE_SIZE, PAGE_SIZE);
		else
			domain->fgsp = true;
	}

	__free_pages(pages, order);
}

static struct vfio_group *find_iommu_group(struct vfio_domain *domain,
					   struct iommu_group *iommu_group)
{
	struct vfio_group *g;

	list_for_each_entry(g, &domain->group_list, next) {
		if (g->iommu_group == iommu_group)
			return g;
	}

	return NULL;
}

static bool vfio_iommu_has_sw_msi(struct iommu_group *group, phys_addr_t *base)
{
	struct list_head group_resv_regions;
	struct iommu_resv_region *region, *next;
	bool ret = false;

	INIT_LIST_HEAD(&group_resv_regions);
	iommu_get_group_resv_regions(group, &group_resv_regions);
	list_for_each_entry(region, &group_resv_regions, list) {
		/*
		 * The presence of any 'real' MSI regions should take
		 * precedence over the software-managed one if the
		 * IOMMU driver happens to advertise both types.
		 */
		if (region->type == IOMMU_RESV_MSI) {
			ret = false;
			break;
		}

		if (region->type == IOMMU_RESV_SW_MSI) {
			*base = region->start;
			ret = true;
		}
	}
	list_for_each_entry_safe(region, next, &group_resv_regions, list)
		kfree(region);
	return ret;
}

static int vfio_iommu_type1_attach_group(void *iommu_data,
					 struct iommu_group *iommu_group)
{
	struct vfio_iommu *iommu = iommu_data;
	struct vfio_group *group;
	struct vfio_domain *domain, *d;
	struct bus_type *bus = NULL, *mdev_bus;
	int ret;
	bool resv_msi, msi_remap;
	phys_addr_t resv_msi_base;

	mutex_lock(&iommu->lock);

	list_for_each_entry(d, &iommu->domain_list, next) {
		if (find_iommu_group(d, iommu_group)) {
			mutex_unlock(&iommu->lock);
			return -EINVAL;
		}
	}

	if (iommu->external_domain) {
		if (find_iommu_group(iommu->external_domain, iommu_group)) {
			mutex_unlock(&iommu->lock);
			return -EINVAL;
		}
	}

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!group || !domain) {
		ret = -ENOMEM;
		goto out_free;
	}

	group->iommu_group = iommu_group;

	/* Determine bus_type in order to allocate a domain */
	ret = iommu_group_for_each_dev(iommu_group, &bus, vfio_bus_type);
	if (ret)
		goto out_free;

	mdev_bus = symbol_get(mdev_bus_type);

	if (mdev_bus) {
		if ((bus == mdev_bus) && !iommu_present(bus)) {
			symbol_put(mdev_bus_type);
			if (!iommu->external_domain) {
				INIT_LIST_HEAD(&domain->group_list);
				iommu->external_domain = domain;
			} else
				kfree(domain);

			list_add(&group->next,
				 &iommu->external_domain->group_list);
			mutex_unlock(&iommu->lock);
			return 0;
		}
		symbol_put(mdev_bus_type);
	}

	domain->domain = iommu_domain_alloc(bus);
	if (!domain->domain) {
		ret = -EIO;
		goto out_free;
	}

	if (iommu->nesting) {
		int attr = 1;

		ret = iommu_domain_set_attr(domain->domain, DOMAIN_ATTR_NESTING,
					    &attr);
		if (ret)
			goto out_domain;
	}

	ret = iommu_attach_group(domain->domain, iommu_group);
	if (ret)
		goto out_domain;

	resv_msi = vfio_iommu_has_sw_msi(iommu_group, &resv_msi_base);

	INIT_LIST_HEAD(&domain->group_list);
	list_add(&group->next, &domain->group_list);

	msi_remap = irq_domain_check_msi_remap() ||
		    iommu_capable(bus, IOMMU_CAP_INTR_REMAP);

	if (!allow_unsafe_interrupts && !msi_remap) {
		pr_warn("%s: No interrupt remapping support.  Use the module param \"allow_unsafe_interrupts\" to enable VFIO IOMMU support on this platform\n",
		       __func__);
		ret = -EPERM;
		goto out_detach;
	}

	if (iommu_capable(bus, IOMMU_CAP_CACHE_COHERENCY))
		domain->prot |= IOMMU_CACHE;

	/*
	 * Try to match an existing compatible domain.  We don't want to
	 * preclude an IOMMU driver supporting multiple bus_types and being
	 * able to include different bus_types in the same IOMMU domain, so
	 * we test whether the domains use the same iommu_ops rather than
	 * testing if they're on the same bus_type.
	 */
	list_for_each_entry(d, &iommu->domain_list, next) {
		if (d->domain->ops == domain->domain->ops &&
		    d->prot == domain->prot) {
			iommu_detach_group(domain->domain, iommu_group);
			if (!iommu_attach_group(d->domain, iommu_group)) {
				list_add(&group->next, &d->group_list);
				iommu_domain_free(domain->domain);
				kfree(domain);
				mutex_unlock(&iommu->lock);
				return 0;
			}

			ret = iommu_attach_group(domain->domain, iommu_group);
			if (ret)
				goto out_domain;
		}
	}

	vfio_test_domain_fgsp(domain);

	/* replay mappings on new domains */
	ret = vfio_iommu_replay(iommu, domain);
	if (ret)
		goto out_detach;

	if (resv_msi) {
		ret = iommu_get_msi_cookie(domain->domain, resv_msi_base);
		if (ret)
			goto out_detach;
	}

	list_add(&domain->next, &iommu->domain_list);

	mutex_unlock(&iommu->lock);

	return 0;

out_detach:
	iommu_detach_group(domain->domain, iommu_group);
out_domain:
	iommu_domain_free(domain->domain);
out_free:
	kfree(domain);
	kfree(group);
	mutex_unlock(&iommu->lock);
	return ret;
}

static void vfio_iommu_unmap_unpin_all(struct vfio_iommu *iommu)
{
	struct rb_node *node;

	while ((node = rb_first(&iommu->dma_list)))
		vfio_remove_dma(iommu, rb_entry(node, struct vfio_dma, node));
}

static void vfio_iommu_unmap_unpin_reaccount(struct vfio_iommu *iommu)
{
	struct rb_node *n, *p;

	n = rb_first(&iommu->dma_list);
	for (; n; n = rb_next(n)) {
		struct vfio_dma *dma;
		long locked = 0, unlocked = 0;

		dma = rb_entry(n, struct vfio_dma, node);
		unlocked += vfio_unmap_unpin(iommu, dma, false);
		p = rb_first(&dma->pfn_list);
		for (; p; p = rb_next(p)) {
			struct vfio_pfn *vpfn = rb_entry(p, struct vfio_pfn,
							 node);

			if (!is_invalid_reserved_pfn(vpfn->pfn))
				locked++;
		}
		vfio_lock_acct(dma, locked - unlocked, true);
	}
}

static void vfio_sanity_check_pfn_list(struct vfio_iommu *iommu)
{
	struct rb_node *n;

	n = rb_first(&iommu->dma_list);
	for (; n; n = rb_next(n)) {
		struct vfio_dma *dma;

		dma = rb_entry(n, struct vfio_dma, node);

		if (WARN_ON(!RB_EMPTY_ROOT(&dma->pfn_list)))
			break;
	}
	/* mdev vendor driver must unregister notifier */
	WARN_ON(iommu->notifier.head);
}

static void vfio_iommu_type1_detach_group(void *iommu_data,
					  struct iommu_group *iommu_group)
{
	struct vfio_iommu *iommu = iommu_data;
	struct vfio_domain *domain;
	struct vfio_group *group;

	mutex_lock(&iommu->lock);

	if (iommu->external_domain) {
		group = find_iommu_group(iommu->external_domain, iommu_group);
		if (group) {
			list_del(&group->next);
			kfree(group);

			if (list_empty(&iommu->external_domain->group_list)) {
				vfio_sanity_check_pfn_list(iommu);

				if (!IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu))
					vfio_iommu_unmap_unpin_all(iommu);

				kfree(iommu->external_domain);
				iommu->external_domain = NULL;
			}
			goto detach_group_done;
		}
	}

	list_for_each_entry(domain, &iommu->domain_list, next) {
		group = find_iommu_group(domain, iommu_group);
		if (!group)
			continue;

		iommu_detach_group(domain->domain, iommu_group);
		list_del(&group->next);
		kfree(group);
		/*
		 * Group ownership provides privilege, if the group list is
		 * empty, the domain goes away.  If it's the last domain with
		 * iommu and no external domain exists, then all the mappings
		 * go away too.  If it's the last domain with iommu and an
		 * external domain exists, update accounting.
		 */
		if (list_empty(&domain->group_list)) {
			if (list_is_singular(&iommu->domain_list)) {
				if (!iommu->external_domain)
					vfio_iommu_unmap_unpin_all(iommu);
				else
					vfio_iommu_unmap_unpin_reaccount(iommu);
			}
			iommu_domain_free(domain->domain);
			list_del(&domain->next);
			kfree(domain);
		}
		break;
	}

detach_group_done:
	mutex_unlock(&iommu->lock);
}

static void *vfio_iommu_type1_open(unsigned long arg)
{
	struct vfio_iommu *iommu;

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return ERR_PTR(-ENOMEM);

	switch (arg) {
	case VFIO_TYPE1_IOMMU:
		break;
	case VFIO_TYPE1_NESTING_IOMMU:
		iommu->nesting = true;
		/* fall through */
	case VFIO_TYPE1v2_IOMMU:
		iommu->v2 = true;
		break;
	default:
		kfree(iommu);
		return ERR_PTR(-EINVAL);
	}

	INIT_LIST_HEAD(&iommu->domain_list);
	iommu->dma_list = RB_ROOT;
	mutex_init(&iommu->lock);
	BLOCKING_INIT_NOTIFIER_HEAD(&iommu->notifier);

	return iommu;
}
1591 | ||
a54eb550 KW |
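/*
 * Release all groups of a domain.  For an external (mdev) domain there
 * is no hardware IOMMU behind it, so the iommu_detach_group() and
 * iommu_domain_free() calls are skipped.
 */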
static void vfio_release_domain(struct vfio_domain *domain, bool external)
{
	struct vfio_group *group, *group_tmp;

	list_for_each_entry_safe(group, group_tmp,
				 &domain->group_list, next) {
		if (!external)
			iommu_detach_group(domain->domain, group->iommu_group);
		list_del(&group->next);
		kfree(group);
	}

	if (!external)
		iommu_domain_free(domain->domain);
}

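/*
 * Container release: tear down the external domain first (pins must be
 * gone by now, see vfio_sanity_check_pfn_list()), then unmap and unpin
 * everything and free the remaining IOMMU domains.
 */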
static void vfio_iommu_type1_release(void *iommu_data)
{
	struct vfio_iommu *iommu = iommu_data;
	struct vfio_domain *domain, *domain_tmp;

	if (iommu->external_domain) {
		vfio_release_domain(iommu->external_domain, true);
		vfio_sanity_check_pfn_list(iommu);
		kfree(iommu->external_domain);
	}

	vfio_iommu_unmap_unpin_all(iommu);

	list_for_each_entry_safe(domain, domain_tmp,
				 &iommu->domain_list, next) {
		vfio_release_domain(domain, false);
		list_del(&domain->next);
		kfree(domain);
	}

	kfree(iommu);
}

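/*
 * Report whether every domain in the container enforces cache
 * coherency (IOMMU_CACHE); used to answer VFIO_DMA_CC_IOMMU below.
 */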
static int vfio_domains_have_iommu_cache(struct vfio_iommu *iommu)
{
	struct vfio_domain *domain;
	int ret = 1;

	mutex_lock(&iommu->lock);
	list_for_each_entry(domain, &iommu->domain_list, next) {
		if (!(domain->prot & IOMMU_CACHE)) {
			ret = 0;
			break;
		}
	}
	mutex_unlock(&iommu->lock);

	return ret;
}

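/*
 * Container ioctls: extension probing, VFIO_IOMMU_GET_INFO, and the
 * DMA map/unmap entry points.  Each struct is validated by argsz
 * against the minimum size this kernel understands, so newer
 * userspace with a larger struct still works.
 */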
static long vfio_iommu_type1_ioctl(void *iommu_data,
				   unsigned int cmd, unsigned long arg)
{
	struct vfio_iommu *iommu = iommu_data;
	unsigned long minsz;

	if (cmd == VFIO_CHECK_EXTENSION) {
		switch (arg) {
		case VFIO_TYPE1_IOMMU:
		case VFIO_TYPE1v2_IOMMU:
		case VFIO_TYPE1_NESTING_IOMMU:
			return 1;
		case VFIO_DMA_CC_IOMMU:
			if (!iommu)
				return 0;
			return vfio_domains_have_iommu_cache(iommu);
		default:
			return 0;
		}
	} else if (cmd == VFIO_IOMMU_GET_INFO) {
		struct vfio_iommu_type1_info info;

		minsz = offsetofend(struct vfio_iommu_type1_info, iova_pgsizes);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		info.flags = VFIO_IOMMU_INFO_PGSIZES;

		info.iova_pgsizes = vfio_pgsize_bitmap(iommu);

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_IOMMU_MAP_DMA) {
		struct vfio_iommu_type1_dma_map map;
		uint32_t mask = VFIO_DMA_MAP_FLAG_READ |
				VFIO_DMA_MAP_FLAG_WRITE;

		minsz = offsetofend(struct vfio_iommu_type1_dma_map, size);

		if (copy_from_user(&map, (void __user *)arg, minsz))
			return -EFAULT;

		if (map.argsz < minsz || map.flags & ~mask)
			return -EINVAL;

		return vfio_dma_do_map(iommu, &map);

	} else if (cmd == VFIO_IOMMU_UNMAP_DMA) {
		struct vfio_iommu_type1_dma_unmap unmap;
		long ret;

		minsz = offsetofend(struct vfio_iommu_type1_dma_unmap, size);

		if (copy_from_user(&unmap, (void __user *)arg, minsz))
			return -EFAULT;

		if (unmap.argsz < minsz || unmap.flags)
			return -EINVAL;

		ret = vfio_dma_do_unmap(iommu, &unmap);
		if (ret)
			return ret;

		return copy_to_user((void __user *)arg, &unmap, minsz) ?
			-EFAULT : 0;
	}

	return -ENOTTY;
}

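/*
 * For illustration only (not part of the driver): a minimal userspace
 * sketch of the map/unmap path handled above.  It assumes a container
 * fd already bound to a group and configured with
 * ioctl(container, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU); "buf" and "len"
 * are hypothetical and page-aligned.
 *
 *	struct vfio_iommu_type1_dma_map map = {
 *		.argsz = sizeof(map),
 *		.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
 *		.vaddr = (__u64)(uintptr_t)buf,
 *		.iova  = 0,
 *		.size  = len,
 *	};
 *	struct vfio_iommu_type1_dma_unmap unmap = {
 *		.argsz = sizeof(unmap),
 *		.iova  = 0,
 *		.size  = len,
 *	};
 *
 *	if (ioctl(container, VFIO_IOMMU_MAP_DMA, &map))
 *		perror("VFIO_IOMMU_MAP_DMA");
 *	... device DMA to iova 0..len happens here ...
 *	if (ioctl(container, VFIO_IOMMU_UNMAP_DMA, &unmap))
 *		perror("VFIO_IOMMU_UNMAP_DMA");
 */
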
static int vfio_iommu_type1_register_notifier(void *iommu_data,
					      unsigned long *events,
					      struct notifier_block *nb)
{
	struct vfio_iommu *iommu = iommu_data;

	/* Clear the events we know how to deliver */
	*events &= ~VFIO_IOMMU_NOTIFY_DMA_UNMAP;

	/* Refuse to register if any unknown events remain */
	if (*events)
		return -EINVAL;

	return blocking_notifier_chain_register(&iommu->notifier, nb);
}

static int vfio_iommu_type1_unregister_notifier(void *iommu_data,
						struct notifier_block *nb)
{
	struct vfio_iommu *iommu = iommu_data;

	return blocking_notifier_chain_unregister(&iommu->notifier, nb);
}

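/*
 * Callbacks exported to the VFIO core; vfio_register_iommu_driver()
 * below makes this backend selectable through VFIO_SET_IOMMU.
 */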
static const struct vfio_iommu_driver_ops vfio_iommu_driver_ops_type1 = {
	.name			= "vfio-iommu-type1",
	.owner			= THIS_MODULE,
	.open			= vfio_iommu_type1_open,
	.release		= vfio_iommu_type1_release,
	.ioctl			= vfio_iommu_type1_ioctl,
	.attach_group		= vfio_iommu_type1_attach_group,
	.detach_group		= vfio_iommu_type1_detach_group,
	.pin_pages		= vfio_iommu_type1_pin_pages,
	.unpin_pages		= vfio_iommu_type1_unpin_pages,
	.register_notifier	= vfio_iommu_type1_register_notifier,
	.unregister_notifier	= vfio_iommu_type1_unregister_notifier,
};

static int __init vfio_iommu_type1_init(void)
{
	return vfio_register_iommu_driver(&vfio_iommu_driver_ops_type1);
}

static void __exit vfio_iommu_type1_cleanup(void)
{
	vfio_unregister_iommu_driver(&vfio_iommu_driver_ops_type1);
}

module_init(vfio_iommu_type1_init);
module_exit(vfio_iommu_type1_cleanup);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);