Commit | Line | Data |
---|---|---|
d2912cb1 | 1 | // SPDX-License-Identifier: GPL-2.0-only |
73fa0d10 AW |
2 | /* |
3 | * VFIO: IOMMU DMA mapping support for Type1 IOMMU | |
4 | * | |
5 | * Copyright (C) 2012 Red Hat, Inc. All rights reserved. | |
6 | * Author: Alex Williamson <alex.williamson@redhat.com> | |
7 | * | |
73fa0d10 AW |
8 | * Derived from original vfio: |
9 | * Copyright 2010 Cisco Systems, Inc. All rights reserved. | |
10 | * Author: Tom Lyon, pugs@cisco.com | |
11 | * | |
12 | * We arbitrarily define a Type1 IOMMU as one matching the below code. | |
13 | * It could be called the x86 IOMMU as it's designed for AMD-Vi & Intel | |
14 | * VT-d, but that makes it harder to re-use as theoretically anyone | |
15 | * implementing a similar IOMMU could make use of this. We expect the | |
16 | * IOMMU to support the IOMMU API and have few to no restrictions around | |
17 | * the IOVA range that can be mapped. The Type1 IOMMU is currently | |
18 | * optimized for relatively static mappings of a userspace process with | |
06d738c8 | 19 | * userspace pages pinned into memory. We also assume devices and IOMMU |
73fa0d10 AW |
20 | * domains are PCI based as the IOMMU API is still centered around a |
21 | * device/bus interface rather than a group interface. | |
22 | */ | |
23 | ||
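For orientation, a minimal user-space sketch of the map interface this driver backs (illustrative only; assumes a container fd on which VFIO_TYPE1v2_IOMMU has already been selected, plus hypothetical buf, buf_size and IOVA values; headers <linux/vfio.h> and <sys/ioctl.h>):

	struct vfio_iommu_type1_dma_map map = {
		.argsz = sizeof(map),
		.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
		.vaddr = (__u64)(uintptr_t)buf,	/* page-aligned user buffer */
		.iova  = 0x100000,		/* address the device will use */
		.size  = buf_size,		/* multiple of the IOMMU page size */
	};

	if (ioctl(container_fd, VFIO_IOMMU_MAP_DMA, &map))
		perror("VFIO_IOMMU_MAP_DMA");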
24 | #include <linux/compat.h> | |
25 | #include <linux/device.h> | |
26 | #include <linux/fs.h> | |
07956b62 | 27 | #include <linux/highmem.h> |
73fa0d10 AW |
28 | #include <linux/iommu.h> |
29 | #include <linux/module.h> | |
30 | #include <linux/mm.h> | |
4dbe59a6 | 31 | #include <linux/kthread.h> |
cd9b2268 | 32 | #include <linux/rbtree.h> |
3f07c014 | 33 | #include <linux/sched/signal.h> |
6e84f315 | 34 | #include <linux/sched/mm.h> |
73fa0d10 AW |
35 | #include <linux/slab.h> |
36 | #include <linux/uaccess.h> | |
37 | #include <linux/vfio.h> | |
38 | #include <linux/workqueue.h> | |
a54eb550 | 39 | #include <linux/mdev.h> |
c086de81 | 40 | #include <linux/notifier.h> |
5d704992 | 41 | #include <linux/dma-iommu.h> |
9d72f87b | 42 | #include <linux/irqdomain.h> |
73fa0d10 AW |
43 | |
44 | #define DRIVER_VERSION "0.2" | |
45 | #define DRIVER_AUTHOR "Alex Williamson <alex.williamson@redhat.com>" | |
46 | #define DRIVER_DESC "Type1 IOMMU driver for VFIO" | |
47 | ||
48 | static bool allow_unsafe_interrupts; | |
49 | module_param_named(allow_unsafe_interrupts, | |
50 | allow_unsafe_interrupts, bool, S_IRUGO | S_IWUSR); | |
51 | MODULE_PARM_DESC(allow_unsafe_interrupts, | |
52 | "Enable VFIO IOMMU support for on platforms without interrupt remapping support."); | |
53 | ||
5c6c2b21 AW |
54 | static bool disable_hugepages; |
55 | module_param_named(disable_hugepages, | |
56 | disable_hugepages, bool, S_IRUGO | S_IWUSR); | |
57 | MODULE_PARM_DESC(disable_hugepages, | |
58 | "Disable VFIO IOMMU support for IOMMU hugepages."); | |
59 | ||
49285593 AW |
60 | static unsigned int dma_entry_limit __read_mostly = U16_MAX; |
61 | module_param_named(dma_entry_limit, dma_entry_limit, uint, 0644); | |
62 | MODULE_PARM_DESC(dma_entry_limit, | |
63 | "Maximum number of user DMA mappings per container (65535)."); | |
64 | ||
73fa0d10 | 65 | struct vfio_iommu { |
1ef3e2bc | 66 | struct list_head domain_list; |
1108696a | 67 | struct list_head iova_list; |
a54eb550 | 68 | struct vfio_domain *external_domain; /* domain for external user */ |
73fa0d10 | 69 | struct mutex lock; |
cd9b2268 | 70 | struct rb_root dma_list; |
c086de81 | 71 | struct blocking_notifier_head notifier; |
49285593 | 72 | unsigned int dma_avail; |
c3cbab24 | 73 | unsigned int vaddr_invalid_count; |
cade075f | 74 | uint64_t pgsize_bitmap; |
01032156 | 75 | uint64_t num_non_pinned_groups; |
898b9eae | 76 | wait_queue_head_t vaddr_wait; |
f5c9eceb WD |
77 | bool v2; |
78 | bool nesting; | |
d6a4c185 | 79 | bool dirty_page_tracking; |
487ace13 | 80 | bool container_open; |
1ef3e2bc AW |
81 | }; |
82 | ||
83 | struct vfio_domain { | |
84 | struct iommu_domain *domain; | |
85 | struct list_head next; | |
73fa0d10 | 86 | struct list_head group_list; |
1ef3e2bc | 87 | int prot; /* IOMMU_CACHE */ |
6fe1010d | 88 | bool fgsp; /* Fine-grained super pages */ |
73fa0d10 AW |
89 | }; |
90 | ||
91 | struct vfio_dma { | |
cd9b2268 | 92 | struct rb_node node; |
73fa0d10 AW |
93 | dma_addr_t iova; /* Device address */ |
94 | unsigned long vaddr; /* Process virtual addr */ | |
166fd7d9 | 95 | size_t size; /* Map size (bytes) */ |
73fa0d10 | 96 | int prot; /* IOMMU_READ/WRITE */ |
a54eb550 | 97 | bool iommu_mapped; |
48d8476b | 98 | bool lock_cap; /* capable(CAP_IPC_LOCK) */ |
c3cbab24 | 99 | bool vaddr_invalid; |
8f0d5bb9 | 100 | struct task_struct *task; |
a54eb550 | 101 | struct rb_root pfn_list; /* Ex-user pinned pfn list */ |
d6a4c185 | 102 | unsigned long *bitmap; |
73fa0d10 AW |
103 | }; |
104 | ||
4b6c33b3 DJ |
105 | struct vfio_batch { |
106 | struct page **pages; /* for pin_user_pages_remote */ | |
107 | struct page *fallback_page; /* if pages alloc fails */ | |
108 | int capacity; /* length of pages array */ | |
4d83de6d DJ |
109 | int size; /* of batch currently */ |
110 | int offset; /* of next entry in pages */ | |
4b6c33b3 DJ |
111 | }; |
112 | ||
c7396f2e | 113 | struct vfio_iommu_group { |
73fa0d10 AW |
114 | struct iommu_group *iommu_group; |
115 | struct list_head next; | |
7bd50f0c | 116 | bool mdev_group; /* An mdev group */ |
95fc87b4 | 117 | bool pinned_page_dirty_scope; |
73fa0d10 AW |
118 | }; |
119 | ||
1108696a SK |
120 | struct vfio_iova { |
121 | struct list_head list; | |
122 | dma_addr_t start; | |
123 | dma_addr_t end; | |
124 | }; | |
125 | ||
a54eb550 KW |
126 | /* |
127 | * Guest RAM pinning working set or DMA target | |
128 | */ | |
129 | struct vfio_pfn { | |
130 | struct rb_node node; | |
131 | dma_addr_t iova; /* Device address */ | |
132 | unsigned long pfn; /* Host pfn */ | |
65817085 | 133 | unsigned int ref_count; |
a54eb550 KW |
134 | }; |
135 | ||
6bd06f5a SS |
136 | struct vfio_regions { |
137 | struct list_head list; | |
138 | dma_addr_t iova; | |
139 | phys_addr_t phys; | |
140 | size_t len; | |
141 | }; | |
142 | ||
a54eb550 KW |
143 | #define IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu) \ |
144 | (!list_empty(&iommu->domain_list)) | |
145 | ||
d6a4c185 KW |
146 | #define DIRTY_BITMAP_BYTES(n) (ALIGN(n, BITS_PER_TYPE(u64)) / BITS_PER_BYTE) |
147 | ||
148 | /* | |
149 | * The number-of-bits argument to bitmap_set() is an unsigned int, which | |
150 | * __bitmap_set() further casts to a signed int for the unaligned | |
151 | * multi-bit case. | |
152 | * The maximum supported bitmap is therefore 2^31 bits / 2^3 bits per byte, | |
153 | * that is 2^28 bytes (256 MB), which covers 2^31 * 2^12 = 2^43 (8 TB) on a | |
154 | * 4K page system. | |
155 | */ | |
156 | #define DIRTY_BITMAP_PAGES_MAX ((u64)INT_MAX) | |
157 | #define DIRTY_BITMAP_SIZE_MAX DIRTY_BITMAP_BYTES(DIRTY_BITMAP_PAGES_MAX) | |
158 | ||
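A short worked example of this sizing (added for illustration, not part of the original source; the 1 GiB figure is arbitrary):

	/*
	 * A single 1 GiB vfio_dma on a 4 KiB page system spans 2^18 pages,
	 * so DIRTY_BITMAP_BYTES(1UL << 18) = 2^18 / 8 = 32 KiB of bitmap,
	 * far below DIRTY_BITMAP_SIZE_MAX (256 MB, the bitmap for the 8 TB
	 * worst case above).
	 */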
898b9eae SS |
159 | #define WAITED 1 |
160 | ||
a54eb550 KW |
161 | static int put_pfn(unsigned long pfn, int prot); |
162 | ||
c7396f2e MG |
163 | static struct vfio_iommu_group* |
164 | vfio_iommu_find_iommu_group(struct vfio_iommu *iommu, | |
165 | struct iommu_group *iommu_group); | |
95fc87b4 | 166 | |
73fa0d10 AW |
167 | /* |
168 | * This code handles mapping and unmapping of user data buffers | |
169 | * into DMA'ble space using the IOMMU | |
170 | */ | |
171 | ||
cd9b2268 AW |
172 | static struct vfio_dma *vfio_find_dma(struct vfio_iommu *iommu, |
173 | dma_addr_t start, size_t size) | |
174 | { | |
175 | struct rb_node *node = iommu->dma_list.rb_node; | |
176 | ||
177 | while (node) { | |
178 | struct vfio_dma *dma = rb_entry(node, struct vfio_dma, node); | |
179 | ||
180 | if (start + size <= dma->iova) | |
181 | node = node->rb_left; | |
166fd7d9 | 182 | else if (start >= dma->iova + dma->size) |
cd9b2268 AW |
183 | node = node->rb_right; |
184 | else | |
185 | return dma; | |
186 | } | |
187 | ||
188 | return NULL; | |
189 | } | |
190 | ||
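A note on the lookup above (added for clarity, not in the original source): each vfio_dma is treated as the half-open interval [iova, iova + size), so with a mapping at [0x1000, 0x3000):

	/* vfio_find_dma(iommu, 0x2000, 1) returns the mapping (ranges overlap)  */
	/* vfio_find_dma(iommu, 0x3000, 1) returns NULL (starts past its end)    */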
40ae9b80 | 191 | static struct rb_node *vfio_find_dma_first_node(struct vfio_iommu *iommu, |
7dc4b2fd | 192 | dma_addr_t start, u64 size) |
40ae9b80 SS |
193 | { |
194 | struct rb_node *res = NULL; | |
195 | struct rb_node *node = iommu->dma_list.rb_node; | |
196 | struct vfio_dma *dma_res = NULL; | |
197 | ||
198 | while (node) { | |
199 | struct vfio_dma *dma = rb_entry(node, struct vfio_dma, node); | |
200 | ||
201 | if (start < dma->iova + dma->size) { | |
202 | res = node; | |
203 | dma_res = dma; | |
204 | if (start >= dma->iova) | |
205 | break; | |
206 | node = node->rb_left; | |
207 | } else { | |
208 | node = node->rb_right; | |
209 | } | |
210 | } | |
211 | if (res && size && dma_res->iova >= start + size) | |
212 | res = NULL; | |
213 | return res; | |
214 | } | |
215 | ||
1ef3e2bc | 216 | static void vfio_link_dma(struct vfio_iommu *iommu, struct vfio_dma *new) |
cd9b2268 AW |
217 | { |
218 | struct rb_node **link = &iommu->dma_list.rb_node, *parent = NULL; | |
219 | struct vfio_dma *dma; | |
220 | ||
221 | while (*link) { | |
222 | parent = *link; | |
223 | dma = rb_entry(parent, struct vfio_dma, node); | |
224 | ||
166fd7d9 | 225 | if (new->iova + new->size <= dma->iova) |
cd9b2268 AW |
226 | link = &(*link)->rb_left; |
227 | else | |
228 | link = &(*link)->rb_right; | |
229 | } | |
230 | ||
231 | rb_link_node(&new->node, parent, link); | |
232 | rb_insert_color(&new->node, &iommu->dma_list); | |
233 | } | |
234 | ||
1ef3e2bc | 235 | static void vfio_unlink_dma(struct vfio_iommu *iommu, struct vfio_dma *old) |
cd9b2268 AW |
236 | { |
237 | rb_erase(&old->node, &iommu->dma_list); | |
238 | } | |
239 | ||
d6a4c185 KW |
240 | |
241 | static int vfio_dma_bitmap_alloc(struct vfio_dma *dma, size_t pgsize) | |
242 | { | |
243 | uint64_t npages = dma->size / pgsize; | |
244 | ||
245 | if (npages > DIRTY_BITMAP_PAGES_MAX) | |
246 | return -EINVAL; | |
247 | ||
248 | /* | |
249 | * Allocate an extra 64 bits at the end, used by bitmap_shift_left() to | |
250 | * shift and merge ("club") an unaligned number of pages with those of an | |
251 | * adjacent vfio_dma range when reporting the bitmap to userspace. | |
252 | */ | |
253 | dma->bitmap = kvzalloc(DIRTY_BITMAP_BYTES(npages) + sizeof(u64), | |
254 | GFP_KERNEL); | |
255 | if (!dma->bitmap) | |
256 | return -ENOMEM; | |
257 | ||
258 | return 0; | |
259 | } | |
260 | ||
261 | static void vfio_dma_bitmap_free(struct vfio_dma *dma) | |
262 | { | |
263 | kfree(dma->bitmap); | |
264 | dma->bitmap = NULL; | |
265 | } | |
266 | ||
267 | static void vfio_dma_populate_bitmap(struct vfio_dma *dma, size_t pgsize) | |
268 | { | |
269 | struct rb_node *p; | |
cd0bb41e | 270 | unsigned long pgshift = __ffs(pgsize); |
d6a4c185 KW |
271 | |
272 | for (p = rb_first(&dma->pfn_list); p; p = rb_next(p)) { | |
273 | struct vfio_pfn *vpfn = rb_entry(p, struct vfio_pfn, node); | |
274 | ||
cd0bb41e | 275 | bitmap_set(dma->bitmap, (vpfn->iova - dma->iova) >> pgshift, 1); |
d6a4c185 KW |
276 | } |
277 | } | |
278 | ||
d0a78f91 KZ |
279 | static void vfio_iommu_populate_bitmap_full(struct vfio_iommu *iommu) |
280 | { | |
281 | struct rb_node *n; | |
282 | unsigned long pgshift = __ffs(iommu->pgsize_bitmap); | |
283 | ||
284 | for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) { | |
285 | struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node); | |
286 | ||
287 | bitmap_set(dma->bitmap, 0, dma->size >> pgshift); | |
288 | } | |
289 | } | |
290 | ||
d6a4c185 KW |
291 | static int vfio_dma_bitmap_alloc_all(struct vfio_iommu *iommu, size_t pgsize) |
292 | { | |
293 | struct rb_node *n; | |
294 | ||
295 | for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) { | |
296 | struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node); | |
297 | int ret; | |
298 | ||
299 | ret = vfio_dma_bitmap_alloc(dma, pgsize); | |
300 | if (ret) { | |
301 | struct rb_node *p; | |
302 | ||
303 | for (p = rb_prev(n); p; p = rb_prev(p)) { | |
304 | struct vfio_dma *dma = rb_entry(p, | |
305 | struct vfio_dma, node); | |
306 | ||
307 | vfio_dma_bitmap_free(dma); | |
308 | } | |
309 | return ret; | |
310 | } | |
311 | vfio_dma_populate_bitmap(dma, pgsize); | |
312 | } | |
313 | return 0; | |
314 | } | |
315 | ||
316 | static void vfio_dma_bitmap_free_all(struct vfio_iommu *iommu) | |
317 | { | |
318 | struct rb_node *n; | |
319 | ||
320 | for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) { | |
321 | struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node); | |
322 | ||
323 | vfio_dma_bitmap_free(dma); | |
324 | } | |
325 | } | |
326 | ||
a54eb550 KW |
327 | /* |
328 | * Helper Functions for host iova-pfn list | |
329 | */ | |
330 | static struct vfio_pfn *vfio_find_vpfn(struct vfio_dma *dma, dma_addr_t iova) | |
331 | { | |
332 | struct vfio_pfn *vpfn; | |
333 | struct rb_node *node = dma->pfn_list.rb_node; | |
334 | ||
335 | while (node) { | |
336 | vpfn = rb_entry(node, struct vfio_pfn, node); | |
337 | ||
338 | if (iova < vpfn->iova) | |
339 | node = node->rb_left; | |
340 | else if (iova > vpfn->iova) | |
341 | node = node->rb_right; | |
342 | else | |
343 | return vpfn; | |
344 | } | |
345 | return NULL; | |
346 | } | |
347 | ||
348 | static void vfio_link_pfn(struct vfio_dma *dma, | |
349 | struct vfio_pfn *new) | |
350 | { | |
351 | struct rb_node **link, *parent = NULL; | |
352 | struct vfio_pfn *vpfn; | |
353 | ||
354 | link = &dma->pfn_list.rb_node; | |
355 | while (*link) { | |
356 | parent = *link; | |
357 | vpfn = rb_entry(parent, struct vfio_pfn, node); | |
358 | ||
359 | if (new->iova < vpfn->iova) | |
360 | link = &(*link)->rb_left; | |
361 | else | |
362 | link = &(*link)->rb_right; | |
363 | } | |
364 | ||
365 | rb_link_node(&new->node, parent, link); | |
366 | rb_insert_color(&new->node, &dma->pfn_list); | |
367 | } | |
368 | ||
369 | static void vfio_unlink_pfn(struct vfio_dma *dma, struct vfio_pfn *old) | |
370 | { | |
371 | rb_erase(&old->node, &dma->pfn_list); | |
372 | } | |
373 | ||
374 | static int vfio_add_to_pfn_list(struct vfio_dma *dma, dma_addr_t iova, | |
375 | unsigned long pfn) | |
376 | { | |
377 | struct vfio_pfn *vpfn; | |
378 | ||
379 | vpfn = kzalloc(sizeof(*vpfn), GFP_KERNEL); | |
380 | if (!vpfn) | |
381 | return -ENOMEM; | |
382 | ||
383 | vpfn->iova = iova; | |
384 | vpfn->pfn = pfn; | |
65817085 | 385 | vpfn->ref_count = 1; |
a54eb550 KW |
386 | vfio_link_pfn(dma, vpfn); |
387 | return 0; | |
388 | } | |
389 | ||
390 | static void vfio_remove_from_pfn_list(struct vfio_dma *dma, | |
391 | struct vfio_pfn *vpfn) | |
392 | { | |
393 | vfio_unlink_pfn(dma, vpfn); | |
394 | kfree(vpfn); | |
395 | } | |
396 | ||
397 | static struct vfio_pfn *vfio_iova_get_vfio_pfn(struct vfio_dma *dma, | |
398 | unsigned long iova) | |
399 | { | |
400 | struct vfio_pfn *vpfn = vfio_find_vpfn(dma, iova); | |
401 | ||
402 | if (vpfn) | |
65817085 | 403 | vpfn->ref_count++; |
a54eb550 KW |
404 | return vpfn; |
405 | } | |
406 | ||
407 | static int vfio_iova_put_vfio_pfn(struct vfio_dma *dma, struct vfio_pfn *vpfn) | |
408 | { | |
409 | int ret = 0; | |
410 | ||
65817085 KW |
411 | vpfn->ref_count--; |
412 | if (!vpfn->ref_count) { | |
a54eb550 KW |
413 | ret = put_pfn(vpfn->pfn, dma->prot); |
414 | vfio_remove_from_pfn_list(dma, vpfn); | |
415 | } | |
416 | return ret; | |
417 | } | |
418 | ||
48d8476b | 419 | static int vfio_lock_acct(struct vfio_dma *dma, long npage, bool async) |
73fa0d10 | 420 | { |
73fa0d10 | 421 | struct mm_struct *mm; |
0cfef2b7 | 422 | int ret; |
73fa0d10 | 423 | |
3624a248 | 424 | if (!npage) |
0cfef2b7 | 425 | return 0; |
3624a248 | 426 | |
48d8476b | 427 | mm = async ? get_task_mm(dma->task) : dma->task->mm; |
3624a248 | 428 | if (!mm) |
0cfef2b7 | 429 | return -ESRCH; /* process exited */ |
73fa0d10 | 430 | |
d8ed45c5 | 431 | ret = mmap_write_lock_killable(mm); |
0cfef2b7 | 432 | if (!ret) { |
79eb597c DJ |
433 | ret = __account_locked_vm(mm, abs(npage), npage > 0, dma->task, |
434 | dma->lock_cap); | |
d8ed45c5 | 435 | mmap_write_unlock(mm); |
6c38c055 AW |
436 | } |
437 | ||
48d8476b | 438 | if (async) |
3624a248 | 439 | mmput(mm); |
0cfef2b7 AW |
440 | |
441 | return ret; | |
73fa0d10 AW |
442 | } |
443 | ||
444 | /* | |
445 | * Some mappings aren't backed by a struct page, for example an mmap'd | |
446 | * MMIO range for our own or another device. These use a different | |
447 | * pfn conversion and shouldn't be tracked as locked pages. | |
026948f0 BL |
448 | * For compound pages, any driver that sets the reserved bit in head |
449 | * page needs to set the reserved bit in all subpages to be safe. | |
73fa0d10 AW |
450 | */ |
451 | static bool is_invalid_reserved_pfn(unsigned long pfn) | |
452 | { | |
026948f0 BL |
453 | if (pfn_valid(pfn)) |
454 | return PageReserved(pfn_to_page(pfn)); | |
73fa0d10 AW |
455 | |
456 | return true; | |
457 | } | |
458 | ||
459 | static int put_pfn(unsigned long pfn, int prot) | |
460 | { | |
461 | if (!is_invalid_reserved_pfn(pfn)) { | |
462 | struct page *page = pfn_to_page(pfn); | |
19fed0da | 463 | |
f1f6a7dd | 464 | unpin_user_pages_dirty_lock(&page, 1, prot & IOMMU_WRITE); |
73fa0d10 AW |
465 | return 1; |
466 | } | |
467 | return 0; | |
468 | } | |
469 | ||
4b6c33b3 DJ |
470 | #define VFIO_BATCH_MAX_CAPACITY (PAGE_SIZE / sizeof(struct page *)) |
471 | ||
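A quick note on the capacity above (illustrative, assuming a typical 64-bit build with 4 KiB pages): PAGE_SIZE / sizeof(struct page *) = 4096 / 8 = 512, so the spare page allocated in vfio_batch_init() holds pointers for up to 512 pages per pin_user_pages_remote() call.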
472 | static void vfio_batch_init(struct vfio_batch *batch) | |
473 | { | |
4d83de6d DJ |
474 | batch->size = 0; |
475 | batch->offset = 0; | |
476 | ||
4b6c33b3 DJ |
477 | if (unlikely(disable_hugepages)) |
478 | goto fallback; | |
479 | ||
480 | batch->pages = (struct page **) __get_free_page(GFP_KERNEL); | |
481 | if (!batch->pages) | |
482 | goto fallback; | |
483 | ||
484 | batch->capacity = VFIO_BATCH_MAX_CAPACITY; | |
485 | return; | |
486 | ||
487 | fallback: | |
488 | batch->pages = &batch->fallback_page; | |
489 | batch->capacity = 1; | |
490 | } | |
491 | ||
4d83de6d DJ |
492 | static void vfio_batch_unpin(struct vfio_batch *batch, struct vfio_dma *dma) |
493 | { | |
494 | while (batch->size) { | |
495 | unsigned long pfn = page_to_pfn(batch->pages[batch->offset]); | |
496 | ||
497 | put_pfn(pfn, dma->prot); | |
498 | batch->offset++; | |
499 | batch->size--; | |
500 | } | |
501 | } | |
502 | ||
4b6c33b3 DJ |
503 | static void vfio_batch_fini(struct vfio_batch *batch) |
504 | { | |
505 | if (batch->capacity == VFIO_BATCH_MAX_CAPACITY) | |
506 | free_page((unsigned long)batch->pages); | |
507 | } | |
508 | ||
41311242 AW |
509 | static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm, |
510 | unsigned long vaddr, unsigned long *pfn, | |
511 | bool write_fault) | |
512 | { | |
07956b62 AW |
513 | pte_t *ptep; |
514 | spinlock_t *ptl; | |
41311242 AW |
515 | int ret; |
516 | ||
07956b62 | 517 | ret = follow_pte(vma->vm_mm, vaddr, &ptep, &ptl); |
41311242 AW |
518 | if (ret) { |
519 | bool unlocked = false; | |
520 | ||
64019a2e | 521 | ret = fixup_user_fault(mm, vaddr, |
41311242 AW |
522 | FAULT_FLAG_REMOTE | |
523 | (write_fault ? FAULT_FLAG_WRITE : 0), | |
524 | &unlocked); | |
525 | if (unlocked) | |
526 | return -EAGAIN; | |
527 | ||
528 | if (ret) | |
529 | return ret; | |
530 | ||
07956b62 AW |
531 | ret = follow_pte(vma->vm_mm, vaddr, &ptep, &ptl); |
532 | if (ret) | |
533 | return ret; | |
41311242 AW |
534 | } |
535 | ||
07956b62 AW |
536 | if (write_fault && !pte_write(*ptep)) |
537 | ret = -EFAULT; | |
538 | else | |
539 | *pfn = pte_pfn(*ptep); | |
540 | ||
541 | pte_unmap_unlock(ptep, ptl); | |
41311242 AW |
542 | return ret; |
543 | } | |
544 | ||
be16c1fd DJ |
545 | /* |
546 | * Returns the positive number of pfns successfully obtained or a negative | |
547 | * error code. | |
548 | */ | |
4b6c33b3 DJ |
549 | static int vaddr_get_pfns(struct mm_struct *mm, unsigned long vaddr, |
550 | long npages, int prot, unsigned long *pfn, | |
551 | struct page **pages) | |
73fa0d10 | 552 | { |
73fa0d10 | 553 | struct vm_area_struct *vma; |
bb94b55a | 554 | unsigned int flags = 0; |
ea85cf35 | 555 | int ret; |
73fa0d10 | 556 | |
bb94b55a JG |
557 | if (prot & IOMMU_WRITE) |
558 | flags |= FOLL_WRITE; | |
559 | ||
d8ed45c5 | 560 | mmap_read_lock(mm); |
4b6c33b3 DJ |
561 | ret = pin_user_pages_remote(mm, vaddr, npages, flags | FOLL_LONGTERM, |
562 | pages, NULL, NULL); | |
563 | if (ret > 0) { | |
564 | *pfn = page_to_pfn(pages[0]); | |
3567813e | 565 | goto done; |
73fa0d10 AW |
566 | } |
567 | ||
6cf5354c AK |
568 | vaddr = untagged_addr(vaddr); |
569 | ||
41311242 | 570 | retry: |
85715d68 | 571 | vma = vma_lookup(mm, vaddr); |
73fa0d10 AW |
572 | |
573 | if (vma && vma->vm_flags & VM_PFNMAP) { | |
41311242 AW |
574 | ret = follow_fault_pfn(vma, mm, vaddr, pfn, prot & IOMMU_WRITE); |
575 | if (ret == -EAGAIN) | |
576 | goto retry; | |
577 | ||
be16c1fd DJ |
578 | if (!ret) { |
579 | if (is_invalid_reserved_pfn(*pfn)) | |
580 | ret = 1; | |
581 | else | |
582 | ret = -EFAULT; | |
583 | } | |
73fa0d10 | 584 | } |
3567813e | 585 | done: |
d8ed45c5 | 586 | mmap_read_unlock(mm); |
73fa0d10 AW |
587 | return ret; |
588 | } | |
589 | ||
898b9eae SS |
590 | static int vfio_wait(struct vfio_iommu *iommu) |
591 | { | |
592 | DEFINE_WAIT(wait); | |
593 | ||
594 | prepare_to_wait(&iommu->vaddr_wait, &wait, TASK_KILLABLE); | |
595 | mutex_unlock(&iommu->lock); | |
596 | schedule(); | |
597 | mutex_lock(&iommu->lock); | |
598 | finish_wait(&iommu->vaddr_wait, &wait); | |
599 | if (kthread_should_stop() || !iommu->container_open || | |
600 | fatal_signal_pending(current)) { | |
601 | return -EFAULT; | |
602 | } | |
603 | return WAITED; | |
604 | } | |
605 | ||
606 | /* | |
607 | * Find dma struct and wait for its vaddr to be valid. iommu lock is dropped | |
608 | * if the task waits, but is re-locked on return. Return result in *dma_p. | |
609 | * Return 0 on success with no waiting, WAITED on success if waited, and -errno | |
610 | * on error. | |
611 | */ | |
612 | static int vfio_find_dma_valid(struct vfio_iommu *iommu, dma_addr_t start, | |
613 | size_t size, struct vfio_dma **dma_p) | |
614 | { | |
ffc95d1b | 615 | int ret = 0; |
898b9eae SS |
616 | |
617 | do { | |
618 | *dma_p = vfio_find_dma(iommu, start, size); | |
619 | if (!*dma_p) | |
ffc95d1b | 620 | return -EINVAL; |
898b9eae | 621 | else if (!(*dma_p)->vaddr_invalid) |
ffc95d1b | 622 | return ret; |
898b9eae SS |
623 | else |
624 | ret = vfio_wait(iommu); | |
ffc95d1b | 625 | } while (ret == WAITED); |
898b9eae SS |
626 | |
627 | return ret; | |
628 | } | |
629 | ||
630 | /* | |
631 | * Wait for all vaddr in the dma_list to become valid. iommu lock is dropped | |
632 | * if the task waits, but is re-locked on return. Return 0 on success with no | |
633 | * waiting, WAITED on success if waited, and -errno on error. | |
634 | */ | |
635 | static int vfio_wait_all_valid(struct vfio_iommu *iommu) | |
636 | { | |
637 | int ret = 0; | |
638 | ||
639 | while (iommu->vaddr_invalid_count && ret >= 0) | |
640 | ret = vfio_wait(iommu); | |
641 | ||
642 | return ret; | |
643 | } | |
644 | ||
166fd7d9 AW |
645 | /* |
646 | * Attempt to pin pages. We really don't want to track all the pfns and | |
647 | * the iommu can only map chunks of consecutive pfns anyway, so get the | |
648 | * first page and all consecutive pages with the same locking. | |
649 | */ | |
8f0d5bb9 | 650 | static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr, |
7cb671e7 | 651 | long npage, unsigned long *pfn_base, |
4b6c33b3 | 652 | unsigned long limit, struct vfio_batch *batch) |
73fa0d10 | 653 | { |
4d83de6d DJ |
654 | unsigned long pfn; |
655 | struct mm_struct *mm = current->mm; | |
6c38c055 | 656 | long ret, pinned = 0, lock_acct = 0; |
89c29def | 657 | bool rsvd; |
a54eb550 | 658 | dma_addr_t iova = vaddr - dma->vaddr + dma->iova; |
73fa0d10 | 659 | |
6c38c055 | 660 | /* This code path is only user initiated */ |
4d83de6d | 661 | if (!mm) |
166fd7d9 | 662 | return -ENODEV; |
73fa0d10 | 663 | |
4d83de6d DJ |
664 | if (batch->size) { |
665 | /* Leftover pages in batch from an earlier call. */ | |
666 | *pfn_base = page_to_pfn(batch->pages[batch->offset]); | |
667 | pfn = *pfn_base; | |
668 | rsvd = is_invalid_reserved_pfn(*pfn_base); | |
669 | } else { | |
670 | *pfn_base = 0; | |
671 | } | |
73fa0d10 | 672 | |
4d83de6d DJ |
673 | while (npage) { |
674 | if (!batch->size) { | |
675 | /* Empty batch, so refill it. */ | |
676 | long req_pages = min_t(long, npage, batch->capacity); | |
73fa0d10 | 677 | |
4d83de6d DJ |
678 | ret = vaddr_get_pfns(mm, vaddr, req_pages, dma->prot, |
679 | &pfn, batch->pages); | |
680 | if (ret < 0) | |
681 | goto unpin_out; | |
5c6c2b21 | 682 | |
4d83de6d DJ |
683 | batch->size = ret; |
684 | batch->offset = 0; | |
6c38c055 | 685 | |
4d83de6d DJ |
686 | if (!*pfn_base) { |
687 | *pfn_base = pfn; | |
688 | rsvd = is_invalid_reserved_pfn(*pfn_base); | |
689 | } | |
6c38c055 | 690 | } |
166fd7d9 | 691 | |
4d83de6d DJ |
692 | /* |
693 | * pfn is preset for the first iteration of this inner loop and | |
694 | * updated at the end to handle a VM_PFNMAP pfn. In that case, | |
695 | * batch->pages isn't valid (there's no struct page), so allow | |
696 | * batch->pages to be touched only when there's more than one | |
697 | * pfn to check, which guarantees the pfns are from a | |
698 | * !VM_PFNMAP vma. | |
699 | */ | |
700 | while (true) { | |
701 | if (pfn != *pfn_base + pinned || | |
702 | rsvd != is_invalid_reserved_pfn(pfn)) | |
703 | goto out; | |
704 | ||
705 | /* | |
706 | * Reserved pages aren't counted against the user, | |
707 | * externally pinned pages are already counted against | |
708 | * the user. | |
709 | */ | |
710 | if (!rsvd && !vfio_find_vpfn(dma, iova)) { | |
711 | if (!dma->lock_cap && | |
712 | mm->locked_vm + lock_acct + 1 > limit) { | |
713 | pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", | |
714 | __func__, limit << PAGE_SHIFT); | |
715 | ret = -ENOMEM; | |
716 | goto unpin_out; | |
717 | } | |
718 | lock_acct++; | |
a54eb550 | 719 | } |
4d83de6d DJ |
720 | |
721 | pinned++; | |
722 | npage--; | |
723 | vaddr += PAGE_SIZE; | |
724 | iova += PAGE_SIZE; | |
725 | batch->offset++; | |
726 | batch->size--; | |
727 | ||
728 | if (!batch->size) | |
729 | break; | |
730 | ||
731 | pfn = page_to_pfn(batch->pages[batch->offset]); | |
166fd7d9 | 732 | } |
4d83de6d DJ |
733 | |
734 | if (unlikely(disable_hugepages)) | |
735 | break; | |
166fd7d9 AW |
736 | } |
737 | ||
6c38c055 | 738 | out: |
48d8476b | 739 | ret = vfio_lock_acct(dma, lock_acct, false); |
0cfef2b7 AW |
740 | |
741 | unpin_out: | |
60c988bc DJ |
742 | if (batch->size == 1 && !batch->offset) { |
743 | /* May be a VM_PFNMAP pfn, which the batch can't remember. */ | |
744 | put_pfn(pfn, dma->prot); | |
745 | batch->size = 0; | |
746 | } | |
747 | ||
be16c1fd | 748 | if (ret < 0) { |
4d83de6d | 749 | if (pinned && !rsvd) { |
89c29def AW |
750 | for (pfn = *pfn_base ; pinned ; pfn++, pinned--) |
751 | put_pfn(pfn, dma->prot); | |
752 | } | |
4d83de6d | 753 | vfio_batch_unpin(batch, dma); |
0cfef2b7 AW |
754 | |
755 | return ret; | |
756 | } | |
166fd7d9 | 757 | |
6c38c055 | 758 | return pinned; |
166fd7d9 AW |
759 | } |
760 | ||
a54eb550 KW |
761 | static long vfio_unpin_pages_remote(struct vfio_dma *dma, dma_addr_t iova, |
762 | unsigned long pfn, long npage, | |
763 | bool do_accounting) | |
166fd7d9 | 764 | { |
a54eb550 | 765 | long unlocked = 0, locked = 0; |
166fd7d9 AW |
766 | long i; |
767 | ||
6c38c055 | 768 | for (i = 0; i < npage; i++, iova += PAGE_SIZE) { |
a54eb550 KW |
769 | if (put_pfn(pfn++, dma->prot)) { |
770 | unlocked++; | |
6c38c055 | 771 | if (vfio_find_vpfn(dma, iova)) |
a54eb550 KW |
772 | locked++; |
773 | } | |
774 | } | |
775 | ||
776 | if (do_accounting) | |
48d8476b | 777 | vfio_lock_acct(dma, locked - unlocked, true); |
a54eb550 KW |
778 | |
779 | return unlocked; | |
780 | } | |
781 | ||
782 | static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr, | |
783 | unsigned long *pfn_base, bool do_accounting) | |
784 | { | |
4b6c33b3 | 785 | struct page *pages[1]; |
a54eb550 KW |
786 | struct mm_struct *mm; |
787 | int ret; | |
a54eb550 KW |
788 | |
789 | mm = get_task_mm(dma->task); | |
790 | if (!mm) | |
791 | return -ENODEV; | |
792 | ||
4b6c33b3 | 793 | ret = vaddr_get_pfns(mm, vaddr, 1, dma->prot, pfn_base, pages); |
4ab4fcfc DJ |
794 | if (ret != 1) |
795 | goto out; | |
796 | ||
797 | ret = 0; | |
798 | ||
799 | if (do_accounting && !is_invalid_reserved_pfn(*pfn_base)) { | |
48d8476b | 800 | ret = vfio_lock_acct(dma, 1, true); |
0cfef2b7 AW |
801 | if (ret) { |
802 | put_pfn(*pfn_base, dma->prot); | |
80dbe1fb AW |
803 | if (ret == -ENOMEM) |
804 | pr_warn("%s: Task %s (%d) RLIMIT_MEMLOCK " | |
805 | "(%ld) exceeded\n", __func__, | |
806 | dma->task->comm, task_pid_nr(dma->task), | |
807 | task_rlimit(dma->task, RLIMIT_MEMLOCK)); | |
0cfef2b7 AW |
808 | } |
809 | } | |
810 | ||
4ab4fcfc | 811 | out: |
a54eb550 KW |
812 | mmput(mm); |
813 | return ret; | |
814 | } | |
815 | ||
816 | static int vfio_unpin_page_external(struct vfio_dma *dma, dma_addr_t iova, | |
817 | bool do_accounting) | |
818 | { | |
819 | int unlocked; | |
820 | struct vfio_pfn *vpfn = vfio_find_vpfn(dma, iova); | |
821 | ||
822 | if (!vpfn) | |
823 | return 0; | |
824 | ||
825 | unlocked = vfio_iova_put_vfio_pfn(dma, vpfn); | |
166fd7d9 AW |
826 | |
827 | if (do_accounting) | |
48d8476b | 828 | vfio_lock_acct(dma, -unlocked, true); |
166fd7d9 AW |
829 | |
830 | return unlocked; | |
831 | } | |
832 | ||
a54eb550 | 833 | static int vfio_iommu_type1_pin_pages(void *iommu_data, |
95fc87b4 | 834 | struct iommu_group *iommu_group, |
a54eb550 KW |
835 | unsigned long *user_pfn, |
836 | int npage, int prot, | |
837 | unsigned long *phys_pfn) | |
838 | { | |
839 | struct vfio_iommu *iommu = iommu_data; | |
c7396f2e | 840 | struct vfio_iommu_group *group; |
a54eb550 KW |
841 | int i, j, ret; |
842 | unsigned long remote_vaddr; | |
843 | struct vfio_dma *dma; | |
844 | bool do_accounting; | |
898b9eae | 845 | dma_addr_t iova; |
a54eb550 KW |
846 | |
847 | if (!iommu || !user_pfn || !phys_pfn) | |
848 | return -EINVAL; | |
849 | ||
850 | /* Supported for v2 version only */ | |
851 | if (!iommu->v2) | |
852 | return -EACCES; | |
853 | ||
854 | mutex_lock(&iommu->lock); | |
855 | ||
898b9eae SS |
856 | /* |
857 | * Wait for all necessary vaddr's to be valid so they can be used in | |
858 | * the main loop without dropping the lock, to avoid racing vs unmap. | |
859 | */ | |
860 | again: | |
861 | if (iommu->vaddr_invalid_count) { | |
862 | for (i = 0; i < npage; i++) { | |
863 | iova = user_pfn[i] << PAGE_SHIFT; | |
864 | ret = vfio_find_dma_valid(iommu, iova, PAGE_SIZE, &dma); | |
865 | if (ret < 0) | |
866 | goto pin_done; | |
867 | if (ret == WAITED) | |
868 | goto again; | |
869 | } | |
870 | } | |
871 | ||
c086de81 | 872 | /* Fail if notifier list is empty */ |
be068fa2 | 873 | if (!iommu->notifier.head) { |
a54eb550 KW |
874 | ret = -EINVAL; |
875 | goto pin_done; | |
876 | } | |
877 | ||
878 | /* | |
879 | * If iommu capable domain exist in the container then all pages are | |
06d738c8 | 880 | * already pinned and accounted. Accounting should be done if there is no |
a54eb550 KW |
881 | * iommu capable domain in the container. |
882 | */ | |
883 | do_accounting = !IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu); | |
884 | ||
885 | for (i = 0; i < npage; i++) { | |
a54eb550 KW |
886 | struct vfio_pfn *vpfn; |
887 | ||
888 | iova = user_pfn[i] << PAGE_SHIFT; | |
2b8bb1d7 | 889 | dma = vfio_find_dma(iommu, iova, PAGE_SIZE); |
a54eb550 KW |
890 | if (!dma) { |
891 | ret = -EINVAL; | |
892 | goto pin_unwind; | |
893 | } | |
894 | ||
895 | if ((dma->prot & prot) != prot) { | |
896 | ret = -EPERM; | |
897 | goto pin_unwind; | |
898 | } | |
899 | ||
900 | vpfn = vfio_iova_get_vfio_pfn(dma, iova); | |
901 | if (vpfn) { | |
902 | phys_pfn[i] = vpfn->pfn; | |
903 | continue; | |
904 | } | |
905 | ||
0ea971f8 | 906 | remote_vaddr = dma->vaddr + (iova - dma->iova); |
a54eb550 KW |
907 | ret = vfio_pin_page_external(dma, remote_vaddr, &phys_pfn[i], |
908 | do_accounting); | |
80dbe1fb | 909 | if (ret) |
a54eb550 | 910 | goto pin_unwind; |
a54eb550 KW |
911 | |
912 | ret = vfio_add_to_pfn_list(dma, iova, phys_pfn[i]); | |
913 | if (ret) { | |
2e6cfd49 XX |
914 | if (put_pfn(phys_pfn[i], dma->prot) && do_accounting) |
915 | vfio_lock_acct(dma, -1, true); | |
a54eb550 KW |
916 | goto pin_unwind; |
917 | } | |
d6a4c185 KW |
918 | |
919 | if (iommu->dirty_page_tracking) { | |
920 | unsigned long pgshift = __ffs(iommu->pgsize_bitmap); | |
921 | ||
922 | /* | |
923 | * Bitmap populated with the smallest supported page | |
924 | * size | |
925 | */ | |
926 | bitmap_set(dma->bitmap, | |
927 | (iova - dma->iova) >> pgshift, 1); | |
928 | } | |
a54eb550 | 929 | } |
a54eb550 | 930 | ret = i; |
95fc87b4 KW |
931 | |
932 | group = vfio_iommu_find_iommu_group(iommu, iommu_group); | |
933 | if (!group->pinned_page_dirty_scope) { | |
934 | group->pinned_page_dirty_scope = true; | |
01032156 | 935 | iommu->num_non_pinned_groups--; |
95fc87b4 KW |
936 | } |
937 | ||
a54eb550 KW |
938 | goto pin_done; |
939 | ||
940 | pin_unwind: | |
941 | phys_pfn[i] = 0; | |
942 | for (j = 0; j < i; j++) { | |
943 | dma_addr_t iova; | |
944 | ||
945 | iova = user_pfn[j] << PAGE_SHIFT; | |
2b8bb1d7 | 946 | dma = vfio_find_dma(iommu, iova, PAGE_SIZE); |
a54eb550 KW |
947 | vfio_unpin_page_external(dma, iova, do_accounting); |
948 | phys_pfn[j] = 0; | |
949 | } | |
950 | pin_done: | |
951 | mutex_unlock(&iommu->lock); | |
952 | return ret; | |
953 | } | |
954 | ||
955 | static int vfio_iommu_type1_unpin_pages(void *iommu_data, | |
956 | unsigned long *user_pfn, | |
957 | int npage) | |
958 | { | |
959 | struct vfio_iommu *iommu = iommu_data; | |
960 | bool do_accounting; | |
961 | int i; | |
962 | ||
a536019d | 963 | if (!iommu || !user_pfn || npage <= 0) |
a54eb550 KW |
964 | return -EINVAL; |
965 | ||
966 | /* Supported for v2 version only */ | |
967 | if (!iommu->v2) | |
968 | return -EACCES; | |
969 | ||
970 | mutex_lock(&iommu->lock); | |
971 | ||
a54eb550 KW |
972 | do_accounting = !IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu); |
973 | for (i = 0; i < npage; i++) { | |
974 | struct vfio_dma *dma; | |
975 | dma_addr_t iova; | |
976 | ||
977 | iova = user_pfn[i] << PAGE_SHIFT; | |
2b8bb1d7 | 978 | dma = vfio_find_dma(iommu, iova, PAGE_SIZE); |
a54eb550 | 979 | if (!dma) |
a536019d SL |
980 | break; |
981 | ||
a54eb550 KW |
982 | vfio_unpin_page_external(dma, iova, do_accounting); |
983 | } | |
984 | ||
a54eb550 | 985 | mutex_unlock(&iommu->lock); |
a536019d | 986 | return i > 0 ? i : -EINVAL; |
a54eb550 KW |
987 | } |
988 | ||
6bd06f5a | 989 | static long vfio_sync_unpin(struct vfio_dma *dma, struct vfio_domain *domain, |
a7d20dc1 WD |
990 | struct list_head *regions, |
991 | struct iommu_iotlb_gather *iotlb_gather) | |
6bd06f5a SS |
992 | { |
993 | long unlocked = 0; | |
994 | struct vfio_regions *entry, *next; | |
995 | ||
aae4c8e2 | 996 | iommu_iotlb_sync(domain->domain, iotlb_gather); |
6bd06f5a SS |
997 | |
998 | list_for_each_entry_safe(entry, next, regions, list) { | |
999 | unlocked += vfio_unpin_pages_remote(dma, | |
1000 | entry->iova, | |
1001 | entry->phys >> PAGE_SHIFT, | |
1002 | entry->len >> PAGE_SHIFT, | |
1003 | false); | |
1004 | list_del(&entry->list); | |
1005 | kfree(entry); | |
1006 | } | |
1007 | ||
1008 | cond_resched(); | |
1009 | ||
1010 | return unlocked; | |
1011 | } | |
1012 | ||
1013 | /* | |
1014 | * Generally, VFIO needs to unpin remote pages after each IOTLB flush. | |
1015 | * Therefore, when using the IOTLB flush sync interface, VFIO needs to keep track | |
1016 | * of these regions (currently using a list). | |
1017 | * | |
1018 | * This value specifies maximum number of regions for each IOTLB flush sync. | |
1019 | */ | |
1020 | #define VFIO_IOMMU_TLB_SYNC_MAX 512 | |
1021 | ||
1022 | static size_t unmap_unpin_fast(struct vfio_domain *domain, | |
1023 | struct vfio_dma *dma, dma_addr_t *iova, | |
1024 | size_t len, phys_addr_t phys, long *unlocked, | |
1025 | struct list_head *unmapped_list, | |
a7d20dc1 WD |
1026 | int *unmapped_cnt, |
1027 | struct iommu_iotlb_gather *iotlb_gather) | |
6bd06f5a SS |
1028 | { |
1029 | size_t unmapped = 0; | |
1030 | struct vfio_regions *entry = kzalloc(sizeof(*entry), GFP_KERNEL); | |
1031 | ||
1032 | if (entry) { | |
a7d20dc1 WD |
1033 | unmapped = iommu_unmap_fast(domain->domain, *iova, len, |
1034 | iotlb_gather); | |
6bd06f5a SS |
1035 | |
1036 | if (!unmapped) { | |
1037 | kfree(entry); | |
1038 | } else { | |
6bd06f5a SS |
1039 | entry->iova = *iova; |
1040 | entry->phys = phys; | |
1041 | entry->len = unmapped; | |
1042 | list_add_tail(&entry->list, unmapped_list); | |
1043 | ||
1044 | *iova += unmapped; | |
1045 | (*unmapped_cnt)++; | |
1046 | } | |
1047 | } | |
1048 | ||
1049 | /* | |
1050 | * Sync if the number of fast-unmap regions hits the limit | |
1051 | * or in case of errors. | |
1052 | */ | |
1053 | if (*unmapped_cnt >= VFIO_IOMMU_TLB_SYNC_MAX || !unmapped) { | |
a7d20dc1 WD |
1054 | *unlocked += vfio_sync_unpin(dma, domain, unmapped_list, |
1055 | iotlb_gather); | |
6bd06f5a SS |
1056 | *unmapped_cnt = 0; |
1057 | } | |
1058 | ||
1059 | return unmapped; | |
1060 | } | |
1061 | ||
1062 | static size_t unmap_unpin_slow(struct vfio_domain *domain, | |
1063 | struct vfio_dma *dma, dma_addr_t *iova, | |
1064 | size_t len, phys_addr_t phys, | |
1065 | long *unlocked) | |
1066 | { | |
1067 | size_t unmapped = iommu_unmap(domain->domain, *iova, len); | |
1068 | ||
1069 | if (unmapped) { | |
1070 | *unlocked += vfio_unpin_pages_remote(dma, *iova, | |
1071 | phys >> PAGE_SHIFT, | |
1072 | unmapped >> PAGE_SHIFT, | |
1073 | false); | |
1074 | *iova += unmapped; | |
1075 | cond_resched(); | |
1076 | } | |
1077 | return unmapped; | |
1078 | } | |
1079 | ||
a54eb550 KW |
1080 | static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma, |
1081 | bool do_accounting) | |
166fd7d9 | 1082 | { |
1ef3e2bc AW |
1083 | dma_addr_t iova = dma->iova, end = dma->iova + dma->size; |
1084 | struct vfio_domain *domain, *d; | |
6bd06f5a | 1085 | LIST_HEAD(unmapped_region_list); |
a7d20dc1 | 1086 | struct iommu_iotlb_gather iotlb_gather; |
6bd06f5a | 1087 | int unmapped_region_cnt = 0; |
166fd7d9 AW |
1088 | long unlocked = 0; |
1089 | ||
1ef3e2bc | 1090 | if (!dma->size) |
a54eb550 KW |
1091 | return 0; |
1092 | ||
1093 | if (!IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu)) | |
1094 | return 0; | |
1095 | ||
1ef3e2bc AW |
1096 | /* |
1097 | * We use the IOMMU to track the physical addresses, otherwise we'd | |
1098 | * need a much more complicated tracking system. Unfortunately that | |
1099 | * means we need to use one of the iommu domains to figure out the | |
1100 | * pfns to unpin. The rest need to be unmapped in advance so we have | |
1101 | * no iommu translations remaining when the pages are unpinned. | |
1102 | */ | |
1103 | domain = d = list_first_entry(&iommu->domain_list, | |
1104 | struct vfio_domain, next); | |
1105 | ||
c5e66887 | 1106 | list_for_each_entry_continue(d, &iommu->domain_list, next) { |
1ef3e2bc | 1107 | iommu_unmap(d->domain, dma->iova, dma->size); |
c5e66887 AW |
1108 | cond_resched(); |
1109 | } | |
1ef3e2bc | 1110 | |
a7d20dc1 | 1111 | iommu_iotlb_gather_init(&iotlb_gather); |
166fd7d9 | 1112 | while (iova < end) { |
6fe1010d AW |
1113 | size_t unmapped, len; |
1114 | phys_addr_t phys, next; | |
166fd7d9 | 1115 | |
1ef3e2bc | 1116 | phys = iommu_iova_to_phys(domain->domain, iova); |
166fd7d9 AW |
1117 | if (WARN_ON(!phys)) { |
1118 | iova += PAGE_SIZE; | |
1119 | continue; | |
73fa0d10 | 1120 | } |
166fd7d9 | 1121 | |
6fe1010d AW |
1122 | /* |
1123 | * To optimize for fewer iommu_unmap() calls, each of which | |
1124 | * may require hardware cache flushing, try to find the | |
1125 | * largest contiguous physical memory chunk to unmap. | |
1126 | */ | |
1127 | for (len = PAGE_SIZE; | |
1128 | !domain->fgsp && iova + len < end; len += PAGE_SIZE) { | |
1129 | next = iommu_iova_to_phys(domain->domain, iova + len); | |
1130 | if (next != phys + len) | |
1131 | break; | |
1132 | } | |
1133 | ||
6bd06f5a SS |
1134 | /* |
1135 | * First, try to use fast unmap/unpin. In case of failure, | |
1136 | * switch to slow unmap/unpin path. | |
1137 | */ | |
1138 | unmapped = unmap_unpin_fast(domain, dma, &iova, len, phys, | |
1139 | &unlocked, &unmapped_region_list, | |
a7d20dc1 WD |
1140 | &unmapped_region_cnt, |
1141 | &iotlb_gather); | |
6bd06f5a SS |
1142 | if (!unmapped) { |
1143 | unmapped = unmap_unpin_slow(domain, dma, &iova, len, | |
1144 | phys, &unlocked); | |
1145 | if (WARN_ON(!unmapped)) | |
1146 | break; | |
1147 | } | |
73fa0d10 | 1148 | } |
166fd7d9 | 1149 | |
a54eb550 | 1150 | dma->iommu_mapped = false; |
6bd06f5a | 1151 | |
a7d20dc1 WD |
1152 | if (unmapped_region_cnt) { |
1153 | unlocked += vfio_sync_unpin(dma, domain, &unmapped_region_list, | |
1154 | &iotlb_gather); | |
1155 | } | |
6bd06f5a | 1156 | |
a54eb550 | 1157 | if (do_accounting) { |
48d8476b | 1158 | vfio_lock_acct(dma, -unlocked, true); |
a54eb550 KW |
1159 | return 0; |
1160 | } | |
1161 | return unlocked; | |
73fa0d10 AW |
1162 | } |
1163 | ||
1ef3e2bc | 1164 | static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma) |
73fa0d10 | 1165 | { |
4a19f37a | 1166 | WARN_ON(!RB_EMPTY_ROOT(&dma->pfn_list)); |
a54eb550 | 1167 | vfio_unmap_unpin(iommu, dma, true); |
1ef3e2bc | 1168 | vfio_unlink_dma(iommu, dma); |
8f0d5bb9 | 1169 | put_task_struct(dma->task); |
d6a4c185 | 1170 | vfio_dma_bitmap_free(dma); |
898b9eae | 1171 | if (dma->vaddr_invalid) { |
c3cbab24 | 1172 | iommu->vaddr_invalid_count--; |
898b9eae SS |
1173 | wake_up_all(&iommu->vaddr_wait); |
1174 | } | |
1ef3e2bc | 1175 | kfree(dma); |
49285593 | 1176 | iommu->dma_avail++; |
1ef3e2bc | 1177 | } |
73fa0d10 | 1178 | |
cade075f | 1179 | static void vfio_update_pgsize_bitmap(struct vfio_iommu *iommu) |
1ef3e2bc AW |
1180 | { |
1181 | struct vfio_domain *domain; | |
166fd7d9 | 1182 | |
cade075f KW |
1183 | iommu->pgsize_bitmap = ULONG_MAX; |
1184 | ||
1ef3e2bc | 1185 | list_for_each_entry(domain, &iommu->domain_list, next) |
cade075f | 1186 | iommu->pgsize_bitmap &= domain->domain->pgsize_bitmap; |
73fa0d10 | 1187 | |
4644321f EA |
1188 | /* |
1189 | * In case the IOMMU supports page sizes smaller than PAGE_SIZE | |
1190 | * we pretend PAGE_SIZE is supported and hide sub-PAGE_SIZE sizes. | |
1191 | * That way the user will be able to map/unmap buffers whose size/ | |
1192 | * start address is aligned with PAGE_SIZE. Pinning code uses that | |
1193 | * granularity while iommu driver can use the sub-PAGE_SIZE size | |
1194 | * to map the buffer. | |
1195 | */ | |
cade075f KW |
1196 | if (iommu->pgsize_bitmap & ~PAGE_MASK) { |
1197 | iommu->pgsize_bitmap &= PAGE_MASK; | |
1198 | iommu->pgsize_bitmap |= PAGE_SIZE; | |
4644321f | 1199 | } |
73fa0d10 AW |
1200 | } |
1201 | ||
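An illustrative example of the intersection above (not part of the original source): if one domain supports {4K, 2M, 1G} page sizes and a second supports only {4K, 2M}, the container reports {4K, 2M}; and if a domain advertised sizes below PAGE_SIZE, those bits are masked off and PAGE_SIZE is reported instead, as the comment describes.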
95fc87b4 KW |
1202 | static int update_user_bitmap(u64 __user *bitmap, struct vfio_iommu *iommu, |
1203 | struct vfio_dma *dma, dma_addr_t base_iova, | |
1204 | size_t pgsize) | |
d6a4c185 KW |
1205 | { |
1206 | unsigned long pgshift = __ffs(pgsize); | |
1207 | unsigned long nbits = dma->size >> pgshift; | |
1208 | unsigned long bit_offset = (dma->iova - base_iova) >> pgshift; | |
1209 | unsigned long copy_offset = bit_offset / BITS_PER_LONG; | |
1210 | unsigned long shift = bit_offset % BITS_PER_LONG; | |
1211 | unsigned long leftover; | |
1212 | ||
95fc87b4 KW |
1213 | /* |
1214 | * mark all pages dirty if any IOMMU capable device is not able | |
1215 | * to report dirty pages and all pages are pinned and mapped. | |
1216 | */ | |
01032156 | 1217 | if (iommu->num_non_pinned_groups && dma->iommu_mapped) |
d6a4c185 KW |
1218 | bitmap_set(dma->bitmap, 0, nbits); |
1219 | ||
1220 | if (shift) { | |
1221 | bitmap_shift_left(dma->bitmap, dma->bitmap, shift, | |
1222 | nbits + shift); | |
1223 | ||
1224 | if (copy_from_user(&leftover, | |
c8e9df47 | 1225 | (void __user *)(bitmap + copy_offset), |
d6a4c185 KW |
1226 | sizeof(leftover))) |
1227 | return -EFAULT; | |
1228 | ||
1229 | bitmap_or(dma->bitmap, dma->bitmap, &leftover, shift); | |
1230 | } | |
1231 | ||
c8e9df47 | 1232 | if (copy_to_user((void __user *)(bitmap + copy_offset), dma->bitmap, |
d6a4c185 KW |
1233 | DIRTY_BITMAP_BYTES(nbits + shift))) |
1234 | return -EFAULT; | |
1235 | ||
1236 | return 0; | |
1237 | } | |
1238 | ||
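A worked example of the shift handling above (illustrative, not part of the original source): with 4 KiB pages, base_iova = 0 and dma->iova = 0x5000, bit_offset is 5, so copy_offset is 0 and shift is 5. dma->bitmap is shifted up by 5 bits and OR-ed with the low 5 bits ("leftover") already present in the user's first u64, so two adjacent ranges can share that word without clobbering each other.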
1239 | static int vfio_iova_dirty_bitmap(u64 __user *bitmap, struct vfio_iommu *iommu, | |
1240 | dma_addr_t iova, size_t size, size_t pgsize) | |
1241 | { | |
1242 | struct vfio_dma *dma; | |
1243 | struct rb_node *n; | |
1244 | unsigned long pgshift = __ffs(pgsize); | |
1245 | int ret; | |
1246 | ||
1247 | /* | |
1248 | * GET_BITMAP request must fully cover vfio_dma mappings. Multiple | |
1249 | * vfio_dma mappings may be clubbed by specifying large ranges, but | |
1250 | * there must not be any previous mappings bisected by the range. | |
1251 | * An error will be returned if these conditions are not met. | |
1252 | */ | |
1253 | dma = vfio_find_dma(iommu, iova, 1); | |
1254 | if (dma && dma->iova != iova) | |
1255 | return -EINVAL; | |
1256 | ||
1257 | dma = vfio_find_dma(iommu, iova + size - 1, 0); | |
1258 | if (dma && dma->iova + dma->size != iova + size) | |
1259 | return -EINVAL; | |
1260 | ||
1261 | for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) { | |
1262 | struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node); | |
1263 | ||
1264 | if (dma->iova < iova) | |
1265 | continue; | |
1266 | ||
1267 | if (dma->iova > iova + size - 1) | |
1268 | break; | |
1269 | ||
95fc87b4 | 1270 | ret = update_user_bitmap(bitmap, iommu, dma, iova, pgsize); |
d6a4c185 KW |
1271 | if (ret) |
1272 | return ret; | |
1273 | ||
1274 | /* | |
1275 | * Re-populate bitmap to include all pinned pages which are | |
1276 | * considered as dirty but exclude pages which are unpinned and | |
1277 | * pages which are marked dirty by vfio_dma_rw() | |
1278 | */ | |
1279 | bitmap_clear(dma->bitmap, 0, dma->size >> pgshift); | |
1280 | vfio_dma_populate_bitmap(dma, pgsize); | |
1281 | } | |
1282 | return 0; | |
1283 | } | |
1284 | ||
1285 | static int verify_bitmap_size(uint64_t npages, uint64_t bitmap_size) | |
1286 | { | |
1287 | if (!npages || !bitmap_size || (bitmap_size > DIRTY_BITMAP_SIZE_MAX) || | |
1288 | (bitmap_size < DIRTY_BITMAP_BYTES(npages))) | |
1289 | return -EINVAL; | |
1290 | ||
1291 | return 0; | |
1292 | } | |
1293 | ||
73fa0d10 | 1294 | static int vfio_dma_do_unmap(struct vfio_iommu *iommu, |
331e33d2 KW |
1295 | struct vfio_iommu_type1_dma_unmap *unmap, |
1296 | struct vfio_bitmap *bitmap) | |
73fa0d10 | 1297 | { |
c086de81 | 1298 | struct vfio_dma *dma, *dma_last = NULL; |
331e33d2 | 1299 | size_t unmapped = 0, pgsize; |
0f53afa1 | 1300 | int ret = -EINVAL, retries = 0; |
331e33d2 | 1301 | unsigned long pgshift; |
0f53afa1 | 1302 | dma_addr_t iova = unmap->iova; |
7dc4b2fd | 1303 | u64 size = unmap->size; |
c1965099 | 1304 | bool unmap_all = unmap->flags & VFIO_DMA_UNMAP_FLAG_ALL; |
c3cbab24 SS |
1305 | bool invalidate_vaddr = unmap->flags & VFIO_DMA_UNMAP_FLAG_VADDR; |
1306 | struct rb_node *n, *first_n; | |
73fa0d10 | 1307 | |
cade075f KW |
1308 | mutex_lock(&iommu->lock); |
1309 | ||
331e33d2 KW |
1310 | pgshift = __ffs(iommu->pgsize_bitmap); |
1311 | pgsize = (size_t)1 << pgshift; | |
cade075f | 1312 | |
0f53afa1 | 1313 | if (iova & (pgsize - 1)) |
cade075f | 1314 | goto unlock; |
cade075f | 1315 | |
c1965099 SS |
1316 | if (unmap_all) { |
1317 | if (iova || size) | |
1318 | goto unlock; | |
7dc4b2fd SS |
1319 | size = U64_MAX; |
1320 | } else if (!size || size & (pgsize - 1) || | |
1321 | iova + size - 1 < iova || size > SIZE_MAX) { | |
cade075f KW |
1322 | goto unlock; |
1323 | } | |
73fa0d10 | 1324 | |
331e33d2 KW |
1325 | /* When dirty tracking is enabled, allow only min supported pgsize */ |
1326 | if ((unmap->flags & VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP) && | |
1327 | (!iommu->dirty_page_tracking || (bitmap->pgsize != pgsize))) { | |
331e33d2 KW |
1328 | goto unlock; |
1329 | } | |
73fa0d10 | 1330 | |
331e33d2 KW |
1331 | WARN_ON((pgsize - 1) & PAGE_MASK); |
1332 | again: | |
1ef3e2bc AW |
1333 | /* |
1334 | * vfio-iommu-type1 (v1) - User mappings were coalesced together to | |
1335 | * avoid tracking individual mappings. This means that the granularity | |
1336 | * of the original mapping was lost and the user was allowed to attempt | |
1337 | * to unmap any range. Depending on the contiguousness of physical | |
1338 | * memory and page sizes supported by the IOMMU, arbitrary unmaps may | |
1339 | * or may not have worked. We only guaranteed unmap granularity | |
1340 | * matching the original mapping; even though it was untracked here, | |
1341 | * the original mappings are reflected in IOMMU mappings. This | |
1342 | * resulted in a couple unusual behaviors. First, if a range is not | |
1343 | * able to be unmapped, ex. a set of 4k pages that was mapped as a | |
1344 | * 2M hugepage into the IOMMU, the unmap ioctl returns success but with | |
1345 | * a zero sized unmap. Also, if an unmap request overlaps the first | |
1346 | * address of a hugepage, the IOMMU will unmap the entire hugepage. | |
1347 | * This also returns success and the returned unmap size reflects the | |
1348 | * actual size unmapped. | |
1349 | * | |
1350 | * We attempt to maintain compatibility with this "v1" interface, but | |
1351 | * we take control out of the hands of the IOMMU. Therefore, an unmap | |
1352 | * request offset from the beginning of the original mapping will | |
1353 | * return success with zero sized unmap. And an unmap request covering | |
1354 | * the first iova of mapping will unmap the entire range. | |
1355 | * | |
1356 | * The v2 version of this interface intends to be more deterministic. | |
1357 | * Unmap requests must fully cover previous mappings. Multiple | |
1358 | * mappings may still be unmapped by specifying large ranges, but there | |
1359 | * must not be any previous mappings bisected by the range. An error | |
1360 | * will be returned if these conditions are not met. The v2 interface | |
1361 | * will only return success and a size of zero if there were no | |
1362 | * mappings within the range. | |
1363 | */ | |
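	/*
	 * Illustrative v2 example (added for clarity, not in the original
	 * source): with a single mapping at [0x200000, 0x400000), an unmap
	 * request of iova 0x201000, size 0x1000 fails with -EINVAL because
	 * it would bisect the mapping, while iova 0x200000, size 0x200000
	 * removes it and reports 0x200000 bytes unmapped.
	 */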
c1965099 | 1364 | if (iommu->v2 && !unmap_all) { |
0f53afa1 SS |
1365 | dma = vfio_find_dma(iommu, iova, 1); |
1366 | if (dma && dma->iova != iova) | |
1ef3e2bc | 1367 | goto unlock; |
0f53afa1 SS |
1368 | |
1369 | dma = vfio_find_dma(iommu, iova + size - 1, 0); | |
1370 | if (dma && dma->iova + dma->size != iova + size) | |
1ef3e2bc | 1371 | goto unlock; |
1ef3e2bc AW |
1372 | } |
1373 | ||
0f53afa1 | 1374 | ret = 0; |
c3cbab24 | 1375 | n = first_n = vfio_find_dma_first_node(iommu, iova, size); |
40ae9b80 SS |
1376 | |
1377 | while (n) { | |
1378 | dma = rb_entry(n, struct vfio_dma, node); | |
1379 | if (dma->iova >= iova + size) | |
1380 | break; | |
1381 | ||
0f53afa1 | 1382 | if (!iommu->v2 && iova > dma->iova) |
166fd7d9 | 1383 | break; |
8f0d5bb9 KW |
1384 | /* |
1385 | * Task with same address space who mapped this iova range is | |
1386 | * allowed to unmap the iova range. | |
1387 | */ | |
1388 | if (dma->task->mm != current->mm) | |
1389 | break; | |
c086de81 | 1390 | |
c3cbab24 SS |
1391 | if (invalidate_vaddr) { |
1392 | if (dma->vaddr_invalid) { | |
1393 | struct rb_node *last_n = n; | |
1394 | ||
1395 | for (n = first_n; n != last_n; n = rb_next(n)) { | |
1396 | dma = rb_entry(n, | |
1397 | struct vfio_dma, node); | |
1398 | dma->vaddr_invalid = false; | |
1399 | iommu->vaddr_invalid_count--; | |
1400 | } | |
1401 | ret = -EINVAL; | |
1402 | unmapped = 0; | |
1403 | break; | |
1404 | } | |
1405 | dma->vaddr_invalid = true; | |
1406 | iommu->vaddr_invalid_count++; | |
1407 | unmapped += dma->size; | |
1408 | n = rb_next(n); | |
1409 | continue; | |
1410 | } | |
1411 | ||
c086de81 KW |
1412 | if (!RB_EMPTY_ROOT(&dma->pfn_list)) { |
1413 | struct vfio_iommu_type1_dma_unmap nb_unmap; | |
1414 | ||
1415 | if (dma_last == dma) { | |
1416 | BUG_ON(++retries > 10); | |
1417 | } else { | |
1418 | dma_last = dma; | |
1419 | retries = 0; | |
1420 | } | |
1421 | ||
1422 | nb_unmap.iova = dma->iova; | |
1423 | nb_unmap.size = dma->size; | |
1424 | ||
1425 | /* | |
1426 | * Notify anyone (mdev vendor drivers) to invalidate and | |
1427 | * unmap iovas within the range we're about to unmap. | |
1428 | * Vendor drivers MUST unpin pages in response to an | |
1429 | * invalidation. | |
1430 | */ | |
1431 | mutex_unlock(&iommu->lock); | |
1432 | blocking_notifier_call_chain(&iommu->notifier, | |
1433 | VFIO_IOMMU_NOTIFY_DMA_UNMAP, | |
1434 | &nb_unmap); | |
cade075f | 1435 | mutex_lock(&iommu->lock); |
c086de81 KW |
1436 | goto again; |
1437 | } | |
331e33d2 KW |
1438 | |
1439 | if (unmap->flags & VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP) { | |
95fc87b4 | 1440 | ret = update_user_bitmap(bitmap->data, iommu, dma, |
0f53afa1 | 1441 | iova, pgsize); |
331e33d2 KW |
1442 | if (ret) |
1443 | break; | |
1444 | } | |
1445 | ||
1ef3e2bc | 1446 | unmapped += dma->size; |
40ae9b80 | 1447 | n = rb_next(n); |
1ef3e2bc | 1448 | vfio_remove_dma(iommu, dma); |
166fd7d9 | 1449 | } |
cd9b2268 | 1450 | |
1ef3e2bc | 1451 | unlock: |
73fa0d10 | 1452 | mutex_unlock(&iommu->lock); |
166fd7d9 | 1453 | |
1ef3e2bc | 1454 | /* Report how much was unmapped */ |
166fd7d9 AW |
1455 | unmap->size = unmapped; |
1456 | ||
1457 | return ret; | |
1458 | } | |
1459 | ||
1ef3e2bc AW |
1460 | static int vfio_iommu_map(struct vfio_iommu *iommu, dma_addr_t iova, |
1461 | unsigned long pfn, long npage, int prot) | |
1462 | { | |
1463 | struct vfio_domain *d; | |
1464 | int ret; | |
1465 | ||
1466 | list_for_each_entry(d, &iommu->domain_list, next) { | |
1467 | ret = iommu_map(d->domain, iova, (phys_addr_t)pfn << PAGE_SHIFT, | |
1468 | npage << PAGE_SHIFT, prot | d->prot); | |
7a30423a JR |
1469 | if (ret) |
1470 | goto unwind; | |
c5e66887 AW |
1471 | |
1472 | cond_resched(); | |
1ef3e2bc AW |
1473 | } |
1474 | ||
1475 | return 0; | |
1476 | ||
1477 | unwind: | |
e1907d67 | 1478 | list_for_each_entry_continue_reverse(d, &iommu->domain_list, next) { |
1ef3e2bc | 1479 | iommu_unmap(d->domain, iova, npage << PAGE_SHIFT); |
e1907d67 XZ |
1480 | cond_resched(); |
1481 | } | |
166fd7d9 | 1482 | |
cd9b2268 | 1483 | return ret; |
73fa0d10 AW |
1484 | } |
1485 | ||
8f0d5bb9 KW |
1486 | static int vfio_pin_map_dma(struct vfio_iommu *iommu, struct vfio_dma *dma, |
1487 | size_t map_size) | |
1488 | { | |
1489 | dma_addr_t iova = dma->iova; | |
1490 | unsigned long vaddr = dma->vaddr; | |
4b6c33b3 | 1491 | struct vfio_batch batch; |
8f0d5bb9 KW |
1492 | size_t size = map_size; |
1493 | long npage; | |
7cb671e7 | 1494 | unsigned long pfn, limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; |
8f0d5bb9 KW |
1495 | int ret = 0; |
1496 | ||
4b6c33b3 DJ |
1497 | vfio_batch_init(&batch); |
1498 | ||
8f0d5bb9 KW |
1499 | while (size) { |
1500 | /* Pin a contiguous chunk of memory */ | |
1501 | npage = vfio_pin_pages_remote(dma, vaddr + dma->size, | |
4b6c33b3 DJ |
1502 | size >> PAGE_SHIFT, &pfn, limit, |
1503 | &batch); | |
8f0d5bb9 KW |
1504 | if (npage <= 0) { |
1505 | WARN_ON(!npage); | |
1506 | ret = (int)npage; | |
1507 | break; | |
1508 | } | |
1509 | ||
1510 | /* Map it! */ | |
1511 | ret = vfio_iommu_map(iommu, iova + dma->size, pfn, npage, | |
1512 | dma->prot); | |
1513 | if (ret) { | |
a54eb550 KW |
1514 | vfio_unpin_pages_remote(dma, iova + dma->size, pfn, |
1515 | npage, true); | |
4d83de6d | 1516 | vfio_batch_unpin(&batch, dma); |
8f0d5bb9 KW |
1517 | break; |
1518 | } | |
1519 | ||
1520 | size -= npage << PAGE_SHIFT; | |
1521 | dma->size += npage << PAGE_SHIFT; | |
1522 | } | |
1523 | ||
4b6c33b3 | 1524 | vfio_batch_fini(&batch); |
a54eb550 KW |
1525 | dma->iommu_mapped = true; |
1526 | ||
8f0d5bb9 KW |
1527 | if (ret) |
1528 | vfio_remove_dma(iommu, dma); | |
1529 | ||
1530 | return ret; | |
1531 | } | |
1532 | ||
9b77e5c7 SK |
1533 | /* |
1534 | * Check dma map request is within a valid iova range | |
1535 | */ | |
1536 | static bool vfio_iommu_iova_dma_valid(struct vfio_iommu *iommu, | |
1537 | dma_addr_t start, dma_addr_t end) | |
1538 | { | |
1539 | struct list_head *iova = &iommu->iova_list; | |
1540 | struct vfio_iova *node; | |
1541 | ||
1542 | list_for_each_entry(node, iova, list) { | |
1543 | if (start >= node->start && end <= node->end) | |
1544 | return true; | |
1545 | } | |
1546 | ||
1547 | /* | |
1548 | * Check for list_empty() as well since a container with | |
1549 | * a single mdev device will have an empty list. | |
1550 | */ | |
1551 | return list_empty(iova); | |
1552 | } | |
1553 | ||
73fa0d10 AW |
1554 | static int vfio_dma_do_map(struct vfio_iommu *iommu, |
1555 | struct vfio_iommu_type1_dma_map *map) | |
1556 | { | |
c3cbab24 | 1557 | bool set_vaddr = map->flags & VFIO_DMA_MAP_FLAG_VADDR; |
c8dbca16 | 1558 | dma_addr_t iova = map->iova; |
166fd7d9 | 1559 | unsigned long vaddr = map->vaddr; |
73fa0d10 AW |
1560 | size_t size = map->size; |
1561 | int ret = 0, prot = 0; | |
d6a4c185 | 1562 | size_t pgsize; |
1ef3e2bc | 1563 | struct vfio_dma *dma; |
166fd7d9 | 1564 | |
c8dbca16 AW |
1565 | /* Verify that none of our __u64 fields overflow */ |
1566 | if (map->size != size || map->vaddr != vaddr || map->iova != iova) | |
1567 | return -EINVAL; | |
73fa0d10 | 1568 | |
73fa0d10 AW |
1569 | /* READ/WRITE from device perspective */ |
1570 | if (map->flags & VFIO_DMA_MAP_FLAG_WRITE) | |
1571 | prot |= IOMMU_WRITE; | |
1572 | if (map->flags & VFIO_DMA_MAP_FLAG_READ) | |
1573 | prot |= IOMMU_READ; | |
1574 | ||
c3cbab24 SS |
1575 | if ((prot && set_vaddr) || (!prot && !set_vaddr)) |
1576 | return -EINVAL; | |
1577 | ||
cade075f | 1578 | mutex_lock(&iommu->lock); |
73fa0d10 | 1579 | |
d6a4c185 | 1580 | pgsize = (size_t)1 << __ffs(iommu->pgsize_bitmap); |
73fa0d10 | 1581 | |
d6a4c185 | 1582 | WARN_ON((pgsize - 1) & PAGE_MASK); |
cade075f | 1583 | |
c3cbab24 | 1584 | if (!size || (size | iova | vaddr) & (pgsize - 1)) { |
cade075f KW |
1585 | ret = -EINVAL; |
1586 | goto out_unlock; | |
1587 | } | |
1588 | ||
1589 | /* Don't allow IOVA or virtual address wrap */ | |
1590 | if (iova + size - 1 < iova || vaddr + size - 1 < vaddr) { | |
1591 | ret = -EINVAL; | |
1592 | goto out_unlock; | |
1593 | } | |
73fa0d10 | 1594 | |
c3cbab24 SS |
1595 | dma = vfio_find_dma(iommu, iova, size); |
1596 | if (set_vaddr) { | |
1597 | if (!dma) { | |
1598 | ret = -ENOENT; | |
1599 | } else if (!dma->vaddr_invalid || dma->iova != iova || | |
1600 | dma->size != size) { | |
1601 | ret = -EINVAL; | |
1602 | } else { | |
1603 | dma->vaddr = vaddr; | |
1604 | dma->vaddr_invalid = false; | |
1605 | iommu->vaddr_invalid_count--; | |
898b9eae | 1606 | wake_up_all(&iommu->vaddr_wait); |
c3cbab24 SS |
1607 | } |
1608 | goto out_unlock; | |
1609 | } else if (dma) { | |
8f0d5bb9 KW |
1610 | ret = -EEXIST; |
1611 | goto out_unlock; | |
73fa0d10 AW |
1612 | } |
1613 | ||
49285593 AW |
1614 | if (!iommu->dma_avail) { |
1615 | ret = -ENOSPC; | |
1616 | goto out_unlock; | |
1617 | } | |
1618 | ||
9b77e5c7 SK |
1619 | if (!vfio_iommu_iova_dma_valid(iommu, iova, iova + size - 1)) { |
1620 | ret = -EINVAL; | |
1621 | goto out_unlock; | |
1622 | } | |
1623 | ||
1ef3e2bc AW |
1624 | dma = kzalloc(sizeof(*dma), GFP_KERNEL); |
1625 | if (!dma) { | |
8f0d5bb9 KW |
1626 | ret = -ENOMEM; |
1627 | goto out_unlock; | |
1ef3e2bc AW |
1628 | } |
1629 | ||
49285593 | 1630 | iommu->dma_avail--; |
c8dbca16 AW |
1631 | dma->iova = iova; |
1632 | dma->vaddr = vaddr; | |
1ef3e2bc | 1633 | dma->prot = prot; |
48d8476b AW |
1634 | |
1635 | /* | |
1636 | * We need to be able to both add to a task's locked memory and test | |
1637 | * against the locked memory limit and we need to be able to do both | |
1638 | * outside of this call path as pinning can be asynchronous via the | |
1639 | * external interfaces for mdev devices. RLIMIT_MEMLOCK requires a | |
1640 | * task_struct and VM locked pages requires an mm_struct, however | |
1641 | * holding an indefinite mm reference is not recommended, therefore we | |
1642 | * only hold a reference to a task. We could hold a reference to | |
1643 | * current, however QEMU uses this call path through vCPU threads, | |
1644 | * which can be killed resulting in a NULL mm and failure in the unmap | |
1645 | * path when called via a different thread. Avoid this problem by | |
1646 | * using the group_leader as threads within the same group require | |
1647 | * both CLONE_THREAD and CLONE_VM and will therefore use the same | |
1648 | * mm_struct. | |
1649 | * | |
1650 | * Previously we also used the task for testing CAP_IPC_LOCK at the | |
1651 | * time of pinning and accounting, however has_capability() makes use | |
1652 | * of real_cred, a copy-on-write field, so we can't guarantee that it | |
1653 | * matches group_leader, or in fact that it might not change by the | |
1654 | * time it's evaluated. If a process were to call MAP_DMA with | |
1655 | * CAP_IPC_LOCK but later drop it, it doesn't make sense that they | |
1656 | * possibly see different results for an iommu_mapped vfio_dma vs | |
1657 | * externally mapped. Therefore track CAP_IPC_LOCK in vfio_dma at the | |
1658 | * time of calling MAP_DMA. | |
1659 | */ | |
1660 | get_task_struct(current->group_leader); | |
1661 | dma->task = current->group_leader; | |
1662 | dma->lock_cap = capable(CAP_IPC_LOCK); | |
1663 | ||
a54eb550 | 1664 | dma->pfn_list = RB_ROOT; |
166fd7d9 | 1665 | |
1ef3e2bc AW |
1666 | /* Insert zero-sized and grow as we map chunks of it */ |
1667 | vfio_link_dma(iommu, dma); | |
166fd7d9 | 1668 | |
a54eb550 KW |
1669 | /* Don't pin and map if the container doesn't contain an IOMMU-capable domain */ | |
1670 | if (!IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu)) | |
1671 | dma->size = size; | |
1672 | else | |
1673 | ret = vfio_pin_map_dma(iommu, dma, size); | |
1674 | ||
d6a4c185 KW |
1675 | if (!ret && iommu->dirty_page_tracking) { |
1676 | ret = vfio_dma_bitmap_alloc(dma, pgsize); | |
1677 | if (ret) | |
1678 | vfio_remove_dma(iommu, dma); | |
1679 | } | |
1680 | ||
8f0d5bb9 | 1681 | out_unlock: |
1ef3e2bc AW |
1682 | mutex_unlock(&iommu->lock); |
1683 | return ret; | |
1684 | } | |
1685 | ||
1686 | static int vfio_bus_type(struct device *dev, void *data) | |
1687 | { | |
1688 | struct bus_type **bus = data; | |
1689 | ||
1690 | if (*bus && *bus != dev->bus) | |
1691 | return -EINVAL; | |
1692 | ||
1693 | *bus = dev->bus; | |
1694 | ||
1695 | return 0; | |
1696 | } | |
1697 | ||
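/*
 * Summary of the replay path below (grounded in its body): replay all
 * existing mappings into a newly attached domain.  Entries that are
 * already iommu_mapped are walked with iommu_iova_to_phys() on an
 * existing domain and re-mapped in physically contiguous runs; entries
 * that are not yet mapped are pinned here first.  On failure, everything
 * mapped into the new domain so far is unwound.
 */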
1698 | static int vfio_iommu_replay(struct vfio_iommu *iommu, | |
1699 | struct vfio_domain *domain) | |
1700 | { | |
4b6c33b3 | 1701 | struct vfio_batch batch; |
aae7a75a | 1702 | struct vfio_domain *d = NULL; |
1ef3e2bc | 1703 | struct rb_node *n; |
7cb671e7 | 1704 | unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; |
1ef3e2bc AW |
1705 | int ret; |
1706 | ||
898b9eae SS |
1707 | ret = vfio_wait_all_valid(iommu); |
1708 | if (ret < 0) | |
1709 | return ret; | |
1710 | ||
1ef3e2bc | 1711 | /* Arbitrarily pick the first domain in the list for lookups */ |
aae7a75a AW |
1712 | if (!list_empty(&iommu->domain_list)) |
1713 | d = list_first_entry(&iommu->domain_list, | |
1714 | struct vfio_domain, next); | |
1715 | ||
4b6c33b3 DJ |
1716 | vfio_batch_init(&batch); |
1717 | ||
1ef3e2bc AW |
1718 | n = rb_first(&iommu->dma_list); |
1719 | ||
1ef3e2bc AW |
1720 | for (; n; n = rb_next(n)) { |
1721 | struct vfio_dma *dma; | |
1722 | dma_addr_t iova; | |
1723 | ||
1724 | dma = rb_entry(n, struct vfio_dma, node); | |
1725 | iova = dma->iova; | |
1726 | ||
1727 | while (iova < dma->iova + dma->size) { | |
a54eb550 | 1728 | phys_addr_t phys; |
1ef3e2bc | 1729 | size_t size; |
73fa0d10 | 1730 | |
a54eb550 KW |
1731 | if (dma->iommu_mapped) { |
1732 | phys_addr_t p; | |
1733 | dma_addr_t i; | |
1734 | ||
aae7a75a AW |
1735 | if (WARN_ON(!d)) { /* mapped w/o a domain?! */ |
1736 | ret = -EINVAL; | |
1737 | goto unwind; | |
1738 | } | |
1739 | ||
a54eb550 KW |
1740 | phys = iommu_iova_to_phys(d->domain, iova); |
1741 | ||
1742 | if (WARN_ON(!phys)) { | |
1743 | iova += PAGE_SIZE; | |
1744 | continue; | |
1745 | } | |
1746 | ||
1747 | size = PAGE_SIZE; | |
1748 | p = phys + size; | |
1749 | i = iova + size; | |
1750 | while (i < dma->iova + dma->size && | |
1751 | p == iommu_iova_to_phys(d->domain, i)) { | |
1752 | size += PAGE_SIZE; | |
1753 | p += PAGE_SIZE; | |
1754 | i += PAGE_SIZE; | |
1755 | } | |
1756 | } else { | |
1757 | unsigned long pfn; | |
1758 | unsigned long vaddr = dma->vaddr + | |
1759 | (iova - dma->iova); | |
1760 | size_t n = dma->iova + dma->size - iova; | |
1761 | long npage; | |
1762 | ||
1763 | npage = vfio_pin_pages_remote(dma, vaddr, | |
1764 | n >> PAGE_SHIFT, | |
4b6c33b3 DJ |
1765 | &pfn, limit, |
1766 | &batch); | |
a54eb550 KW |
1767 | if (npage <= 0) { |
1768 | WARN_ON(!npage); | |
1769 | ret = (int)npage; | |
aae7a75a | 1770 | goto unwind; |
a54eb550 KW |
1771 | } |
1772 | ||
1773 | phys = pfn << PAGE_SHIFT; | |
1774 | size = npage << PAGE_SHIFT; | |
166fd7d9 AW |
1775 | } |
1776 | ||
1ef3e2bc AW |
1777 | ret = iommu_map(domain->domain, iova, phys, |
1778 | size, dma->prot | domain->prot); | |
aae7a75a | 1779 | if (ret) { |
4d83de6d | 1780 | if (!dma->iommu_mapped) { |
aae7a75a AW |
1781 | vfio_unpin_pages_remote(dma, iova, |
1782 | phys >> PAGE_SHIFT, | |
1783 | size >> PAGE_SHIFT, | |
1784 | true); | |
4d83de6d DJ |
1785 | vfio_batch_unpin(&batch, dma); |
1786 | } | |
aae7a75a AW |
1787 | goto unwind; |
1788 | } | |
d93b3ac0 | 1789 | |
1ef3e2bc AW |
1790 | iova += size; |
1791 | } | |
aae7a75a AW |
1792 | } |
1793 | ||
1794 | /* All dmas are now mapped, defer to second tree walk for unwind */ | |
1795 | for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) { | |
1796 | struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node); | |
1797 | ||
a54eb550 | 1798 | dma->iommu_mapped = true; |
166fd7d9 | 1799 | } |
aae7a75a | 1800 | |
4b6c33b3 | 1801 | vfio_batch_fini(&batch); |
1ef3e2bc | 1802 | return 0; |
aae7a75a AW |
1803 | |
1804 | unwind: | |
1805 | for (; n; n = rb_prev(n)) { | |
1806 | struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node); | |
1807 | dma_addr_t iova; | |
1808 | ||
1809 | if (dma->iommu_mapped) { | |
1810 | iommu_unmap(domain->domain, dma->iova, dma->size); | |
1811 | continue; | |
1812 | } | |
1813 | ||
1814 | iova = dma->iova; | |
1815 | while (iova < dma->iova + dma->size) { | |
1816 | phys_addr_t phys, p; | |
1817 | size_t size; | |
1818 | dma_addr_t i; | |
1819 | ||
1820 | phys = iommu_iova_to_phys(domain->domain, iova); | |
1821 | if (!phys) { | |
1822 | iova += PAGE_SIZE; | |
1823 | continue; | |
1824 | } | |
1825 | ||
1826 | size = PAGE_SIZE; | |
1827 | p = phys + size; | |
1828 | i = iova + size; | |
1829 | while (i < dma->iova + dma->size && | |
1830 | p == iommu_iova_to_phys(domain->domain, i)) { | |
1831 | size += PAGE_SIZE; | |
1832 | p += PAGE_SIZE; | |
1833 | i += PAGE_SIZE; | |
1834 | } | |
1835 | ||
1836 | iommu_unmap(domain->domain, iova, size); | |
1837 | vfio_unpin_pages_remote(dma, iova, phys >> PAGE_SHIFT, | |
1838 | size >> PAGE_SHIFT, true); | |
1839 | } | |
1840 | } | |
1841 | ||
4b6c33b3 | 1842 | vfio_batch_fini(&batch); |
aae7a75a | 1843 | return ret; |
73fa0d10 AW |
1844 | } |
1845 | ||
6fe1010d AW |
1846 | /* |
1847 | * We change our unmap behavior slightly depending on whether the IOMMU | |
1848 | * supports fine-grained superpages. IOMMUs like AMD-Vi will use a superpage | |
1849 | * for practically any contiguous power-of-two mapping we give it. This means | |
1850 | * we don't need to look for contiguous chunks ourselves to make unmapping | |
1851 | * more efficient. On IOMMUs with coarse-grained super pages, like Intel VT-d | |
1852 | * with discrete 2M/1G/512G/1T superpages, identifying contiguous chunks | |
1853 | * significantly boosts non-hugetlbfs mappings and doesn't seem to hurt when | |
1854 | * hugetlbfs is in use. | |
1855 | */ | |
1856 | static void vfio_test_domain_fgsp(struct vfio_domain *domain) | |
1857 | { | |
1858 | struct page *pages; | |
1859 | int ret, order = get_order(PAGE_SIZE * 2); | |
1860 | ||
1861 | pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order); | |
1862 | if (!pages) | |
1863 | return; | |
1864 | ||
1865 | ret = iommu_map(domain->domain, 0, page_to_phys(pages), PAGE_SIZE * 2, | |
1866 | IOMMU_READ | IOMMU_WRITE | domain->prot); | |
1867 | if (!ret) { | |
1868 | size_t unmapped = iommu_unmap(domain->domain, 0, PAGE_SIZE); | |
1869 | ||
1870 | if (unmapped == PAGE_SIZE) | |
1871 | iommu_unmap(domain->domain, PAGE_SIZE, PAGE_SIZE); | |
1872 | else | |
1873 | domain->fgsp = true; | |
1874 | } | |
1875 | ||
1876 | __free_pages(pages, order); | |
1877 | } | |
1878 | ||
c7396f2e MG |
1879 | static struct vfio_iommu_group *find_iommu_group(struct vfio_domain *domain, |
1880 | struct iommu_group *iommu_group) | |
7896c998 | 1881 | { |
c7396f2e | 1882 | struct vfio_iommu_group *g; |
7896c998 KW |
1883 | |
1884 | list_for_each_entry(g, &domain->group_list, next) { | |
1885 | if (g->iommu_group == iommu_group) | |
1886 | return g; | |
1887 | } | |
1888 | ||
1889 | return NULL; | |
1890 | } | |
1891 | ||
c7396f2e MG |
1892 | static struct vfio_iommu_group* |
1893 | vfio_iommu_find_iommu_group(struct vfio_iommu *iommu, | |
1894 | struct iommu_group *iommu_group) | |
95fc87b4 KW |
1895 | { |
1896 | struct vfio_domain *domain; | |
c7396f2e | 1897 | struct vfio_iommu_group *group = NULL; |
95fc87b4 KW |
1898 | |
1899 | list_for_each_entry(domain, &iommu->domain_list, next) { | |
1900 | group = find_iommu_group(domain, iommu_group); | |
1901 | if (group) | |
1902 | return group; | |
1903 | } | |
1904 | ||
1905 | if (iommu->external_domain) | |
1906 | group = find_iommu_group(iommu->external_domain, iommu_group); | |
1907 | ||
1908 | return group; | |
1909 | } | |
1910 | ||
b09d6e47 SK |
1911 | static bool vfio_iommu_has_sw_msi(struct list_head *group_resv_regions, |
1912 | phys_addr_t *base) | |
5d704992 | 1913 | { |
b09d6e47 | 1914 | struct iommu_resv_region *region; |
5d704992 EA |
1915 | bool ret = false; |
1916 | ||
b09d6e47 | 1917 | list_for_each_entry(region, group_resv_regions, list) { |
f203f7f1 RM |
1918 | /* |
1919 | * The presence of any 'real' MSI regions should take | |
1920 | * precedence over the software-managed one if the | |
1921 | * IOMMU driver happens to advertise both types. | |
1922 | */ | |
1923 | if (region->type == IOMMU_RESV_MSI) { | |
1924 | ret = false; | |
1925 | break; | |
1926 | } | |
1927 | ||
9d3a4de4 | 1928 | if (region->type == IOMMU_RESV_SW_MSI) { |
5d704992 EA |
1929 | *base = region->start; |
1930 | ret = true; | |
5d704992 EA |
1931 | } |
1932 | } | |
b09d6e47 | 1933 | |
5d704992 EA |
1934 | return ret; |
1935 | } | |
1936 | ||
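/*
 * The helpers below attach/detach mdev devices through the physical
 * iommu_device registered by the vendor driver, if any: aux-domain
 * attach is used when the IOMMU supports it, otherwise a regular device
 * attach.  An mdev without a backing iommu_device cannot be attached to
 * an IOMMU domain here.
 */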
7bd50f0c LB |
1937 | static int vfio_mdev_attach_domain(struct device *dev, void *data) |
1938 | { | |
2a3d15f2 | 1939 | struct mdev_device *mdev = to_mdev_device(dev); |
7bd50f0c LB |
1940 | struct iommu_domain *domain = data; |
1941 | struct device *iommu_device; | |
1942 | ||
2a3d15f2 | 1943 | iommu_device = mdev_get_iommu_device(mdev); |
7bd50f0c LB |
1944 | if (iommu_device) { |
1945 | if (iommu_dev_feature_enabled(iommu_device, IOMMU_DEV_FEAT_AUX)) | |
1946 | return iommu_aux_attach_device(domain, iommu_device); | |
1947 | else | |
1948 | return iommu_attach_device(domain, iommu_device); | |
1949 | } | |
1950 | ||
1951 | return -EINVAL; | |
1952 | } | |
1953 | ||
1954 | static int vfio_mdev_detach_domain(struct device *dev, void *data) | |
1955 | { | |
2a3d15f2 | 1956 | struct mdev_device *mdev = to_mdev_device(dev); |
7bd50f0c LB |
1957 | struct iommu_domain *domain = data; |
1958 | struct device *iommu_device; | |
1959 | ||
2a3d15f2 | 1960 | iommu_device = mdev_get_iommu_device(mdev); |
7bd50f0c LB |
1961 | if (iommu_device) { |
1962 | if (iommu_dev_feature_enabled(iommu_device, IOMMU_DEV_FEAT_AUX)) | |
1963 | iommu_aux_detach_device(domain, iommu_device); | |
1964 | else | |
1965 | iommu_detach_device(domain, iommu_device); | |
1966 | } | |
1967 | ||
1968 | return 0; | |
1969 | } | |
1970 | ||
1971 | static int vfio_iommu_attach_group(struct vfio_domain *domain, | |
c7396f2e | 1972 | struct vfio_iommu_group *group) |
7bd50f0c LB |
1973 | { |
1974 | if (group->mdev_group) | |
1975 | return iommu_group_for_each_dev(group->iommu_group, | |
1976 | domain->domain, | |
1977 | vfio_mdev_attach_domain); | |
1978 | else | |
1979 | return iommu_attach_group(domain->domain, group->iommu_group); | |
1980 | } | |
1981 | ||
1982 | static void vfio_iommu_detach_group(struct vfio_domain *domain, | |
c7396f2e | 1983 | struct vfio_iommu_group *group) |
7bd50f0c LB |
1984 | { |
1985 | if (group->mdev_group) | |
1986 | iommu_group_for_each_dev(group->iommu_group, domain->domain, | |
1987 | vfio_mdev_detach_domain); | |
1988 | else | |
1989 | iommu_detach_group(domain->domain, group->iommu_group); | |
1990 | } | |
1991 | ||
be068fa2 LB |
1992 | static bool vfio_bus_is_mdev(struct bus_type *bus) |
1993 | { | |
1994 | struct bus_type *mdev_bus; | |
1995 | bool ret = false; | |
1996 | ||
1997 | mdev_bus = symbol_get(mdev_bus_type); | |
1998 | if (mdev_bus) { | |
1999 | ret = (bus == mdev_bus); | |
2000 | symbol_put(mdev_bus_type); | |
2001 | } | |
2002 | ||
2003 | return ret; | |
2004 | } | |
2005 | ||
2006 | static int vfio_mdev_iommu_device(struct device *dev, void *data) | |
2007 | { | |
2a3d15f2 | 2008 | struct mdev_device *mdev = to_mdev_device(dev); |
be068fa2 LB |
2009 | struct device **old = data, *new; |
2010 | ||
2a3d15f2 | 2011 | new = mdev_get_iommu_device(mdev); |
be068fa2 LB |
2012 | if (!new || (*old && *old != new)) |
2013 | return -EINVAL; | |
2014 | ||
2015 | *old = new; | |
2016 | ||
2017 | return 0; | |
2018 | } | |
2019 | ||
1108696a SK |
2020 | /* |
2021 | * This is a helper function to insert an address range to iova list. | |
2022 | * The list is initially created with a single entry corresponding to | |
2023 | * the IOMMU domain geometry to which the device group is attached. | |
2024 | * The list aperture gets modified when a new domain is added to the | |
2025 | * container if the new aperture doesn't conflict with the current one | |
2026 | * or with any existing dma mappings. The list is also modified to | |
2027 | * exclude any reserved regions associated with the device group. | |
2028 | */ | |
2029 | static int vfio_iommu_iova_insert(struct list_head *head, | |
2030 | dma_addr_t start, dma_addr_t end) | |
2031 | { | |
2032 | struct vfio_iova *region; | |
2033 | ||
2034 | region = kmalloc(sizeof(*region), GFP_KERNEL); | |
2035 | if (!region) | |
2036 | return -ENOMEM; | |
2037 | ||
2038 | INIT_LIST_HEAD(®ion->list); | |
2039 | region->start = start; | |
2040 | region->end = end; | |
2041 | ||
2042 | list_add_tail(®ion->list, head); | |
2043 | return 0; | |
2044 | } | |
2045 | ||
2046 | /* | |
2047 | * Check whether the new iommu aperture conflicts with the existing | |
2048 | * aperture or with any existing dma mappings. | |
2049 | */ | |
2050 | static bool vfio_iommu_aper_conflict(struct vfio_iommu *iommu, | |
2051 | dma_addr_t start, dma_addr_t end) | |
2052 | { | |
2053 | struct vfio_iova *first, *last; | |
2054 | struct list_head *iova = &iommu->iova_list; | |
2055 | ||
2056 | if (list_empty(iova)) | |
2057 | return false; | |
2058 | ||
2059 | /* Disjoint sets, return conflict */ | |
2060 | first = list_first_entry(iova, struct vfio_iova, list); | |
2061 | last = list_last_entry(iova, struct vfio_iova, list); | |
2062 | if (start > last->end || end < first->start) | |
2063 | return true; | |
2064 | ||
2065 | /* Check for any existing dma mappings below the new start */ | |
2066 | if (start > first->start) { | |
2067 | if (vfio_find_dma(iommu, first->start, start - first->start)) | |
2068 | return true; | |
2069 | } | |
2070 | ||
2071 | /* Check for any existing dma mappings beyond the new end */ | |
2072 | if (end < last->end) { | |
2073 | if (vfio_find_dma(iommu, end + 1, last->end - end)) | |
2074 | return true; | |
2075 | } | |
2076 | ||
2077 | return false; | |
2078 | } | |
2079 | ||
2080 | /* | |
2081 | * Resize iommu iova aperture window. This is called only if the new | |
2082 | * aperture has no conflict with existing aperture and dma mappings. | |
2083 | */ | |
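/*
 * For example (illustrative values): resizing the list
 * [0x0 - 0xFFFF], [0x20000 - 0xFFFFF] to the aperture (0x1000, 0x8FFFF)
 * trims the ends, yielding [0x1000 - 0xFFFF], [0x20000 - 0x8FFFF];
 * nodes falling entirely outside the new aperture are freed.
 */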
2084 | static int vfio_iommu_aper_resize(struct list_head *iova, | |
2085 | dma_addr_t start, dma_addr_t end) | |
2086 | { | |
2087 | struct vfio_iova *node, *next; | |
2088 | ||
2089 | if (list_empty(iova)) | |
2090 | return vfio_iommu_iova_insert(iova, start, end); | |
2091 | ||
2092 | /* Adjust iova list start */ | |
2093 | list_for_each_entry_safe(node, next, iova, list) { | |
2094 | if (start < node->start) | |
2095 | break; | |
2096 | if (start >= node->start && start < node->end) { | |
2097 | node->start = start; | |
2098 | break; | |
2099 | } | |
2100 | /* Delete nodes before new start */ | |
2101 | list_del(&node->list); | |
2102 | kfree(node); | |
2103 | } | |
2104 | ||
2105 | /* Adjust iova list end */ | |
2106 | list_for_each_entry_safe(node, next, iova, list) { | |
2107 | if (end > node->end) | |
2108 | continue; | |
2109 | if (end > node->start && end <= node->end) { | |
2110 | node->end = end; | |
2111 | continue; | |
2112 | } | |
2113 | /* Delete nodes after new end */ | |
2114 | list_del(&node->list); | |
2115 | kfree(node); | |
2116 | } | |
2117 | ||
2118 | return 0; | |
2119 | } | |
2120 | ||
af029169 SK |
2121 | /* |
2122 | * Check whether any reserved region conflicts with existing dma mappings | |
2123 | */ | |
2124 | static bool vfio_iommu_resv_conflict(struct vfio_iommu *iommu, | |
2125 | struct list_head *resv_regions) | |
2126 | { | |
2127 | struct iommu_resv_region *region; | |
2128 | ||
2129 | /* Check for conflict with existing dma mappings */ | |
2130 | list_for_each_entry(region, resv_regions, list) { | |
2131 | if (region->type == IOMMU_RESV_DIRECT_RELAXABLE) | |
2132 | continue; | |
2133 | ||
2134 | if (vfio_find_dma(iommu, region->start, region->length)) | |
2135 | return true; | |
2136 | } | |
2137 | ||
2138 | return false; | |
2139 | } | |
2140 | ||
2141 | /* | |
2142 | * Check the iova ranges for overlap with reserved regions and | |
2143 | * exclude any overlapping portions from the iommu iova range | |
2144 | */ | |
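/*
 * For example (illustrative values): excluding a reserved region
 * [0x8000 - 0x8FFF] from an iova node [0x0 - 0xFFFFF] replaces the node
 * with [0x0 - 0x7FFF] and [0x9000 - 0xFFFFF].
 */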
2145 | static int vfio_iommu_resv_exclude(struct list_head *iova, | |
2146 | struct list_head *resv_regions) | |
2147 | { | |
2148 | struct iommu_resv_region *resv; | |
2149 | struct vfio_iova *n, *next; | |
2150 | ||
2151 | list_for_each_entry(resv, resv_regions, list) { | |
2152 | phys_addr_t start, end; | |
2153 | ||
2154 | if (resv->type == IOMMU_RESV_DIRECT_RELAXABLE) | |
2155 | continue; | |
2156 | ||
2157 | start = resv->start; | |
2158 | end = resv->start + resv->length - 1; | |
2159 | ||
2160 | list_for_each_entry_safe(n, next, iova, list) { | |
2161 | int ret = 0; | |
2162 | ||
2163 | /* No overlap */ | |
2164 | if (start > n->end || end < n->start) | |
2165 | continue; | |
2166 | /* | |
2167 | * Insert a new node if current node overlaps with the | |
06d738c8 | 2168 | * reserve region to exclude that from valid iova range. |
af029169 SK |
2169 | * Note that, new node is inserted before the current |
2170 | * node and finally the current node is deleted keeping | |
2171 | * the list updated and sorted. | |
2172 | */ | |
2173 | if (start > n->start) | |
2174 | ret = vfio_iommu_iova_insert(&n->list, n->start, | |
2175 | start - 1); | |
2176 | if (!ret && end < n->end) | |
2177 | ret = vfio_iommu_iova_insert(&n->list, end + 1, | |
2178 | n->end); | |
2179 | if (ret) | |
2180 | return ret; | |
2181 | ||
2182 | list_del(&n->list); | |
2183 | kfree(n); | |
2184 | } | |
2185 | } | |
2186 | ||
2187 | if (list_empty(iova)) | |
2188 | return -EINVAL; | |
2189 | ||
2190 | return 0; | |
2191 | } | |
2192 | ||
2193 | static void vfio_iommu_resv_free(struct list_head *resv_regions) | |
2194 | { | |
2195 | struct iommu_resv_region *n, *next; | |
2196 | ||
2197 | list_for_each_entry_safe(n, next, resv_regions, list) { | |
2198 | list_del(&n->list); | |
2199 | kfree(n); | |
2200 | } | |
2201 | } | |
2202 | ||
1108696a SK |
2203 | static void vfio_iommu_iova_free(struct list_head *iova) |
2204 | { | |
2205 | struct vfio_iova *n, *next; | |
2206 | ||
2207 | list_for_each_entry_safe(n, next, iova, list) { | |
2208 | list_del(&n->list); | |
2209 | kfree(n); | |
2210 | } | |
2211 | } | |
2212 | ||
2213 | static int vfio_iommu_iova_get_copy(struct vfio_iommu *iommu, | |
2214 | struct list_head *iova_copy) | |
2215 | { | |
2216 | struct list_head *iova = &iommu->iova_list; | |
2217 | struct vfio_iova *n; | |
2218 | int ret; | |
2219 | ||
2220 | list_for_each_entry(n, iova, list) { | |
2221 | ret = vfio_iommu_iova_insert(iova_copy, n->start, n->end); | |
2222 | if (ret) | |
2223 | goto out_free; | |
2224 | } | |
2225 | ||
2226 | return 0; | |
2227 | ||
2228 | out_free: | |
2229 | vfio_iommu_iova_free(iova_copy); | |
2230 | return ret; | |
2231 | } | |
2232 | ||
2233 | static void vfio_iommu_iova_insert_copy(struct vfio_iommu *iommu, | |
2234 | struct list_head *iova_copy) | |
2235 | { | |
2236 | struct list_head *iova = &iommu->iova_list; | |
2237 | ||
2238 | vfio_iommu_iova_free(iova); | |
2239 | ||
2240 | list_splice_tail(iova_copy, iova); | |
2241 | } | |
572f64c7 | 2242 | |
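/*
 * Summary of the attach path below (grounded in its body): attach an
 * iommu_group to the container.  Duplicates are rejected; mdev-only
 * groups are placed in the external domain; otherwise an IOMMU domain is
 * allocated for the group's bus, the domain aperture and the group's
 * reserved regions are checked against existing mappings, a compatible
 * existing domain is reused when possible, and current mappings are
 * replayed into the new domain before the updated iova list is published.
 */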
73fa0d10 AW |
2243 | static int vfio_iommu_type1_attach_group(void *iommu_data, |
2244 | struct iommu_group *iommu_group) | |
2245 | { | |
2246 | struct vfio_iommu *iommu = iommu_data; | |
c7396f2e | 2247 | struct vfio_iommu_group *group; |
1ef3e2bc | 2248 | struct vfio_domain *domain, *d; |
be068fa2 | 2249 | struct bus_type *bus = NULL; |
73fa0d10 | 2250 | int ret; |
9d72f87b | 2251 | bool resv_msi, msi_remap; |
95f89e09 | 2252 | phys_addr_t resv_msi_base = 0; |
bc9a05ee | 2253 | struct iommu_domain_geometry *geo; |
1108696a | 2254 | LIST_HEAD(iova_copy); |
af029169 | 2255 | LIST_HEAD(group_resv_regions); |
73fa0d10 | 2256 | |
73fa0d10 AW |
2257 | mutex_lock(&iommu->lock); |
2258 | ||
572f64c7 ZY |
2259 | /* Check for duplicates */ |
2260 | if (vfio_iommu_find_iommu_group(iommu, iommu_group)) { | |
2261 | mutex_unlock(&iommu->lock); | |
2262 | return -EINVAL; | |
a54eb550 KW |
2263 | } |
2264 | ||
1ef3e2bc AW |
2265 | group = kzalloc(sizeof(*group), GFP_KERNEL); |
2266 | domain = kzalloc(sizeof(*domain), GFP_KERNEL); | |
2267 | if (!group || !domain) { | |
2268 | ret = -ENOMEM; | |
2269 | goto out_free; | |
2270 | } | |
2271 | ||
2272 | group->iommu_group = iommu_group; | |
2273 | ||
2274 | /* Determine bus_type in order to allocate a domain */ | |
2275 | ret = iommu_group_for_each_dev(iommu_group, &bus, vfio_bus_type); | |
2276 | if (ret) | |
2277 | goto out_free; | |
2278 | ||
be068fa2 LB |
2279 | if (vfio_bus_is_mdev(bus)) { |
2280 | struct device *iommu_device = NULL; | |
a54eb550 | 2281 | |
be068fa2 LB |
2282 | group->mdev_group = true; |
2283 | ||
2284 | /* Determine the isolation type */ | |
2285 | ret = iommu_group_for_each_dev(iommu_group, &iommu_device, | |
2286 | vfio_mdev_iommu_device); | |
2287 | if (ret || !iommu_device) { | |
a54eb550 KW |
2288 | if (!iommu->external_domain) { |
2289 | INIT_LIST_HEAD(&domain->group_list); | |
2290 | iommu->external_domain = domain; | |
cade075f | 2291 | vfio_update_pgsize_bitmap(iommu); |
be068fa2 | 2292 | } else { |
a54eb550 | 2293 | kfree(domain); |
be068fa2 | 2294 | } |
a54eb550 KW |
2295 | |
2296 | list_add(&group->next, | |
2297 | &iommu->external_domain->group_list); | |
95fc87b4 KW |
2298 | /* |
2299 | * Non-iommu backed group cannot dirty memory directly, | |
2300 | * it can only use interfaces that provide dirty | |
2301 | * tracking. | |
2302 | * The iommu scope can only be promoted with the | |
2303 | * addition of a dirty tracking group. | |
2304 | */ | |
2305 | group->pinned_page_dirty_scope = true; | |
a54eb550 | 2306 | mutex_unlock(&iommu->lock); |
be068fa2 | 2307 | |
a54eb550 KW |
2308 | return 0; |
2309 | } | |
be068fa2 LB |
2310 | |
2311 | bus = iommu_device->bus; | |
a54eb550 KW |
2312 | } |
2313 | ||
1ef3e2bc AW |
2314 | domain->domain = iommu_domain_alloc(bus); |
2315 | if (!domain->domain) { | |
2316 | ret = -EIO; | |
2317 | goto out_free; | |
2318 | } | |
2319 | ||
f5c9eceb | 2320 | if (iommu->nesting) { |
7e147547 | 2321 | ret = iommu_enable_nesting(domain->domain); |
f5c9eceb WD |
2322 | if (ret) |
2323 | goto out_domain; | |
2324 | } | |
2325 | ||
7bd50f0c | 2326 | ret = vfio_iommu_attach_group(domain, group); |
1ef3e2bc AW |
2327 | if (ret) |
2328 | goto out_domain; | |
2329 | ||
1108696a | 2330 | /* Get aperture info */ |
bc9a05ee CH |
2331 | geo = &domain->domain->geometry; |
2332 | if (vfio_iommu_aper_conflict(iommu, geo->aperture_start, | |
2333 | geo->aperture_end)) { | |
1108696a SK |
2334 | ret = -EINVAL; |
2335 | goto out_detach; | |
2336 | } | |
2337 | ||
af029169 SK |
2338 | ret = iommu_get_group_resv_regions(iommu_group, &group_resv_regions); |
2339 | if (ret) | |
2340 | goto out_detach; | |
2341 | ||
2342 | if (vfio_iommu_resv_conflict(iommu, &group_resv_regions)) { | |
2343 | ret = -EINVAL; | |
2344 | goto out_detach; | |
2345 | } | |
2346 | ||
1108696a SK |
2347 | /* |
2348 | * We don't want to work on the original iova list as the list | |
2349 | * gets modified and in case of failure we have to retain the | |
2350 | * original list. Get a copy here. | |
2351 | */ | |
2352 | ret = vfio_iommu_iova_get_copy(iommu, &iova_copy); | |
2353 | if (ret) | |
2354 | goto out_detach; | |
2355 | ||
bc9a05ee CH |
2356 | ret = vfio_iommu_aper_resize(&iova_copy, geo->aperture_start, |
2357 | geo->aperture_end); | |
1108696a SK |
2358 | if (ret) |
2359 | goto out_detach; | |
2360 | ||
af029169 SK |
2361 | ret = vfio_iommu_resv_exclude(&iova_copy, &group_resv_regions); |
2362 | if (ret) | |
2363 | goto out_detach; | |
2364 | ||
b09d6e47 | 2365 | resv_msi = vfio_iommu_has_sw_msi(&group_resv_regions, &resv_msi_base); |
5d704992 | 2366 | |
1ef3e2bc AW |
2367 | INIT_LIST_HEAD(&domain->group_list); |
2368 | list_add(&group->next, &domain->group_list); | |
2369 | ||
db406cc0 RM |
2370 | msi_remap = irq_domain_check_msi_remap() || |
2371 | iommu_capable(bus, IOMMU_CAP_INTR_REMAP); | |
9d72f87b EA |
2372 | |
2373 | if (!allow_unsafe_interrupts && !msi_remap) { | |
1ef3e2bc AW |
2374 | pr_warn("%s: No interrupt remapping support. Use the module param \"allow_unsafe_interrupts\" to enable VFIO IOMMU support on this platform\n", |
2375 | __func__); | |
2376 | ret = -EPERM; | |
2377 | goto out_detach; | |
2378 | } | |
2379 | ||
eb165f05 | 2380 | if (iommu_capable(bus, IOMMU_CAP_CACHE_COHERENCY)) |
1ef3e2bc AW |
2381 | domain->prot |= IOMMU_CACHE; |
2382 | ||
73fa0d10 | 2383 | /* |
1ef3e2bc AW |
2384 | * Try to match an existing compatible domain. We don't want to |
2385 | * preclude an IOMMU driver supporting multiple bus_types and being | |
2386 | * able to include different bus_types in the same IOMMU domain, so | |
2387 | * we test whether the domains use the same iommu_ops rather than | |
2388 | * testing if they're on the same bus_type. | |
73fa0d10 | 2389 | */ |
1ef3e2bc AW |
2390 | list_for_each_entry(d, &iommu->domain_list, next) { |
2391 | if (d->domain->ops == domain->domain->ops && | |
2392 | d->prot == domain->prot) { | |
7bd50f0c LB |
2393 | vfio_iommu_detach_group(domain, group); |
2394 | if (!vfio_iommu_attach_group(d, group)) { | |
1ef3e2bc AW |
2395 | list_add(&group->next, &d->group_list); |
2396 | iommu_domain_free(domain->domain); | |
2397 | kfree(domain); | |
1108696a | 2398 | goto done; |
1ef3e2bc AW |
2399 | } |
2400 | ||
7bd50f0c | 2401 | ret = vfio_iommu_attach_group(domain, group); |
1ef3e2bc AW |
2402 | if (ret) |
2403 | goto out_domain; | |
2404 | } | |
73fa0d10 AW |
2405 | } |
2406 | ||
6fe1010d AW |
2407 | vfio_test_domain_fgsp(domain); |
2408 | ||
1ef3e2bc AW |
2409 | /* replay mappings on new domains */ |
2410 | ret = vfio_iommu_replay(iommu, domain); | |
2411 | if (ret) | |
2412 | goto out_detach; | |
2413 | ||
2c9f1af5 WY |
2414 | if (resv_msi) { |
2415 | ret = iommu_get_msi_cookie(domain->domain, resv_msi_base); | |
f44efca0 | 2416 | if (ret && ret != -ENODEV) |
2c9f1af5 WY |
2417 | goto out_detach; |
2418 | } | |
5d704992 | 2419 | |
1ef3e2bc | 2420 | list_add(&domain->next, &iommu->domain_list); |
cade075f | 2421 | vfio_update_pgsize_bitmap(iommu); |
1108696a SK |
2422 | done: |
2423 | /* Delete the old one and insert new iova list */ | |
2424 | vfio_iommu_iova_insert_copy(iommu, &iova_copy); | |
95fc87b4 KW |
2425 | |
2426 | /* | |
2427 | * An iommu backed group can dirty memory directly and therefore | |
2428 | * demotes the iommu scope until it declares itself dirty tracking | |
2429 | * capable via the page pinning interface. | |
2430 | */ | |
01032156 | 2431 | iommu->num_non_pinned_groups++; |
73fa0d10 | 2432 | mutex_unlock(&iommu->lock); |
af029169 | 2433 | vfio_iommu_resv_free(&group_resv_regions); |
73fa0d10 AW |
2434 | |
2435 | return 0; | |
1ef3e2bc AW |
2436 | |
2437 | out_detach: | |
7bd50f0c | 2438 | vfio_iommu_detach_group(domain, group); |
1ef3e2bc AW |
2439 | out_domain: |
2440 | iommu_domain_free(domain->domain); | |
1108696a | 2441 | vfio_iommu_iova_free(&iova_copy); |
af029169 | 2442 | vfio_iommu_resv_free(&group_resv_regions); |
1ef3e2bc AW |
2443 | out_free: |
2444 | kfree(domain); | |
2445 | kfree(group); | |
2446 | mutex_unlock(&iommu->lock); | |
2447 | return ret; | |
2448 | } | |
2449 | ||
2450 | static void vfio_iommu_unmap_unpin_all(struct vfio_iommu *iommu) | |
2451 | { | |
2452 | struct rb_node *node; | |
2453 | ||
2454 | while ((node = rb_first(&iommu->dma_list))) | |
2455 | vfio_remove_dma(iommu, rb_entry(node, struct vfio_dma, node)); | |
73fa0d10 AW |
2456 | } |
2457 | ||
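/*
 * The function below is used when the last IOMMU-backed domain goes away
 * while an external (mdev) domain remains: unmap and unpin all IOMMU
 * mappings without touching the locked-memory accounting, then
 * re-account based on the pages that are still pinned externally in each
 * dma's pfn_list.
 */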
a54eb550 KW |
2458 | static void vfio_iommu_unmap_unpin_reaccount(struct vfio_iommu *iommu) |
2459 | { | |
2460 | struct rb_node *n, *p; | |
2461 | ||
2462 | n = rb_first(&iommu->dma_list); | |
2463 | for (; n; n = rb_next(n)) { | |
2464 | struct vfio_dma *dma; | |
2465 | long locked = 0, unlocked = 0; | |
2466 | ||
2467 | dma = rb_entry(n, struct vfio_dma, node); | |
2468 | unlocked += vfio_unmap_unpin(iommu, dma, false); | |
2469 | p = rb_first(&dma->pfn_list); | |
2470 | for (; p; p = rb_next(p)) { | |
2471 | struct vfio_pfn *vpfn = rb_entry(p, struct vfio_pfn, | |
2472 | node); | |
2473 | ||
2474 | if (!is_invalid_reserved_pfn(vpfn->pfn)) | |
2475 | locked++; | |
2476 | } | |
48d8476b | 2477 | vfio_lock_acct(dma, locked - unlocked, true); |
a54eb550 KW |
2478 | } |
2479 | } | |
2480 | ||
f45daadf SK |
2481 | /* |
2482 | * Called when a domain is removed in detach. It is possible that | |
2483 | * the removed domain determined the iova aperture window. Modify the | |
2484 | * iova aperture with the smallest window among existing domains. | |
2485 | */ | |
2486 | static void vfio_iommu_aper_expand(struct vfio_iommu *iommu, | |
2487 | struct list_head *iova_copy) | |
2488 | { | |
2489 | struct vfio_domain *domain; | |
f45daadf SK |
2490 | struct vfio_iova *node; |
2491 | dma_addr_t start = 0; | |
2492 | dma_addr_t end = (dma_addr_t)~0; | |
2493 | ||
2494 | if (list_empty(iova_copy)) | |
2495 | return; | |
2496 | ||
2497 | list_for_each_entry(domain, &iommu->domain_list, next) { | |
bc9a05ee CH |
2498 | struct iommu_domain_geometry *geo = &domain->domain->geometry; |
2499 | ||
2500 | if (geo->aperture_start > start) | |
2501 | start = geo->aperture_start; | |
2502 | if (geo->aperture_end < end) | |
2503 | end = geo->aperture_end; | |
f45daadf SK |
2504 | } |
2505 | ||
2506 | /* Modify aperture limits. The new aper is either same or bigger */ | |
2507 | node = list_first_entry(iova_copy, struct vfio_iova, list); | |
2508 | node->start = start; | |
2509 | node = list_last_entry(iova_copy, struct vfio_iova, list); | |
2510 | node->end = end; | |
2511 | } | |
2512 | ||
2513 | /* | |
2514 | * Called when a group is detached. The reserved regions for that | |
2515 | * group may become part of the valid iova ranges now. But since reserved regions | |
2516 | * may be duplicated among groups, populate the iova valid regions | |
2517 | * list again. | |
2518 | */ | |
2519 | static int vfio_iommu_resv_refresh(struct vfio_iommu *iommu, | |
2520 | struct list_head *iova_copy) | |
2521 | { | |
2522 | struct vfio_domain *d; | |
c7396f2e | 2523 | struct vfio_iommu_group *g; |
f45daadf SK |
2524 | struct vfio_iova *node; |
2525 | dma_addr_t start, end; | |
2526 | LIST_HEAD(resv_regions); | |
2527 | int ret; | |
2528 | ||
2529 | if (list_empty(iova_copy)) | |
2530 | return -EINVAL; | |
2531 | ||
2532 | list_for_each_entry(d, &iommu->domain_list, next) { | |
2533 | list_for_each_entry(g, &d->group_list, next) { | |
2534 | ret = iommu_get_group_resv_regions(g->iommu_group, | |
2535 | &resv_regions); | |
2536 | if (ret) | |
2537 | goto done; | |
2538 | } | |
2539 | } | |
2540 | ||
2541 | node = list_first_entry(iova_copy, struct vfio_iova, list); | |
2542 | start = node->start; | |
2543 | node = list_last_entry(iova_copy, struct vfio_iova, list); | |
2544 | end = node->end; | |
2545 | ||
2546 | /* purge the iova list and create new one */ | |
2547 | vfio_iommu_iova_free(iova_copy); | |
2548 | ||
2549 | ret = vfio_iommu_aper_resize(iova_copy, start, end); | |
2550 | if (ret) | |
2551 | goto done; | |
2552 | ||
2553 | /* Exclude current reserved regions from iova ranges */ | |
2554 | ret = vfio_iommu_resv_exclude(iova_copy, &resv_regions); | |
2555 | done: | |
2556 | vfio_iommu_resv_free(&resv_regions); | |
2557 | return ret; | |
2558 | } | |
2559 | ||
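/*
 * Summary of the detach path below (grounded in its body): detach an
 * iommu_group from the container.  Groups in the external (mdev) domain
 * are simply removed; for IOMMU-backed groups the group is detached from
 * its domain and, if the domain becomes empty, the domain is freed, the
 * iova aperture is re-expanded and reserved regions are re-evaluated.
 * Removing a group without dirty-tracking capability may also allow the
 * dirty-tracking scope to be promoted.
 */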
73fa0d10 AW |
2560 | static void vfio_iommu_type1_detach_group(void *iommu_data, |
2561 | struct iommu_group *iommu_group) | |
2562 | { | |
2563 | struct vfio_iommu *iommu = iommu_data; | |
1ef3e2bc | 2564 | struct vfio_domain *domain; |
c7396f2e | 2565 | struct vfio_iommu_group *group; |
95fc87b4 | 2566 | bool update_dirty_scope = false; |
f45daadf | 2567 | LIST_HEAD(iova_copy); |
73fa0d10 AW |
2568 | |
2569 | mutex_lock(&iommu->lock); | |
2570 | ||
a54eb550 KW |
2571 | if (iommu->external_domain) { |
2572 | group = find_iommu_group(iommu->external_domain, iommu_group); | |
2573 | if (group) { | |
95fc87b4 | 2574 | update_dirty_scope = !group->pinned_page_dirty_scope; |
a54eb550 KW |
2575 | list_del(&group->next); |
2576 | kfree(group); | |
2577 | ||
2578 | if (list_empty(&iommu->external_domain->group_list)) { | |
4a19f37a KZ |
2579 | if (!IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu)) { |
2580 | WARN_ON(iommu->notifier.head); | |
a54eb550 | 2581 | vfio_iommu_unmap_unpin_all(iommu); |
4a19f37a | 2582 | } |
a54eb550 KW |
2583 | |
2584 | kfree(iommu->external_domain); | |
2585 | iommu->external_domain = NULL; | |
2586 | } | |
2587 | goto detach_group_done; | |
2588 | } | |
2589 | } | |
2590 | ||
f45daadf SK |
2591 | /* |
2592 | * Get a copy of iova list. This will be used to update | |
2593 | * and to replace the current one later. Please note that | |
2594 | * we will leave the original list as it is if update fails. | |
2595 | */ | |
2596 | vfio_iommu_iova_get_copy(iommu, &iova_copy); | |
2597 | ||
1ef3e2bc | 2598 | list_for_each_entry(domain, &iommu->domain_list, next) { |
7896c998 KW |
2599 | group = find_iommu_group(domain, iommu_group); |
2600 | if (!group) | |
2601 | continue; | |
1ef3e2bc | 2602 | |
7bd50f0c | 2603 | vfio_iommu_detach_group(domain, group); |
95fc87b4 | 2604 | update_dirty_scope = !group->pinned_page_dirty_scope; |
7896c998 KW |
2605 | list_del(&group->next); |
2606 | kfree(group); | |
2607 | /* | |
a54eb550 KW |
2608 | * Group ownership provides privilege; if the group list is | |
2609 | * empty, the domain goes away. If it's the last iommu-backed | |
2610 | * domain and no external domain exists, then all the | |
2611 | * mappings go away too. If it's the last iommu-backed domain | |
2612 | * and an external domain exists, update accounting. | |
7896c998 KW |
2613 | */ |
2614 | if (list_empty(&domain->group_list)) { | |
a54eb550 | 2615 | if (list_is_singular(&iommu->domain_list)) { |
4a19f37a KZ |
2616 | if (!iommu->external_domain) { |
2617 | WARN_ON(iommu->notifier.head); | |
a54eb550 | 2618 | vfio_iommu_unmap_unpin_all(iommu); |
4a19f37a | 2619 | } else { |
a54eb550 | 2620 | vfio_iommu_unmap_unpin_reaccount(iommu); |
4a19f37a | 2621 | } |
a54eb550 | 2622 | } |
7896c998 KW |
2623 | iommu_domain_free(domain->domain); |
2624 | list_del(&domain->next); | |
2625 | kfree(domain); | |
f45daadf | 2626 | vfio_iommu_aper_expand(iommu, &iova_copy); |
cade075f | 2627 | vfio_update_pgsize_bitmap(iommu); |
73fa0d10 | 2628 | } |
a54eb550 | 2629 | break; |
73fa0d10 AW |
2630 | } |
2631 | ||
f45daadf SK |
2632 | if (!vfio_iommu_resv_refresh(iommu, &iova_copy)) |
2633 | vfio_iommu_iova_insert_copy(iommu, &iova_copy); | |
2634 | else | |
2635 | vfio_iommu_iova_free(&iova_copy); | |
2636 | ||
a54eb550 | 2637 | detach_group_done: |
95fc87b4 KW |
2638 | /* |
2639 | * Removal of a group without dirty tracking may allow the iommu scope | |
2640 | * to be promoted. | |
2641 | */ | |
d0a78f91 | 2642 | if (update_dirty_scope) { |
01032156 | 2643 | iommu->num_non_pinned_groups--; |
d0a78f91 KZ |
2644 | if (iommu->dirty_page_tracking) |
2645 | vfio_iommu_populate_bitmap_full(iommu); | |
2646 | } | |
73fa0d10 AW |
2647 | mutex_unlock(&iommu->lock); |
2648 | } | |
2649 | ||
2650 | static void *vfio_iommu_type1_open(unsigned long arg) | |
2651 | { | |
2652 | struct vfio_iommu *iommu; | |
2653 | ||
73fa0d10 AW |
2654 | iommu = kzalloc(sizeof(*iommu), GFP_KERNEL); |
2655 | if (!iommu) | |
2656 | return ERR_PTR(-ENOMEM); | |
2657 | ||
f5c9eceb WD |
2658 | switch (arg) { |
2659 | case VFIO_TYPE1_IOMMU: | |
2660 | break; | |
2661 | case VFIO_TYPE1_NESTING_IOMMU: | |
2662 | iommu->nesting = true; | |
df561f66 | 2663 | fallthrough; |
f5c9eceb WD |
2664 | case VFIO_TYPE1v2_IOMMU: |
2665 | iommu->v2 = true; | |
2666 | break; | |
2667 | default: | |
2668 | kfree(iommu); | |
2669 | return ERR_PTR(-EINVAL); | |
2670 | } | |
2671 | ||
1ef3e2bc | 2672 | INIT_LIST_HEAD(&iommu->domain_list); |
1108696a | 2673 | INIT_LIST_HEAD(&iommu->iova_list); |
cd9b2268 | 2674 | iommu->dma_list = RB_ROOT; |
49285593 | 2675 | iommu->dma_avail = dma_entry_limit; |
487ace13 | 2676 | iommu->container_open = true; |
73fa0d10 | 2677 | mutex_init(&iommu->lock); |
c086de81 | 2678 | BLOCKING_INIT_NOTIFIER_HEAD(&iommu->notifier); |
898b9eae | 2679 | init_waitqueue_head(&iommu->vaddr_wait); |
73fa0d10 AW |
2680 | |
2681 | return iommu; | |
2682 | } | |
2683 | ||
a54eb550 KW |
2684 | static void vfio_release_domain(struct vfio_domain *domain, bool external) |
2685 | { | |
c7396f2e | 2686 | struct vfio_iommu_group *group, *group_tmp; |
a54eb550 KW |
2687 | |
2688 | list_for_each_entry_safe(group, group_tmp, | |
2689 | &domain->group_list, next) { | |
2690 | if (!external) | |
7bd50f0c | 2691 | vfio_iommu_detach_group(domain, group); |
a54eb550 KW |
2692 | list_del(&group->next); |
2693 | kfree(group); | |
2694 | } | |
2695 | ||
2696 | if (!external) | |
2697 | iommu_domain_free(domain->domain); | |
2698 | } | |
2699 | ||
73fa0d10 AW |
2700 | static void vfio_iommu_type1_release(void *iommu_data) |
2701 | { | |
2702 | struct vfio_iommu *iommu = iommu_data; | |
1ef3e2bc | 2703 | struct vfio_domain *domain, *domain_tmp; |
a54eb550 KW |
2704 | |
2705 | if (iommu->external_domain) { | |
2706 | vfio_release_domain(iommu->external_domain, true); | |
a54eb550 KW |
2707 | kfree(iommu->external_domain); |
2708 | } | |
73fa0d10 | 2709 | |
1ef3e2bc | 2710 | vfio_iommu_unmap_unpin_all(iommu); |
73fa0d10 | 2711 | |
1ef3e2bc AW |
2712 | list_for_each_entry_safe(domain, domain_tmp, |
2713 | &iommu->domain_list, next) { | |
a54eb550 | 2714 | vfio_release_domain(domain, false); |
1ef3e2bc AW |
2715 | list_del(&domain->next); |
2716 | kfree(domain); | |
73fa0d10 | 2717 | } |
1108696a SK |
2718 | |
2719 | vfio_iommu_iova_free(&iommu->iova_list); | |
2720 | ||
73fa0d10 AW |
2721 | kfree(iommu); |
2722 | } | |
2723 | ||
aa429318 AW |
2724 | static int vfio_domains_have_iommu_cache(struct vfio_iommu *iommu) |
2725 | { | |
2726 | struct vfio_domain *domain; | |
2727 | int ret = 1; | |
2728 | ||
2729 | mutex_lock(&iommu->lock); | |
2730 | list_for_each_entry(domain, &iommu->domain_list, next) { | |
2731 | if (!(domain->prot & IOMMU_CACHE)) { | |
2732 | ret = 0; | |
f5bfdbf2 | 2733 | break; |
aa429318 | 2734 | } |
73fa0d10 | 2735 | } |
aa429318 | 2736 | mutex_unlock(&iommu->lock); |
73fa0d10 | 2737 | |
aa429318 | 2738 | return ret; |
73fa0d10 AW |
2739 | } |
2740 | ||
ccd59dce LY |
2741 | static int vfio_iommu_type1_check_extension(struct vfio_iommu *iommu, |
2742 | unsigned long arg) | |
2743 | { | |
2744 | switch (arg) { | |
2745 | case VFIO_TYPE1_IOMMU: | |
2746 | case VFIO_TYPE1v2_IOMMU: | |
2747 | case VFIO_TYPE1_NESTING_IOMMU: | |
c1965099 | 2748 | case VFIO_UNMAP_ALL: |
c3cbab24 | 2749 | case VFIO_UPDATE_VADDR: |
ccd59dce LY |
2750 | return 1; |
2751 | case VFIO_DMA_CC_IOMMU: | |
2752 | if (!iommu) | |
2753 | return 0; | |
2754 | return vfio_domains_have_iommu_cache(iommu); | |
2755 | default: | |
2756 | return 0; | |
2757 | } | |
2758 | } | |
2759 | ||
a7170720 SK |
2760 | static int vfio_iommu_iova_add_cap(struct vfio_info_cap *caps, |
2761 | struct vfio_iommu_type1_info_cap_iova_range *cap_iovas, | |
2762 | size_t size) | |
2763 | { | |
2764 | struct vfio_info_cap_header *header; | |
2765 | struct vfio_iommu_type1_info_cap_iova_range *iova_cap; | |
2766 | ||
2767 | header = vfio_info_cap_add(caps, size, | |
2768 | VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE, 1); | |
2769 | if (IS_ERR(header)) | |
2770 | return PTR_ERR(header); | |
2771 | ||
2772 | iova_cap = container_of(header, | |
2773 | struct vfio_iommu_type1_info_cap_iova_range, | |
2774 | header); | |
2775 | iova_cap->nr_iovas = cap_iovas->nr_iovas; | |
2776 | memcpy(iova_cap->iova_ranges, cap_iovas->iova_ranges, | |
2777 | cap_iovas->nr_iovas * sizeof(*cap_iovas->iova_ranges)); | |
2778 | return 0; | |
2779 | } | |
2780 | ||
2781 | static int vfio_iommu_iova_build_caps(struct vfio_iommu *iommu, | |
2782 | struct vfio_info_cap *caps) | |
2783 | { | |
2784 | struct vfio_iommu_type1_info_cap_iova_range *cap_iovas; | |
2785 | struct vfio_iova *iova; | |
2786 | size_t size; | |
2787 | int iovas = 0, i = 0, ret; | |
2788 | ||
a7170720 SK |
2789 | list_for_each_entry(iova, &iommu->iova_list, list) |
2790 | iovas++; | |
2791 | ||
2792 | if (!iovas) { | |
2793 | /* | |
2794 | * Return 0 as a container with a single mdev device | |
2795 | * will have an empty list | |
2796 | */ | |
cade075f | 2797 | return 0; |
a7170720 SK |
2798 | } |
2799 | ||
78b23814 | 2800 | size = struct_size(cap_iovas, iova_ranges, iovas); |
a7170720 SK |
2801 | |
2802 | cap_iovas = kzalloc(size, GFP_KERNEL); | |
cade075f KW |
2803 | if (!cap_iovas) |
2804 | return -ENOMEM; | |
a7170720 SK |
2805 | |
2806 | cap_iovas->nr_iovas = iovas; | |
2807 | ||
2808 | list_for_each_entry(iova, &iommu->iova_list, list) { | |
2809 | cap_iovas->iova_ranges[i].start = iova->start; | |
2810 | cap_iovas->iova_ranges[i].end = iova->end; | |
2811 | i++; | |
2812 | } | |
2813 | ||
2814 | ret = vfio_iommu_iova_add_cap(caps, cap_iovas, size); | |
2815 | ||
2816 | kfree(cap_iovas); | |
a7170720 SK |
2817 | return ret; |
2818 | } | |
2819 | ||
ad721705 KW |
2820 | static int vfio_iommu_migration_build_caps(struct vfio_iommu *iommu, |
2821 | struct vfio_info_cap *caps) | |
2822 | { | |
2823 | struct vfio_iommu_type1_info_cap_migration cap_mig; | |
2824 | ||
2825 | cap_mig.header.id = VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION; | |
2826 | cap_mig.header.version = 1; | |
2827 | ||
2828 | cap_mig.flags = 0; | |
2829 | /* support minimum pgsize */ | |
2830 | cap_mig.pgsize_bitmap = (size_t)1 << __ffs(iommu->pgsize_bitmap); | |
2831 | cap_mig.max_dirty_bitmap_size = DIRTY_BITMAP_SIZE_MAX; | |
2832 | ||
2833 | return vfio_info_add_capability(caps, &cap_mig.header, sizeof(cap_mig)); | |
2834 | } | |
2835 | ||
7d6e1329 MR |
2836 | static int vfio_iommu_dma_avail_build_caps(struct vfio_iommu *iommu, |
2837 | struct vfio_info_cap *caps) | |
2838 | { | |
2839 | struct vfio_iommu_type1_info_dma_avail cap_dma_avail; | |
2840 | ||
2841 | cap_dma_avail.header.id = VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL; | |
2842 | cap_dma_avail.header.version = 1; | |
2843 | ||
2844 | cap_dma_avail.avail = iommu->dma_avail; | |
2845 | ||
2846 | return vfio_info_add_capability(caps, &cap_dma_avail.header, | |
2847 | sizeof(cap_dma_avail)); | |
2848 | } | |
2849 | ||
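/*
 * VFIO_IOMMU_GET_INFO handler below: report the supported page-size
 * bitmap and, when userspace provides room for capabilities, chain the
 * migration, available-DMA and iova-range capability structures after
 * the fixed header.
 */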
ccd59dce LY |
2850 | static int vfio_iommu_type1_get_info(struct vfio_iommu *iommu, |
2851 | unsigned long arg) | |
73fa0d10 | 2852 | { |
ccd59dce | 2853 | struct vfio_iommu_type1_info info; |
73fa0d10 | 2854 | unsigned long minsz; |
ccd59dce LY |
2855 | struct vfio_info_cap caps = { .buf = NULL, .size = 0 }; |
2856 | unsigned long capsz; | |
2857 | int ret; | |
73fa0d10 | 2858 | |
ccd59dce | 2859 | minsz = offsetofend(struct vfio_iommu_type1_info, iova_pgsizes); |
73fa0d10 | 2860 | |
ccd59dce LY |
2861 | /* For backward compatibility, cannot require this */ |
2862 | capsz = offsetofend(struct vfio_iommu_type1_info, cap_offset); | |
a7170720 | 2863 | |
ccd59dce LY |
2864 | if (copy_from_user(&info, (void __user *)arg, minsz)) |
2865 | return -EFAULT; | |
73fa0d10 | 2866 | |
ccd59dce LY |
2867 | if (info.argsz < minsz) |
2868 | return -EINVAL; | |
73fa0d10 | 2869 | |
ccd59dce LY |
2870 | if (info.argsz >= capsz) { |
2871 | minsz = capsz; | |
2872 | info.cap_offset = 0; /* output, no-recopy necessary */ | |
2873 | } | |
a7170720 | 2874 | |
ccd59dce LY |
2875 | mutex_lock(&iommu->lock); |
2876 | info.flags = VFIO_IOMMU_INFO_PGSIZES; | |
73fa0d10 | 2877 | |
ccd59dce | 2878 | info.iova_pgsizes = iommu->pgsize_bitmap; |
73fa0d10 | 2879 | |
ccd59dce | 2880 | ret = vfio_iommu_migration_build_caps(iommu, &caps); |
ad721705 | 2881 | |
7d6e1329 MR |
2882 | if (!ret) |
2883 | ret = vfio_iommu_dma_avail_build_caps(iommu, &caps); | |
2884 | ||
ccd59dce LY |
2885 | if (!ret) |
2886 | ret = vfio_iommu_iova_build_caps(iommu, &caps); | |
ad721705 | 2887 | |
ccd59dce | 2888 | mutex_unlock(&iommu->lock); |
ad721705 | 2889 | |
ccd59dce LY |
2890 | if (ret) |
2891 | return ret; | |
a7170720 | 2892 | |
ccd59dce LY |
2893 | if (caps.size) { |
2894 | info.flags |= VFIO_IOMMU_INFO_CAPS; | |
a7170720 | 2895 | |
ccd59dce LY |
2896 | if (info.argsz < sizeof(info) + caps.size) { |
2897 | info.argsz = sizeof(info) + caps.size; | |
2898 | } else { | |
2899 | vfio_info_cap_shift(&caps, sizeof(info)); | |
2900 | if (copy_to_user((void __user *)arg + | |
2901 | sizeof(info), caps.buf, | |
2902 | caps.size)) { | |
2903 | kfree(caps.buf); | |
2904 | return -EFAULT; | |
a7170720 | 2905 | } |
ccd59dce | 2906 | info.cap_offset = sizeof(info); |
a7170720 SK |
2907 | } |
2908 | ||
ccd59dce LY |
2909 | kfree(caps.buf); |
2910 | } | |
73fa0d10 | 2911 | |
ccd59dce LY |
2912 | return copy_to_user((void __user *)arg, &info, minsz) ? |
2913 | -EFAULT : 0; | |
2914 | } | |
73fa0d10 | 2915 | |
ccd59dce LY |
2916 | static int vfio_iommu_type1_map_dma(struct vfio_iommu *iommu, |
2917 | unsigned long arg) | |
2918 | { | |
2919 | struct vfio_iommu_type1_dma_map map; | |
2920 | unsigned long minsz; | |
c3cbab24 SS |
2921 | uint32_t mask = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE | |
2922 | VFIO_DMA_MAP_FLAG_VADDR; | |
73fa0d10 | 2923 | |
ccd59dce | 2924 | minsz = offsetofend(struct vfio_iommu_type1_dma_map, size); |
73fa0d10 | 2925 | |
ccd59dce LY |
2926 | if (copy_from_user(&map, (void __user *)arg, minsz)) |
2927 | return -EFAULT; | |
73fa0d10 | 2928 | |
ccd59dce LY |
2929 | if (map.argsz < minsz || map.flags & ~mask) |
2930 | return -EINVAL; | |
73fa0d10 | 2931 | |
ccd59dce LY |
2932 | return vfio_dma_do_map(iommu, &map); |
2933 | } | |
73fa0d10 | 2934 | |
ccd59dce LY |
2935 | static int vfio_iommu_type1_unmap_dma(struct vfio_iommu *iommu, |
2936 | unsigned long arg) | |
2937 | { | |
2938 | struct vfio_iommu_type1_dma_unmap unmap; | |
2939 | struct vfio_bitmap bitmap = { 0 }; | |
c1965099 | 2940 | uint32_t mask = VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP | |
c3cbab24 | 2941 | VFIO_DMA_UNMAP_FLAG_VADDR | |
c1965099 | 2942 | VFIO_DMA_UNMAP_FLAG_ALL; |
ccd59dce LY |
2943 | unsigned long minsz; |
2944 | int ret; | |
73fa0d10 | 2945 | |
ccd59dce | 2946 | minsz = offsetofend(struct vfio_iommu_type1_dma_unmap, size); |
73fa0d10 | 2947 | |
ccd59dce LY |
2948 | if (copy_from_user(&unmap, (void __user *)arg, minsz)) |
2949 | return -EFAULT; | |
73fa0d10 | 2950 | |
c1965099 SS |
2951 | if (unmap.argsz < minsz || unmap.flags & ~mask) |
2952 | return -EINVAL; | |
2953 | ||
2954 | if ((unmap.flags & VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP) && | |
c3cbab24 SS |
2955 | (unmap.flags & (VFIO_DMA_UNMAP_FLAG_ALL | |
2956 | VFIO_DMA_UNMAP_FLAG_VADDR))) | |
ccd59dce | 2957 | return -EINVAL; |
331e33d2 | 2958 | |
ccd59dce LY |
2959 | if (unmap.flags & VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP) { |
2960 | unsigned long pgshift; | |
331e33d2 | 2961 | |
ccd59dce LY |
2962 | if (unmap.argsz < (minsz + sizeof(bitmap))) |
2963 | return -EINVAL; | |
331e33d2 | 2964 | |
ccd59dce LY |
2965 | if (copy_from_user(&bitmap, |
2966 | (void __user *)(arg + minsz), | |
2967 | sizeof(bitmap))) | |
2968 | return -EFAULT; | |
331e33d2 | 2969 | |
ccd59dce LY |
2970 | if (!access_ok((void __user *)bitmap.data, bitmap.size)) |
2971 | return -EINVAL; | |
331e33d2 | 2972 | |
ccd59dce LY |
2973 | pgshift = __ffs(bitmap.pgsize); |
2974 | ret = verify_bitmap_size(unmap.size >> pgshift, | |
2975 | bitmap.size); | |
166fd7d9 AW |
2976 | if (ret) |
2977 | return ret; | |
ccd59dce LY |
2978 | } |
2979 | ||
2980 | ret = vfio_dma_do_unmap(iommu, &unmap, &bitmap); | |
2981 | if (ret) | |
2982 | return ret; | |
166fd7d9 | 2983 | |
ccd59dce | 2984 | return copy_to_user((void __user *)arg, &unmap, minsz) ? |
8160c4e4 | 2985 | -EFAULT : 0; |
ccd59dce | 2986 | } |
d6a4c185 | 2987 | |
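/*
 * VFIO_IOMMU_DIRTY_PAGES handler below: exactly one of the START, STOP
 * or GET_BITMAP flags may be set.  START allocates per-dma dirty bitmaps
 * at the minimum supported page size and enables tracking, STOP frees
 * them, and GET_BITMAP copies the dirty state for a range whose iova and
 * size are aligned to that minimum page size.
 */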
ccd59dce LY |
2988 | static int vfio_iommu_type1_dirty_pages(struct vfio_iommu *iommu, |
2989 | unsigned long arg) | |
2990 | { | |
2991 | struct vfio_iommu_type1_dirty_bitmap dirty; | |
2992 | uint32_t mask = VFIO_IOMMU_DIRTY_PAGES_FLAG_START | | |
2993 | VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP | | |
2994 | VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP; | |
2995 | unsigned long minsz; | |
2996 | int ret = 0; | |
d6a4c185 | 2997 | |
ccd59dce LY |
2998 | if (!iommu->v2) |
2999 | return -EACCES; | |
d6a4c185 | 3000 | |
ccd59dce | 3001 | minsz = offsetofend(struct vfio_iommu_type1_dirty_bitmap, flags); |
d6a4c185 | 3002 | |
ccd59dce LY |
3003 | if (copy_from_user(&dirty, (void __user *)arg, minsz)) |
3004 | return -EFAULT; | |
d6a4c185 | 3005 | |
ccd59dce LY |
3006 | if (dirty.argsz < minsz || dirty.flags & ~mask) |
3007 | return -EINVAL; | |
d6a4c185 | 3008 | |
ccd59dce LY |
3009 | /* only one flag should be set at a time */ |
3010 | if (__ffs(dirty.flags) != __fls(dirty.flags)) | |
3011 | return -EINVAL; | |
d6a4c185 | 3012 | |
ccd59dce LY |
3013 | if (dirty.flags & VFIO_IOMMU_DIRTY_PAGES_FLAG_START) { |
3014 | size_t pgsize; | |
d6a4c185 | 3015 | |
ccd59dce LY |
3016 | mutex_lock(&iommu->lock); |
3017 | pgsize = 1 << __ffs(iommu->pgsize_bitmap); | |
3018 | if (!iommu->dirty_page_tracking) { | |
3019 | ret = vfio_dma_bitmap_alloc_all(iommu, pgsize); | |
3020 | if (!ret) | |
3021 | iommu->dirty_page_tracking = true; | |
3022 | } | |
3023 | mutex_unlock(&iommu->lock); | |
3024 | return ret; | |
3025 | } else if (dirty.flags & VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP) { | |
3026 | mutex_lock(&iommu->lock); | |
3027 | if (iommu->dirty_page_tracking) { | |
3028 | iommu->dirty_page_tracking = false; | |
3029 | vfio_dma_bitmap_free_all(iommu); | |
3030 | } | |
3031 | mutex_unlock(&iommu->lock); | |
3032 | return 0; | |
3033 | } else if (dirty.flags & VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP) { | |
3034 | struct vfio_iommu_type1_dirty_bitmap_get range; | |
3035 | unsigned long pgshift; | |
3036 | size_t data_size = dirty.argsz - minsz; | |
3037 | size_t iommu_pgsize; | |
d6a4c185 | 3038 | |
ccd59dce LY |
3039 | if (!data_size || data_size < sizeof(range)) |
3040 | return -EINVAL; | |
d6a4c185 | 3041 | |
ccd59dce LY |
3042 | if (copy_from_user(&range, (void __user *)(arg + minsz), |
3043 | sizeof(range))) | |
3044 | return -EFAULT; | |
d6a4c185 | 3045 | |
ccd59dce LY |
3046 | if (range.iova + range.size < range.iova) |
3047 | return -EINVAL; | |
3048 | if (!access_ok((void __user *)range.bitmap.data, | |
3049 | range.bitmap.size)) | |
3050 | return -EINVAL; | |
d6a4c185 | 3051 | |
ccd59dce LY |
3052 | pgshift = __ffs(range.bitmap.pgsize); |
3053 | ret = verify_bitmap_size(range.size >> pgshift, | |
3054 | range.bitmap.size); | |
3055 | if (ret) | |
3056 | return ret; | |
d6a4c185 | 3057 | |
ccd59dce | 3058 | mutex_lock(&iommu->lock); |
d6a4c185 | 3059 | |
ccd59dce LY |
3060 | iommu_pgsize = (size_t)1 << __ffs(iommu->pgsize_bitmap); |
3061 | ||
3062 | /* allow only smallest supported pgsize */ | |
3063 | if (range.bitmap.pgsize != iommu_pgsize) { | |
3064 | ret = -EINVAL; | |
3065 | goto out_unlock; | |
3066 | } | |
3067 | if (range.iova & (iommu_pgsize - 1)) { | |
3068 | ret = -EINVAL; | |
3069 | goto out_unlock; | |
3070 | } | |
3071 | if (!range.size || range.size & (iommu_pgsize - 1)) { | |
3072 | ret = -EINVAL; | |
3073 | goto out_unlock; | |
d6a4c185 | 3074 | } |
ccd59dce LY |
3075 | |
3076 | if (iommu->dirty_page_tracking) | |
3077 | ret = vfio_iova_dirty_bitmap(range.bitmap.data, | |
3078 | iommu, range.iova, | |
3079 | range.size, | |
3080 | range.bitmap.pgsize); | |
3081 | else | |
3082 | ret = -EINVAL; | |
3083 | out_unlock: | |
3084 | mutex_unlock(&iommu->lock); | |
3085 | ||
3086 | return ret; | |
73fa0d10 AW |
3087 | } |
3088 | ||
ccd59dce LY |
3089 | return -EINVAL; |
3090 | } | |
3091 | ||
3092 | static long vfio_iommu_type1_ioctl(void *iommu_data, | |
3093 | unsigned int cmd, unsigned long arg) | |
3094 | { | |
3095 | struct vfio_iommu *iommu = iommu_data; | |
3096 | ||
3097 | switch (cmd) { | |
3098 | case VFIO_CHECK_EXTENSION: | |
3099 | return vfio_iommu_type1_check_extension(iommu, arg); | |
3100 | case VFIO_IOMMU_GET_INFO: | |
3101 | return vfio_iommu_type1_get_info(iommu, arg); | |
3102 | case VFIO_IOMMU_MAP_DMA: | |
3103 | return vfio_iommu_type1_map_dma(iommu, arg); | |
3104 | case VFIO_IOMMU_UNMAP_DMA: | |
3105 | return vfio_iommu_type1_unmap_dma(iommu, arg); | |
3106 | case VFIO_IOMMU_DIRTY_PAGES: | |
3107 | return vfio_iommu_type1_dirty_pages(iommu, arg); | |
3108 | default: | |
3109 | return -ENOTTY; | |
3110 | } | |
73fa0d10 AW |
3111 | } |
3112 | ||
c086de81 | 3113 | static int vfio_iommu_type1_register_notifier(void *iommu_data, |
22195cbd | 3114 | unsigned long *events, |
c086de81 KW |
3115 | struct notifier_block *nb) |
3116 | { | |
3117 | struct vfio_iommu *iommu = iommu_data; | |
3118 | ||
22195cbd JS |
3119 | /* clear known events */ |
3120 | *events &= ~VFIO_IOMMU_NOTIFY_DMA_UNMAP; | |
3121 | ||
3122 | /* refuse to register if any unknown events remain */ | 
3123 | if (*events) | |
3124 | return -EINVAL; | |
3125 | ||
c086de81 KW |
3126 | return blocking_notifier_chain_register(&iommu->notifier, nb); |
3127 | } | |
3128 | ||
3129 | static int vfio_iommu_type1_unregister_notifier(void *iommu_data, | |
3130 | struct notifier_block *nb) | |
3131 | { | |
3132 | struct vfio_iommu *iommu = iommu_data; | |
3133 | ||
3134 | return blocking_notifier_chain_unregister(&iommu->notifier, nb); | |
3135 | } | |
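
Because the registration path above strips VFIO_IOMMU_NOTIFY_DMA_UNMAP and rejects anything left over, a consumer can only subscribe to DMA unmap events; the typical user is an mdev vendor driver that has pinned pages and must drop them when the covering range is unmapped. Below is an illustrative in-kernel sketch, assuming the device-based vfio_register_notifier() wrapper of this kernel generation; my_invalidate_range() and my_subscribe() are hypothetical driver functions.

/* Hypothetical consumer, for illustration only. */
#include <linux/notifier.h>
#include <linux/vfio.h>

static int my_dma_unmap_cb(struct notifier_block *nb, unsigned long action,
			   void *data)
{
	if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
		struct vfio_iommu_type1_dma_unmap *unmap = data;

		/* unpin anything we hold inside [iova, iova + size) */
		my_invalidate_range(unmap->iova, unmap->size);
	}
	return NOTIFY_OK;
}

static struct notifier_block my_nb = { .notifier_call = my_dma_unmap_cb };

static int my_subscribe(struct device *dev)
{
	unsigned long events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;

	/* any bit other than DMA_UNMAP left set in events fails with -EINVAL */
	return vfio_register_notifier(dev, VFIO_IOMMU_NOTIFY, &events, &my_nb);
}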
3136 | ||
8d46c0cc YZ |
3137 | static int vfio_iommu_type1_dma_rw_chunk(struct vfio_iommu *iommu, |
3138 | dma_addr_t user_iova, void *data, | |
3139 | size_t count, bool write, | |
3140 | size_t *copied) | |
3141 | { | |
3142 | struct mm_struct *mm; | |
3143 | unsigned long vaddr; | |
3144 | struct vfio_dma *dma; | |
3145 | bool kthread = current->mm == NULL; | |
3146 | size_t offset; | |
898b9eae | 3147 | int ret; |
8d46c0cc YZ |
3148 | |
3149 | *copied = 0; | |
3150 | ||
898b9eae SS |
3151 | ret = vfio_find_dma_valid(iommu, user_iova, 1, &dma); |
3152 | if (ret < 0) | |
3153 | return ret; | |
8d46c0cc YZ |
3154 | |
3155 | if ((write && !(dma->prot & IOMMU_WRITE)) || | |
3156 | !(dma->prot & IOMMU_READ)) | |
3157 | return -EPERM; | |
3158 | ||
3159 | mm = get_task_mm(dma->task); | |
3160 | ||
3161 | if (!mm) | |
3162 | return -EPERM; | |
3163 | ||
3164 | if (kthread) | |
f5678e7f | 3165 | kthread_use_mm(mm); |
8d46c0cc YZ |
3166 | else if (current->mm != mm) |
3167 | goto out; | |
3168 | ||
3169 | offset = user_iova - dma->iova; | |
3170 | ||
3171 | if (count > dma->size - offset) | |
3172 | count = dma->size - offset; | |
3173 | ||
3174 | vaddr = dma->vaddr + offset; | |
3175 | ||
d6a4c185 | 3176 | if (write) { |
205323b8 | 3177 | *copied = copy_to_user((void __user *)vaddr, data, |
8d46c0cc | 3178 | count) ? 0 : count; |
d6a4c185 KW |
3179 | if (*copied && iommu->dirty_page_tracking) { |
3180 | unsigned long pgshift = __ffs(iommu->pgsize_bitmap); | |
3181 | /* | |
3182 | * The dirty bitmap is populated at the smallest supported | 
3183 | * page size granularity | 
3184 | */ | |
3185 | bitmap_set(dma->bitmap, offset >> pgshift, | |
2c5af985 YZ |
3186 | ((offset + *copied - 1) >> pgshift) - |
3187 | (offset >> pgshift) + 1); | |
d6a4c185 KW |
3188 | } |
3189 | } else | |
205323b8 | 3190 | *copied = copy_from_user(data, (void __user *)vaddr, |
8d46c0cc YZ |
3191 | count) ? 0 : count; |
3192 | if (kthread) | |
f5678e7f | 3193 | kthread_unuse_mm(mm); |
8d46c0cc YZ |
3194 | out: |
3195 | mmput(mm); | |
3196 | return *copied ? 0 : -EFAULT; | |
3197 | } | |
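
One detail of the write path above is worth spelling out: bitmap_set() marks every smallest-size page the copy touched, including a partially covered last page. For example, with 4 KiB pages (pgshift = 12), a 16-byte write at offset 0x1ff8 gives offset >> pgshift = 1 and (offset + 16 - 1) >> pgshift = 2, so ((offset + *copied - 1) >> pgshift) - (offset >> pgshift) + 1 = 2 bits are set, covering both pages the copy straddles.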
3198 | ||
3199 | static int vfio_iommu_type1_dma_rw(void *iommu_data, dma_addr_t user_iova, | |
3200 | void *data, size_t count, bool write) | |
3201 | { | |
3202 | struct vfio_iommu *iommu = iommu_data; | |
3203 | int ret = 0; | |
3204 | size_t done; | |
3205 | ||
3206 | mutex_lock(&iommu->lock); | |
3207 | while (count > 0) { | |
3208 | ret = vfio_iommu_type1_dma_rw_chunk(iommu, user_iova, data, | |
3209 | count, write, &done); | |
3210 | if (ret) | |
3211 | break; | |
3212 | ||
3213 | count -= done; | |
3214 | data += done; | |
3215 | user_iova += done; | |
3216 | } | |
3217 | ||
3218 | mutex_unlock(&iommu->lock); | |
3219 | return ret; | |
3220 | } | |
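
This chunked loop is what backs the exported vfio_dma_rw() helper, which lets a vendor driver read or write user memory by IOVA without pinning it first, and it transparently splits an access that crosses a vfio_dma boundary. Below is a minimal illustrative sketch of a caller, assuming the group-based vfio_dma_rw() prototype of this kernel generation; my_read_desc() and the descriptor it fetches are hypothetical.

/* Hypothetical caller, for illustration only. Assumes:
 *   int vfio_dma_rw(struct vfio_group *group, dma_addr_t user_iova,
 *                   void *data, size_t len, bool write);
 */
static int my_read_desc(struct vfio_group *group, dma_addr_t desc_iova, u32 *out)
{
	/* write == false: copy from the user mapping at desc_iova into *out */
	return vfio_dma_rw(group, desc_iova, out, sizeof(*out), false);
}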
3221 | ||
bdfae1c9 LB |
3222 | static struct iommu_domain * |
3223 | vfio_iommu_type1_group_iommu_domain(void *iommu_data, | |
3224 | struct iommu_group *iommu_group) | |
3225 | { | |
3226 | struct iommu_domain *domain = ERR_PTR(-ENODEV); | |
3227 | struct vfio_iommu *iommu = iommu_data; | |
3228 | struct vfio_domain *d; | |
3229 | ||
3230 | if (!iommu || !iommu_group) | |
3231 | return ERR_PTR(-EINVAL); | |
3232 | ||
3233 | mutex_lock(&iommu->lock); | |
3234 | list_for_each_entry(d, &iommu->domain_list, next) { | |
3235 | if (find_iommu_group(d, iommu_group)) { | |
3236 | domain = d->domain; | |
3237 | break; | |
3238 | } | |
3239 | } | |
3240 | mutex_unlock(&iommu->lock); | |
3241 | ||
3242 | return domain; | |
3243 | } | |
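
The callback above is what the container core exposes to other kernel code as vfio_group_iommu_domain(), so a subsystem that already holds a vfio_group reference can reach the iommu_domain the group is attached to. An illustrative sketch that only inspects the domain's aperture; my_query_aperture() is hypothetical, and the wrapper name is assumed from this callback's purpose.

/* Hypothetical helper, for illustration only. */
static int my_query_aperture(struct vfio_group *group,
			     dma_addr_t *start, dma_addr_t *end)
{
	struct iommu_domain *domain = vfio_group_iommu_domain(group);

	if (IS_ERR(domain))
		return PTR_ERR(domain);

	*start = domain->geometry.aperture_start;
	*end = domain->geometry.aperture_end;
	return 0;
}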
3244 | ||
487ace13 SS |
3245 | static void vfio_iommu_type1_notify(void *iommu_data, |
3246 | enum vfio_iommu_notify_type event) | |
3247 | { | |
3248 | struct vfio_iommu *iommu = iommu_data; | |
3249 | ||
3250 | if (event != VFIO_IOMMU_CONTAINER_CLOSE) | |
3251 | return; | |
3252 | mutex_lock(&iommu->lock); | |
3253 | iommu->container_open = false; | |
3254 | mutex_unlock(&iommu->lock); | |
898b9eae | 3255 | wake_up_all(&iommu->vaddr_wait); |
487ace13 SS |
3256 | } |
3257 | ||
73fa0d10 | 3258 | static const struct vfio_iommu_driver_ops vfio_iommu_driver_ops_type1 = { |
c086de81 KW |
3259 | .name = "vfio-iommu-type1", |
3260 | .owner = THIS_MODULE, | |
3261 | .open = vfio_iommu_type1_open, | |
3262 | .release = vfio_iommu_type1_release, | |
3263 | .ioctl = vfio_iommu_type1_ioctl, | |
3264 | .attach_group = vfio_iommu_type1_attach_group, | |
3265 | .detach_group = vfio_iommu_type1_detach_group, | |
3266 | .pin_pages = vfio_iommu_type1_pin_pages, | |
3267 | .unpin_pages = vfio_iommu_type1_unpin_pages, | |
3268 | .register_notifier = vfio_iommu_type1_register_notifier, | |
3269 | .unregister_notifier = vfio_iommu_type1_unregister_notifier, | |
8d46c0cc | 3270 | .dma_rw = vfio_iommu_type1_dma_rw, |
bdfae1c9 | 3271 | .group_iommu_domain = vfio_iommu_type1_group_iommu_domain, |
487ace13 | 3272 | .notify = vfio_iommu_type1_notify, |
73fa0d10 AW |
3273 | }; |
3274 | ||
3275 | static int __init vfio_iommu_type1_init(void) | |
3276 | { | |
73fa0d10 AW |
3277 | return vfio_register_iommu_driver(&vfio_iommu_driver_ops_type1); |
3278 | } | |
3279 | ||
3280 | static void __exit vfio_iommu_type1_cleanup(void) | |
3281 | { | |
3282 | vfio_unregister_iommu_driver(&vfio_iommu_driver_ops_type1); | |
3283 | } | |
3284 | ||
3285 | module_init(vfio_iommu_type1_init); | |
3286 | module_exit(vfio_iommu_type1_cleanup); | |
3287 | ||
3288 | MODULE_VERSION(DRIVER_VERSION); | |
3289 | MODULE_LICENSE("GPL v2"); | |
3290 | MODULE_AUTHOR(DRIVER_AUTHOR); | |
3291 | MODULE_DESCRIPTION(DRIVER_DESC); |