/*
 * VFIO: IOMMU DMA mapping support for Type1 IOMMU
 *
 * Copyright (C) 2012 Red Hat, Inc. All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc. All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 *
 * We arbitrarily define a Type1 IOMMU as one matching the below code.
 * It could be called the x86 IOMMU as it's designed for AMD-Vi & Intel
 * VT-d, but that makes it harder to re-use as theoretically anyone
 * implementing a similar IOMMU could make use of this. We expect the
 * IOMMU to support the IOMMU API and have few to no restrictions around
 * the IOVA range that can be mapped. The Type1 IOMMU is currently
 * optimized for relatively static mappings of a userspace process with
 * userspace pages pinned into memory. We also assume devices and IOMMU
 * domains are PCI based as the IOMMU API is still centered around a
 * device/bus interface rather than a group interface.
 */

#include <linux/compat.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/workqueue.h>
#include <linux/mdev.h>
#include <linux/notifier.h>
#include <linux/dma-iommu.h>
#include <linux/irqdomain.h>

#define DRIVER_VERSION	"0.2"
#define DRIVER_AUTHOR	"Alex Williamson <alex.williamson@redhat.com>"
#define DRIVER_DESC	"Type1 IOMMU driver for VFIO"

static bool allow_unsafe_interrupts;
module_param_named(allow_unsafe_interrupts,
		   allow_unsafe_interrupts, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(allow_unsafe_interrupts,
		 "Enable VFIO IOMMU support on platforms without interrupt remapping support.");

static bool disable_hugepages;
module_param_named(disable_hugepages,
		   disable_hugepages, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(disable_hugepages,
		 "Disable VFIO IOMMU support for IOMMU hugepages.");

static unsigned int dma_entry_limit __read_mostly = U16_MAX;
module_param_named(dma_entry_limit, dma_entry_limit, uint, 0644);
MODULE_PARM_DESC(dma_entry_limit,
		 "Maximum number of user DMA mappings per container (65535).");

struct vfio_iommu {
	struct list_head	domain_list;
	struct vfio_domain	*external_domain; /* domain for external user */
	struct mutex		lock;
	struct rb_root		dma_list;
	struct blocking_notifier_head notifier;
	unsigned int		dma_avail;
	bool			v2;
	bool			nesting;
};

struct vfio_domain {
	struct iommu_domain	*domain;
	struct list_head	next;
	struct list_head	group_list;
	int			prot;		/* IOMMU_CACHE */
	bool			fgsp;		/* Fine-grained super pages */
};

struct vfio_dma {
	struct rb_node		node;
	dma_addr_t		iova;		/* Device address */
	unsigned long		vaddr;		/* Process virtual addr */
	size_t			size;		/* Map size (bytes) */
	int			prot;		/* IOMMU_READ/WRITE */
	bool			iommu_mapped;
	bool			lock_cap;	/* capable(CAP_IPC_LOCK) */
	struct task_struct	*task;
	struct rb_root		pfn_list;	/* Ex-user pinned pfn list */
};

struct vfio_group {
	struct iommu_group	*iommu_group;
	struct list_head	next;
};

/*
 * Guest RAM pinning working set or DMA target
 */
struct vfio_pfn {
	struct rb_node		node;
	dma_addr_t		iova;		/* Device address */
	unsigned long		pfn;		/* Host pfn */
	atomic_t		ref_count;
};

#define IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu)	\
					(!list_empty(&iommu->domain_list))

static int put_pfn(unsigned long pfn, int prot);

/*
 * This code handles mapping and unmapping of user data buffers
 * into DMA'ble space using the IOMMU
 */

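/*
 * Look up a tracked vfio_dma whose IOVA range overlaps [start, start + size),
 * if any, using the rb-tree ordered by IOVA.
 */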
static struct vfio_dma *vfio_find_dma(struct vfio_iommu *iommu,
				      dma_addr_t start, size_t size)
{
	struct rb_node *node = iommu->dma_list.rb_node;

	while (node) {
		struct vfio_dma *dma = rb_entry(node, struct vfio_dma, node);

		if (start + size <= dma->iova)
			node = node->rb_left;
		else if (start >= dma->iova + dma->size)
			node = node->rb_right;
		else
			return dma;
	}

	return NULL;
}

static void vfio_link_dma(struct vfio_iommu *iommu, struct vfio_dma *new)
{
	struct rb_node **link = &iommu->dma_list.rb_node, *parent = NULL;
	struct vfio_dma *dma;

	while (*link) {
		parent = *link;
		dma = rb_entry(parent, struct vfio_dma, node);

		if (new->iova + new->size <= dma->iova)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, &iommu->dma_list);
}

static void vfio_unlink_dma(struct vfio_iommu *iommu, struct vfio_dma *old)
{
	rb_erase(&old->node, &iommu->dma_list);
}

/*
 * Helper Functions for host iova-pfn list
 */
static struct vfio_pfn *vfio_find_vpfn(struct vfio_dma *dma, dma_addr_t iova)
{
	struct vfio_pfn *vpfn;
	struct rb_node *node = dma->pfn_list.rb_node;

	while (node) {
		vpfn = rb_entry(node, struct vfio_pfn, node);

		if (iova < vpfn->iova)
			node = node->rb_left;
		else if (iova > vpfn->iova)
			node = node->rb_right;
		else
			return vpfn;
	}
	return NULL;
}

static void vfio_link_pfn(struct vfio_dma *dma,
			  struct vfio_pfn *new)
{
	struct rb_node **link, *parent = NULL;
	struct vfio_pfn *vpfn;

	link = &dma->pfn_list.rb_node;
	while (*link) {
		parent = *link;
		vpfn = rb_entry(parent, struct vfio_pfn, node);

		if (new->iova < vpfn->iova)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, &dma->pfn_list);
}

static void vfio_unlink_pfn(struct vfio_dma *dma, struct vfio_pfn *old)
{
	rb_erase(&old->node, &dma->pfn_list);
}

static int vfio_add_to_pfn_list(struct vfio_dma *dma, dma_addr_t iova,
				unsigned long pfn)
{
	struct vfio_pfn *vpfn;

	vpfn = kzalloc(sizeof(*vpfn), GFP_KERNEL);
	if (!vpfn)
		return -ENOMEM;

	vpfn->iova = iova;
	vpfn->pfn = pfn;
	atomic_set(&vpfn->ref_count, 1);
	vfio_link_pfn(dma, vpfn);
	return 0;
}

static void vfio_remove_from_pfn_list(struct vfio_dma *dma,
				      struct vfio_pfn *vpfn)
{
	vfio_unlink_pfn(dma, vpfn);
	kfree(vpfn);
}

static struct vfio_pfn *vfio_iova_get_vfio_pfn(struct vfio_dma *dma,
					       unsigned long iova)
{
	struct vfio_pfn *vpfn = vfio_find_vpfn(dma, iova);

	if (vpfn)
		atomic_inc(&vpfn->ref_count);
	return vpfn;
}

static int vfio_iova_put_vfio_pfn(struct vfio_dma *dma, struct vfio_pfn *vpfn)
{
	int ret = 0;

	if (atomic_dec_and_test(&vpfn->ref_count)) {
		ret = put_pfn(vpfn->pfn, dma->prot);
		vfio_remove_from_pfn_list(dma, vpfn);
	}
	return ret;
}

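/*
 * Adjust the locked_vm accounting of the mm behind dma->task by @npage
 * pages (which may be negative), enforcing RLIMIT_MEMLOCK unless the
 * mapping task held CAP_IPC_LOCK.  @async indicates the caller may be
 * running outside that task's context and must take its own mm reference.
 */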
static int vfio_lock_acct(struct vfio_dma *dma, long npage, bool async)
{
	struct mm_struct *mm;
	int ret;

	if (!npage)
		return 0;

	mm = async ? get_task_mm(dma->task) : dma->task->mm;
	if (!mm)
		return -ESRCH; /* process exited */

	ret = down_write_killable(&mm->mmap_sem);
	if (!ret) {
		if (npage > 0) {
			if (!dma->lock_cap) {
				unsigned long limit;

				limit = task_rlimit(dma->task,
						RLIMIT_MEMLOCK) >> PAGE_SHIFT;

				if (mm->locked_vm + npage > limit)
					ret = -ENOMEM;
			}
		}

		if (!ret)
			mm->locked_vm += npage;

		up_write(&mm->mmap_sem);
	}

	if (async)
		mmput(mm);

	return ret;
}

/*
 * Some mappings aren't backed by a struct page, for example an mmap'd
 * MMIO range for our own or another device. These use a different
 * pfn conversion and shouldn't be tracked as locked pages.
 */
static bool is_invalid_reserved_pfn(unsigned long pfn)
{
	if (pfn_valid(pfn)) {
		bool reserved;
		struct page *tail = pfn_to_page(pfn);
		struct page *head = compound_head(tail);
		reserved = !!(PageReserved(head));
		if (head != tail) {
			/*
			 * "head" is not a dangling pointer
			 * (compound_head takes care of that)
			 * but the hugepage may have been split
			 * from under us (and we may not hold a
			 * reference count on the head page so it can
			 * be reused before we run PageReferenced), so
			 * we've to check PageTail before returning
			 * what we just read.
			 */
			smp_rmb();
			if (PageTail(tail))
				return reserved;
		}
		return PageReserved(tail);
	}

	return true;
}

static int put_pfn(unsigned long pfn, int prot)
{
	if (!is_invalid_reserved_pfn(pfn)) {
		struct page *page = pfn_to_page(pfn);
		if (prot & IOMMU_WRITE)
			SetPageDirty(page);
		put_page(page);
		return 1;
	}
	return 0;
}

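/*
 * Translate a single user virtual address into a host pfn, either by
 * pinning the backing page with get_user_pages*() or, for VM_PFNMAP
 * vmas, by computing the pfn directly from the vma without taking a
 * page reference.
 */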
static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
			 int prot, unsigned long *pfn)
{
	struct page *page[1];
	struct vm_area_struct *vma;
	struct vm_area_struct *vmas[1];
	unsigned int flags = 0;
	int ret;

	if (prot & IOMMU_WRITE)
		flags |= FOLL_WRITE;

	down_read(&mm->mmap_sem);
	if (mm == current->mm) {
		ret = get_user_pages_longterm(vaddr, 1, flags, page, vmas);
	} else {
		ret = get_user_pages_remote(NULL, mm, vaddr, 1, flags, page,
					    vmas, NULL);
		/*
		 * The lifetime of a vaddr_get_pfn() page pin is
		 * userspace-controlled. In the fs-dax case this could
		 * lead to indefinite stalls in filesystem operations.
		 * Disallow attempts to pin fs-dax pages via this
		 * interface.
		 */
		if (ret > 0 && vma_is_fsdax(vmas[0])) {
			ret = -EOPNOTSUPP;
			put_page(page[0]);
		}
	}
	up_read(&mm->mmap_sem);

	if (ret == 1) {
		*pfn = page_to_pfn(page[0]);
		return 0;
	}

	down_read(&mm->mmap_sem);

	vma = find_vma_intersection(mm, vaddr, vaddr + 1);

	if (vma && vma->vm_flags & VM_PFNMAP) {
		*pfn = ((vaddr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
		if (is_invalid_reserved_pfn(*pfn))
			ret = 0;
	}

	up_read(&mm->mmap_sem);
	return ret;
}

/*
 * Attempt to pin pages.  We really don't want to track all the pfns and
 * the iommu can only map chunks of consecutive pfns anyway, so get the
 * first page and all consecutive pages with the same locking.
 */
static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
				  long npage, unsigned long *pfn_base,
				  unsigned long limit)
{
	unsigned long pfn = 0;
	long ret, pinned = 0, lock_acct = 0;
	bool rsvd;
	dma_addr_t iova = vaddr - dma->vaddr + dma->iova;

	/* This code path is only user initiated */
	if (!current->mm)
		return -ENODEV;

	ret = vaddr_get_pfn(current->mm, vaddr, dma->prot, pfn_base);
	if (ret)
		return ret;

	pinned++;
	rsvd = is_invalid_reserved_pfn(*pfn_base);

	/*
	 * Reserved pages aren't counted against the user, externally pinned
	 * pages are already counted against the user.
	 */
	if (!rsvd && !vfio_find_vpfn(dma, iova)) {
		if (!dma->lock_cap && current->mm->locked_vm + 1 > limit) {
			put_pfn(*pfn_base, dma->prot);
			pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", __func__,
					limit << PAGE_SHIFT);
			return -ENOMEM;
		}
		lock_acct++;
	}

	if (unlikely(disable_hugepages))
		goto out;

	/* Lock all the consecutive pages from pfn_base */
	for (vaddr += PAGE_SIZE, iova += PAGE_SIZE; pinned < npage;
	     pinned++, vaddr += PAGE_SIZE, iova += PAGE_SIZE) {
		ret = vaddr_get_pfn(current->mm, vaddr, dma->prot, &pfn);
		if (ret)
			break;

		if (pfn != *pfn_base + pinned ||
		    rsvd != is_invalid_reserved_pfn(pfn)) {
			put_pfn(pfn, dma->prot);
			break;
		}

		if (!rsvd && !vfio_find_vpfn(dma, iova)) {
			if (!dma->lock_cap &&
			    current->mm->locked_vm + lock_acct + 1 > limit) {
				put_pfn(pfn, dma->prot);
				pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n",
					__func__, limit << PAGE_SHIFT);
				ret = -ENOMEM;
				goto unpin_out;
			}
			lock_acct++;
		}
	}

out:
	ret = vfio_lock_acct(dma, lock_acct, false);

unpin_out:
	if (ret) {
		if (!rsvd) {
			for (pfn = *pfn_base ; pinned ; pfn++, pinned--)
				put_pfn(pfn, dma->prot);
		}

		return ret;
	}

	return pinned;
}

static long vfio_unpin_pages_remote(struct vfio_dma *dma, dma_addr_t iova,
				    unsigned long pfn, long npage,
				    bool do_accounting)
{
	long unlocked = 0, locked = 0;
	long i;

	for (i = 0; i < npage; i++, iova += PAGE_SIZE) {
		if (put_pfn(pfn++, dma->prot)) {
			unlocked++;
			if (vfio_find_vpfn(dma, iova))
				locked++;
		}
	}

	if (do_accounting)
		vfio_lock_acct(dma, locked - unlocked, true);

	return unlocked;
}

static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr,
				  unsigned long *pfn_base, bool do_accounting)
{
	struct mm_struct *mm;
	int ret;

	mm = get_task_mm(dma->task);
	if (!mm)
		return -ENODEV;

	ret = vaddr_get_pfn(mm, vaddr, dma->prot, pfn_base);
	if (!ret && do_accounting && !is_invalid_reserved_pfn(*pfn_base)) {
		ret = vfio_lock_acct(dma, 1, true);
		if (ret) {
			put_pfn(*pfn_base, dma->prot);
			if (ret == -ENOMEM)
				pr_warn("%s: Task %s (%d) RLIMIT_MEMLOCK "
					"(%ld) exceeded\n", __func__,
					dma->task->comm, task_pid_nr(dma->task),
					task_rlimit(dma->task, RLIMIT_MEMLOCK));
		}
	}

	mmput(mm);
	return ret;
}

static int vfio_unpin_page_external(struct vfio_dma *dma, dma_addr_t iova,
				    bool do_accounting)
{
	int unlocked;
	struct vfio_pfn *vpfn = vfio_find_vpfn(dma, iova);

	if (!vpfn)
		return 0;

	unlocked = vfio_iova_put_vfio_pfn(dma, vpfn);

	if (do_accounting)
		vfio_lock_acct(dma, -unlocked, true);

	return unlocked;
}

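/*
 * Pin the host pages backing an array of user IOVAs (passed as pfns) on
 * behalf of an external user such as an mdev vendor driver, returning the
 * host pfns and tracking each pin in the per-vfio_dma pfn_list.
 */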
static int vfio_iommu_type1_pin_pages(void *iommu_data,
				      unsigned long *user_pfn,
				      int npage, int prot,
				      unsigned long *phys_pfn)
{
	struct vfio_iommu *iommu = iommu_data;
	int i, j, ret;
	unsigned long remote_vaddr;
	struct vfio_dma *dma;
	bool do_accounting;

	if (!iommu || !user_pfn || !phys_pfn)
		return -EINVAL;

	/* Supported for v2 version only */
	if (!iommu->v2)
		return -EACCES;

	mutex_lock(&iommu->lock);

	/* Fail if notifier list is empty */
	if ((!iommu->external_domain) || (!iommu->notifier.head)) {
		ret = -EINVAL;
		goto pin_done;
	}

	/*
	 * If an iommu capable domain exists in the container then all pages
	 * are already pinned and accounted.  Accounting should be done if
	 * there is no iommu capable domain in the container.
	 */
	do_accounting = !IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu);

	for (i = 0; i < npage; i++) {
		dma_addr_t iova;
		struct vfio_pfn *vpfn;

		iova = user_pfn[i] << PAGE_SHIFT;
		dma = vfio_find_dma(iommu, iova, PAGE_SIZE);
		if (!dma) {
			ret = -EINVAL;
			goto pin_unwind;
		}

		if ((dma->prot & prot) != prot) {
			ret = -EPERM;
			goto pin_unwind;
		}

		vpfn = vfio_iova_get_vfio_pfn(dma, iova);
		if (vpfn) {
			phys_pfn[i] = vpfn->pfn;
			continue;
		}

		remote_vaddr = dma->vaddr + iova - dma->iova;
		ret = vfio_pin_page_external(dma, remote_vaddr, &phys_pfn[i],
					     do_accounting);
		if (ret)
			goto pin_unwind;

		ret = vfio_add_to_pfn_list(dma, iova, phys_pfn[i]);
		if (ret) {
			vfio_unpin_page_external(dma, iova, do_accounting);
			goto pin_unwind;
		}
	}

	ret = i;
	goto pin_done;

pin_unwind:
	phys_pfn[i] = 0;
	for (j = 0; j < i; j++) {
		dma_addr_t iova;

		iova = user_pfn[j] << PAGE_SHIFT;
		dma = vfio_find_dma(iommu, iova, PAGE_SIZE);
		vfio_unpin_page_external(dma, iova, do_accounting);
		phys_pfn[j] = 0;
	}
pin_done:
	mutex_unlock(&iommu->lock);
	return ret;
}

static int vfio_iommu_type1_unpin_pages(void *iommu_data,
					unsigned long *user_pfn,
					int npage)
{
	struct vfio_iommu *iommu = iommu_data;
	bool do_accounting;
	int i;

	if (!iommu || !user_pfn)
		return -EINVAL;

	/* Supported for v2 version only */
	if (!iommu->v2)
		return -EACCES;

	mutex_lock(&iommu->lock);

	if (!iommu->external_domain) {
		mutex_unlock(&iommu->lock);
		return -EINVAL;
	}

	do_accounting = !IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu);
	for (i = 0; i < npage; i++) {
		struct vfio_dma *dma;
		dma_addr_t iova;

		iova = user_pfn[i] << PAGE_SHIFT;
		dma = vfio_find_dma(iommu, iova, PAGE_SIZE);
		if (!dma)
			goto unpin_exit;
		vfio_unpin_page_external(dma, iova, do_accounting);
	}

unpin_exit:
	mutex_unlock(&iommu->lock);
	return i > npage ? npage : (i > 0 ? i : -EINVAL);
}

static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
			     bool do_accounting)
{
	dma_addr_t iova = dma->iova, end = dma->iova + dma->size;
	struct vfio_domain *domain, *d;
	long unlocked = 0;

	if (!dma->size)
		return 0;

	if (!IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu))
		return 0;

	/*
	 * We use the IOMMU to track the physical addresses, otherwise we'd
	 * need a much more complicated tracking system.  Unfortunately that
	 * means we need to use one of the iommu domains to figure out the
	 * pfns to unpin.  The rest need to be unmapped in advance so we have
	 * no iommu translations remaining when the pages are unpinned.
	 */
	domain = d = list_first_entry(&iommu->domain_list,
				      struct vfio_domain, next);

	list_for_each_entry_continue(d, &iommu->domain_list, next) {
		iommu_unmap(d->domain, dma->iova, dma->size);
		cond_resched();
	}

	while (iova < end) {
		size_t unmapped, len;
		phys_addr_t phys, next;

		phys = iommu_iova_to_phys(domain->domain, iova);
		if (WARN_ON(!phys)) {
			iova += PAGE_SIZE;
			continue;
		}

		/*
		 * To optimize for fewer iommu_unmap() calls, each of which
		 * may require hardware cache flushing, try to find the
		 * largest contiguous physical memory chunk to unmap.
		 */
		for (len = PAGE_SIZE;
		     !domain->fgsp && iova + len < end; len += PAGE_SIZE) {
			next = iommu_iova_to_phys(domain->domain, iova + len);
			if (next != phys + len)
				break;
		}

		unmapped = iommu_unmap(domain->domain, iova, len);
		if (WARN_ON(!unmapped))
			break;

		unlocked += vfio_unpin_pages_remote(dma, iova,
						    phys >> PAGE_SHIFT,
						    unmapped >> PAGE_SHIFT,
						    false);
		iova += unmapped;

		cond_resched();
	}

	dma->iommu_mapped = false;
	if (do_accounting) {
		vfio_lock_acct(dma, -unlocked, true);
		return 0;
	}
	return unlocked;
}

static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma)
{
	vfio_unmap_unpin(iommu, dma, true);
	vfio_unlink_dma(iommu, dma);
	put_task_struct(dma->task);
	kfree(dma);
	iommu->dma_avail++;
}

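/*
 * Compute the intersection of IOMMU page sizes supported by all domains
 * in the container; sub-PAGE_SIZE support is folded up to PAGE_SIZE.
 */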
static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu)
{
	struct vfio_domain *domain;
	unsigned long bitmap = ULONG_MAX;

	mutex_lock(&iommu->lock);
	list_for_each_entry(domain, &iommu->domain_list, next)
		bitmap &= domain->domain->pgsize_bitmap;
	mutex_unlock(&iommu->lock);

	/*
	 * In case the IOMMU supports page sizes smaller than PAGE_SIZE
	 * we pretend PAGE_SIZE is supported and hide sub-PAGE_SIZE sizes.
	 * That way the user will be able to map/unmap buffers whose size/
	 * start address is aligned with PAGE_SIZE. Pinning code uses that
	 * granularity while iommu driver can use the sub-PAGE_SIZE size
	 * to map the buffer.
	 */
	if (bitmap & ~PAGE_MASK) {
		bitmap &= PAGE_MASK;
		bitmap |= PAGE_SIZE;
	}

	return bitmap;
}

static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
			     struct vfio_iommu_type1_dma_unmap *unmap)
{
	uint64_t mask;
	struct vfio_dma *dma, *dma_last = NULL;
	size_t unmapped = 0;
	int ret = 0, retries = 0;

	mask = ((uint64_t)1 << __ffs(vfio_pgsize_bitmap(iommu))) - 1;

	if (unmap->iova & mask)
		return -EINVAL;
	if (!unmap->size || unmap->size & mask)
		return -EINVAL;
	if (unmap->iova + unmap->size < unmap->iova ||
	    unmap->size > SIZE_MAX)
		return -EINVAL;

	WARN_ON(mask & PAGE_MASK);
again:
	mutex_lock(&iommu->lock);

	/*
	 * vfio-iommu-type1 (v1) - User mappings were coalesced together to
	 * avoid tracking individual mappings.  This means that the granularity
	 * of the original mapping was lost and the user was allowed to attempt
	 * to unmap any range.  Depending on the contiguousness of physical
	 * memory and page sizes supported by the IOMMU, arbitrary unmaps may
	 * or may not have worked.  We only guaranteed unmap granularity
	 * matching the original mapping; even though it was untracked here,
	 * the original mappings are reflected in IOMMU mappings.  This
	 * resulted in a couple unusual behaviors.  First, if a range is not
	 * able to be unmapped, ex. a set of 4k pages that was mapped as a
	 * 2M hugepage into the IOMMU, the unmap ioctl returns success but with
	 * a zero sized unmap.  Also, if an unmap request overlaps the first
	 * address of a hugepage, the IOMMU will unmap the entire hugepage.
	 * This also returns success and the returned unmap size reflects the
	 * actual size unmapped.
	 *
	 * We attempt to maintain compatibility with this "v1" interface, but
	 * we take control out of the hands of the IOMMU.  Therefore, an unmap
	 * request offset from the beginning of the original mapping will
	 * return success with zero sized unmap.  And an unmap request covering
	 * the first iova of mapping will unmap the entire range.
	 *
	 * The v2 version of this interface intends to be more deterministic.
	 * Unmap requests must fully cover previous mappings.  Multiple
	 * mappings may still be unmapped by specifying large ranges, but there
	 * must not be any previous mappings bisected by the range.  An error
	 * will be returned if these conditions are not met.  The v2 interface
	 * will only return success and a size of zero if there were no
	 * mappings within the range.
	 */
	if (iommu->v2) {
		dma = vfio_find_dma(iommu, unmap->iova, 1);
		if (dma && dma->iova != unmap->iova) {
			ret = -EINVAL;
			goto unlock;
		}
		dma = vfio_find_dma(iommu, unmap->iova + unmap->size - 1, 0);
		if (dma && dma->iova + dma->size != unmap->iova + unmap->size) {
			ret = -EINVAL;
			goto unlock;
		}
	}

	while ((dma = vfio_find_dma(iommu, unmap->iova, unmap->size))) {
		if (!iommu->v2 && unmap->iova > dma->iova)
			break;
		/*
		 * Task with same address space who mapped this iova range is
		 * allowed to unmap the iova range.
		 */
		if (dma->task->mm != current->mm)
			break;

		if (!RB_EMPTY_ROOT(&dma->pfn_list)) {
			struct vfio_iommu_type1_dma_unmap nb_unmap;

			if (dma_last == dma) {
				BUG_ON(++retries > 10);
			} else {
				dma_last = dma;
				retries = 0;
			}

			nb_unmap.iova = dma->iova;
			nb_unmap.size = dma->size;

			/*
			 * Notify anyone (mdev vendor drivers) to invalidate and
			 * unmap iovas within the range we're about to unmap.
			 * Vendor drivers MUST unpin pages in response to an
			 * invalidation.
			 */
			mutex_unlock(&iommu->lock);
			blocking_notifier_call_chain(&iommu->notifier,
						    VFIO_IOMMU_NOTIFY_DMA_UNMAP,
						    &nb_unmap);
			goto again;
		}
		unmapped += dma->size;
		vfio_remove_dma(iommu, dma);
	}

unlock:
	mutex_unlock(&iommu->lock);

	/* Report how much was unmapped */
	unmap->size = unmapped;

	return ret;
}

/*
 * Turns out AMD IOMMU has a page table bug where it won't map large pages
 * to a region that previously mapped smaller pages.  This should be fixed
 * soon, so this is just a temporary workaround to break mappings down into
 * PAGE_SIZE.  Better to map smaller pages than nothing.
 */
static int map_try_harder(struct vfio_domain *domain, dma_addr_t iova,
			  unsigned long pfn, long npage, int prot)
{
	long i;
	int ret = 0;

	for (i = 0; i < npage; i++, pfn++, iova += PAGE_SIZE) {
		ret = iommu_map(domain->domain, iova,
				(phys_addr_t)pfn << PAGE_SHIFT,
				PAGE_SIZE, prot | domain->prot);
		if (ret)
			break;
	}

	for (; i < npage && i > 0; i--, iova -= PAGE_SIZE)
		iommu_unmap(domain->domain, iova, PAGE_SIZE);

	return ret;
}

static int vfio_iommu_map(struct vfio_iommu *iommu, dma_addr_t iova,
			  unsigned long pfn, long npage, int prot)
{
	struct vfio_domain *d;
	int ret;

	list_for_each_entry(d, &iommu->domain_list, next) {
		ret = iommu_map(d->domain, iova, (phys_addr_t)pfn << PAGE_SHIFT,
				npage << PAGE_SHIFT, prot | d->prot);
		if (ret) {
			if (ret != -EBUSY ||
			    map_try_harder(d, iova, pfn, npage, prot))
				goto unwind;
		}

		cond_resched();
	}

	return 0;

unwind:
	list_for_each_entry_continue_reverse(d, &iommu->domain_list, next)
		iommu_unmap(d->domain, iova, npage << PAGE_SHIFT);

	return ret;
}

static int vfio_pin_map_dma(struct vfio_iommu *iommu, struct vfio_dma *dma,
			    size_t map_size)
{
	dma_addr_t iova = dma->iova;
	unsigned long vaddr = dma->vaddr;
	size_t size = map_size;
	long npage;
	unsigned long pfn, limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	int ret = 0;

	while (size) {
		/* Pin a contiguous chunk of memory */
		npage = vfio_pin_pages_remote(dma, vaddr + dma->size,
					      size >> PAGE_SHIFT, &pfn, limit);
		if (npage <= 0) {
			WARN_ON(!npage);
			ret = (int)npage;
			break;
		}

		/* Map it! */
		ret = vfio_iommu_map(iommu, iova + dma->size, pfn, npage,
				     dma->prot);
		if (ret) {
			vfio_unpin_pages_remote(dma, iova + dma->size, pfn,
						npage, true);
			break;
		}

		size -= npage << PAGE_SHIFT;
		dma->size += npage << PAGE_SHIFT;
	}

	dma->iommu_mapped = true;

	if (ret)
		vfio_remove_dma(iommu, dma);

	return ret;
}

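/*
 * Handle VFIO_IOMMU_MAP_DMA: validate the request, create the vfio_dma
 * tracking structure, and pin/map the range unless only an external
 * (mdev) domain is present, in which case pages are pinned later on demand.
 */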
static int vfio_dma_do_map(struct vfio_iommu *iommu,
			   struct vfio_iommu_type1_dma_map *map)
{
	dma_addr_t iova = map->iova;
	unsigned long vaddr = map->vaddr;
	size_t size = map->size;
	int ret = 0, prot = 0;
	uint64_t mask;
	struct vfio_dma *dma;

	/* Verify that none of our __u64 fields overflow */
	if (map->size != size || map->vaddr != vaddr || map->iova != iova)
		return -EINVAL;

	mask = ((uint64_t)1 << __ffs(vfio_pgsize_bitmap(iommu))) - 1;

	WARN_ON(mask & PAGE_MASK);

	/* READ/WRITE from device perspective */
	if (map->flags & VFIO_DMA_MAP_FLAG_WRITE)
		prot |= IOMMU_WRITE;
	if (map->flags & VFIO_DMA_MAP_FLAG_READ)
		prot |= IOMMU_READ;

	if (!prot || !size || (size | iova | vaddr) & mask)
		return -EINVAL;

	/* Don't allow IOVA or virtual address wrap */
	if (iova + size - 1 < iova || vaddr + size - 1 < vaddr)
		return -EINVAL;

	mutex_lock(&iommu->lock);

	if (vfio_find_dma(iommu, iova, size)) {
		ret = -EEXIST;
		goto out_unlock;
	}

	if (!iommu->dma_avail) {
		ret = -ENOSPC;
		goto out_unlock;
	}

	dma = kzalloc(sizeof(*dma), GFP_KERNEL);
	if (!dma) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	iommu->dma_avail--;
	dma->iova = iova;
	dma->vaddr = vaddr;
	dma->prot = prot;

	/*
	 * We need to be able to both add to a task's locked memory and test
	 * against the locked memory limit and we need to be able to do both
	 * outside of this call path as pinning can be asynchronous via the
	 * external interfaces for mdev devices.  RLIMIT_MEMLOCK requires a
	 * task_struct and VM locked pages requires an mm_struct, however
	 * holding an indefinite mm reference is not recommended, therefore we
	 * only hold a reference to a task.  We could hold a reference to
	 * current, however QEMU uses this call path through vCPU threads,
	 * which can be killed resulting in a NULL mm and failure in the unmap
	 * path when called via a different thread.  Avoid this problem by
	 * using the group_leader as threads within the same group require
	 * both CLONE_THREAD and CLONE_VM and will therefore use the same
	 * mm_struct.
	 *
	 * Previously we also used the task for testing CAP_IPC_LOCK at the
	 * time of pinning and accounting, however has_capability() makes use
	 * of real_cred, a copy-on-write field, so we can't guarantee that it
	 * matches group_leader, or in fact that it might not change by the
	 * time it's evaluated.  If a process were to call MAP_DMA with
	 * CAP_IPC_LOCK but later drop it, it doesn't make sense that they
	 * possibly see different results for an iommu_mapped vfio_dma vs
	 * externally mapped.  Therefore track CAP_IPC_LOCK in vfio_dma at the
	 * time of calling MAP_DMA.
	 */
	get_task_struct(current->group_leader);
	dma->task = current->group_leader;
	dma->lock_cap = capable(CAP_IPC_LOCK);

	dma->pfn_list = RB_ROOT;

	/* Insert zero-sized and grow as we map chunks of it */
	vfio_link_dma(iommu, dma);

	/* Don't pin and map if container doesn't contain IOMMU capable domain*/
	if (!IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu))
		dma->size = size;
	else
		ret = vfio_pin_map_dma(iommu, dma, size);

out_unlock:
	mutex_unlock(&iommu->lock);
	return ret;
}

static int vfio_bus_type(struct device *dev, void *data)
{
	struct bus_type **bus = data;

	if (*bus && *bus != dev->bus)
		return -EINVAL;

	*bus = dev->bus;

	return 0;
}

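/*
 * Replay every tracked DMA mapping onto a newly attached domain: ranges
 * already mapped in an existing domain are looked up via
 * iommu_iova_to_phys(), while ranges that were never IOMMU mapped are
 * pinned first.
 */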
static int vfio_iommu_replay(struct vfio_iommu *iommu,
			     struct vfio_domain *domain)
{
	struct vfio_domain *d;
	struct rb_node *n;
	unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	int ret;

	/* Arbitrarily pick the first domain in the list for lookups */
	d = list_first_entry(&iommu->domain_list, struct vfio_domain, next);
	n = rb_first(&iommu->dma_list);

	for (; n; n = rb_next(n)) {
		struct vfio_dma *dma;
		dma_addr_t iova;

		dma = rb_entry(n, struct vfio_dma, node);
		iova = dma->iova;

		while (iova < dma->iova + dma->size) {
			phys_addr_t phys;
			size_t size;

			if (dma->iommu_mapped) {
				phys_addr_t p;
				dma_addr_t i;

				phys = iommu_iova_to_phys(d->domain, iova);

				if (WARN_ON(!phys)) {
					iova += PAGE_SIZE;
					continue;
				}

				size = PAGE_SIZE;
				p = phys + size;
				i = iova + size;
				while (i < dma->iova + dma->size &&
				       p == iommu_iova_to_phys(d->domain, i)) {
					size += PAGE_SIZE;
					p += PAGE_SIZE;
					i += PAGE_SIZE;
				}
			} else {
				unsigned long pfn;
				unsigned long vaddr = dma->vaddr +
						     (iova - dma->iova);
				size_t n = dma->iova + dma->size - iova;
				long npage;

				npage = vfio_pin_pages_remote(dma, vaddr,
							      n >> PAGE_SHIFT,
							      &pfn, limit);
				if (npage <= 0) {
					WARN_ON(!npage);
					ret = (int)npage;
					return ret;
				}

				phys = pfn << PAGE_SHIFT;
				size = npage << PAGE_SHIFT;
			}

			ret = iommu_map(domain->domain, iova, phys,
					size, dma->prot | domain->prot);
			if (ret)
				return ret;

			iova += size;
		}
		dma->iommu_mapped = true;
	}
	return 0;
}

/*
 * We change our unmap behavior slightly depending on whether the IOMMU
 * supports fine-grained superpages.  IOMMUs like AMD-Vi will use a superpage
 * for practically any contiguous power-of-two mapping we give it.  This means
 * we don't need to look for contiguous chunks ourselves to make unmapping
 * more efficient.  On IOMMUs with coarse-grained super pages, like Intel VT-d
 * with discrete 2M/1G/512G/1T superpages, identifying contiguous chunks
 * significantly boosts non-hugetlbfs mappings and doesn't seem to hurt when
 * hugetlbfs is in use.
 */
static void vfio_test_domain_fgsp(struct vfio_domain *domain)
{
	struct page *pages;
	int ret, order = get_order(PAGE_SIZE * 2);

	pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!pages)
		return;

	ret = iommu_map(domain->domain, 0, page_to_phys(pages), PAGE_SIZE * 2,
			IOMMU_READ | IOMMU_WRITE | domain->prot);
	if (!ret) {
		size_t unmapped = iommu_unmap(domain->domain, 0, PAGE_SIZE);

		if (unmapped == PAGE_SIZE)
			iommu_unmap(domain->domain, PAGE_SIZE, PAGE_SIZE);
		else
			domain->fgsp = true;
	}

	__free_pages(pages, order);
}

static struct vfio_group *find_iommu_group(struct vfio_domain *domain,
					   struct iommu_group *iommu_group)
{
	struct vfio_group *g;

	list_for_each_entry(g, &domain->group_list, next) {
		if (g->iommu_group == iommu_group)
			return g;
	}

	return NULL;
}

static bool vfio_iommu_has_sw_msi(struct iommu_group *group, phys_addr_t *base)
{
	struct list_head group_resv_regions;
	struct iommu_resv_region *region, *next;
	bool ret = false;

	INIT_LIST_HEAD(&group_resv_regions);
	iommu_get_group_resv_regions(group, &group_resv_regions);
	list_for_each_entry(region, &group_resv_regions, list) {
		/*
		 * The presence of any 'real' MSI regions should take
		 * precedence over the software-managed one if the
		 * IOMMU driver happens to advertise both types.
		 */
		if (region->type == IOMMU_RESV_MSI) {
			ret = false;
			break;
		}

		if (region->type == IOMMU_RESV_SW_MSI) {
			*base = region->start;
			ret = true;
		}
	}
	list_for_each_entry_safe(region, next, &group_resv_regions, list)
		kfree(region);
	return ret;
}

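/*
 * Attach an iommu_group to the container: mdev groups join (or create) the
 * external domain, while physical device groups get an IOMMU domain
 * allocated (reusing a compatible existing one when possible) and all
 * current mappings replayed onto it.
 */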
static int vfio_iommu_type1_attach_group(void *iommu_data,
					 struct iommu_group *iommu_group)
{
	struct vfio_iommu *iommu = iommu_data;
	struct vfio_group *group;
	struct vfio_domain *domain, *d;
	struct bus_type *bus = NULL, *mdev_bus;
	int ret;
	bool resv_msi, msi_remap;
	phys_addr_t resv_msi_base;

	mutex_lock(&iommu->lock);

	list_for_each_entry(d, &iommu->domain_list, next) {
		if (find_iommu_group(d, iommu_group)) {
			mutex_unlock(&iommu->lock);
			return -EINVAL;
		}
	}

	if (iommu->external_domain) {
		if (find_iommu_group(iommu->external_domain, iommu_group)) {
			mutex_unlock(&iommu->lock);
			return -EINVAL;
		}
	}

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!group || !domain) {
		ret = -ENOMEM;
		goto out_free;
	}

	group->iommu_group = iommu_group;

	/* Determine bus_type in order to allocate a domain */
	ret = iommu_group_for_each_dev(iommu_group, &bus, vfio_bus_type);
	if (ret)
		goto out_free;

	mdev_bus = symbol_get(mdev_bus_type);

	if (mdev_bus) {
		if ((bus == mdev_bus) && !iommu_present(bus)) {
			symbol_put(mdev_bus_type);
			if (!iommu->external_domain) {
				INIT_LIST_HEAD(&domain->group_list);
				iommu->external_domain = domain;
			} else
				kfree(domain);

			list_add(&group->next,
				 &iommu->external_domain->group_list);
			mutex_unlock(&iommu->lock);
			return 0;
		}
		symbol_put(mdev_bus_type);
	}

	domain->domain = iommu_domain_alloc(bus);
	if (!domain->domain) {
		ret = -EIO;
		goto out_free;
	}

	if (iommu->nesting) {
		int attr = 1;

		ret = iommu_domain_set_attr(domain->domain, DOMAIN_ATTR_NESTING,
					    &attr);
		if (ret)
			goto out_domain;
	}

	ret = iommu_attach_group(domain->domain, iommu_group);
	if (ret)
		goto out_domain;

	resv_msi = vfio_iommu_has_sw_msi(iommu_group, &resv_msi_base);

	INIT_LIST_HEAD(&domain->group_list);
	list_add(&group->next, &domain->group_list);

	msi_remap = irq_domain_check_msi_remap() ||
		    iommu_capable(bus, IOMMU_CAP_INTR_REMAP);

	if (!allow_unsafe_interrupts && !msi_remap) {
		pr_warn("%s: No interrupt remapping support. Use the module param \"allow_unsafe_interrupts\" to enable VFIO IOMMU support on this platform\n",
		       __func__);
		ret = -EPERM;
		goto out_detach;
	}

	if (iommu_capable(bus, IOMMU_CAP_CACHE_COHERENCY))
		domain->prot |= IOMMU_CACHE;

	/*
	 * Try to match an existing compatible domain.  We don't want to
	 * preclude an IOMMU driver supporting multiple bus_types and being
	 * able to include different bus_types in the same IOMMU domain, so
	 * we test whether the domains use the same iommu_ops rather than
	 * testing if they're on the same bus_type.
	 */
	list_for_each_entry(d, &iommu->domain_list, next) {
		if (d->domain->ops == domain->domain->ops &&
		    d->prot == domain->prot) {
			iommu_detach_group(domain->domain, iommu_group);
			if (!iommu_attach_group(d->domain, iommu_group)) {
				list_add(&group->next, &d->group_list);
				iommu_domain_free(domain->domain);
				kfree(domain);
				mutex_unlock(&iommu->lock);
				return 0;
			}

			ret = iommu_attach_group(domain->domain, iommu_group);
			if (ret)
				goto out_domain;
		}
	}

	vfio_test_domain_fgsp(domain);

	/* replay mappings on new domains */
	ret = vfio_iommu_replay(iommu, domain);
	if (ret)
		goto out_detach;

	if (resv_msi) {
		ret = iommu_get_msi_cookie(domain->domain, resv_msi_base);
		if (ret)
			goto out_detach;
	}

	list_add(&domain->next, &iommu->domain_list);

	mutex_unlock(&iommu->lock);

	return 0;

out_detach:
	iommu_detach_group(domain->domain, iommu_group);
out_domain:
	iommu_domain_free(domain->domain);
out_free:
	kfree(domain);
	kfree(group);
	mutex_unlock(&iommu->lock);
	return ret;
}

static void vfio_iommu_unmap_unpin_all(struct vfio_iommu *iommu)
{
	struct rb_node *node;

	while ((node = rb_first(&iommu->dma_list)))
		vfio_remove_dma(iommu, rb_entry(node, struct vfio_dma, node));
}

static void vfio_iommu_unmap_unpin_reaccount(struct vfio_iommu *iommu)
{
	struct rb_node *n, *p;

	n = rb_first(&iommu->dma_list);
	for (; n; n = rb_next(n)) {
		struct vfio_dma *dma;
		long locked = 0, unlocked = 0;

		dma = rb_entry(n, struct vfio_dma, node);
		unlocked += vfio_unmap_unpin(iommu, dma, false);
		p = rb_first(&dma->pfn_list);
		for (; p; p = rb_next(p)) {
			struct vfio_pfn *vpfn = rb_entry(p, struct vfio_pfn,
							 node);

			if (!is_invalid_reserved_pfn(vpfn->pfn))
				locked++;
		}
		vfio_lock_acct(dma, locked - unlocked, true);
	}
}

static void vfio_sanity_check_pfn_list(struct vfio_iommu *iommu)
{
	struct rb_node *n;

	n = rb_first(&iommu->dma_list);
	for (; n; n = rb_next(n)) {
		struct vfio_dma *dma;

		dma = rb_entry(n, struct vfio_dma, node);

		if (WARN_ON(!RB_EMPTY_ROOT(&dma->pfn_list)))
			break;
	}
	/* mdev vendor driver must unregister notifier */
	WARN_ON(iommu->notifier.head);
}

static void vfio_iommu_type1_detach_group(void *iommu_data,
					  struct iommu_group *iommu_group)
{
	struct vfio_iommu *iommu = iommu_data;
	struct vfio_domain *domain;
	struct vfio_group *group;

	mutex_lock(&iommu->lock);

	if (iommu->external_domain) {
		group = find_iommu_group(iommu->external_domain, iommu_group);
		if (group) {
			list_del(&group->next);
			kfree(group);

			if (list_empty(&iommu->external_domain->group_list)) {
				vfio_sanity_check_pfn_list(iommu);

				if (!IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu))
					vfio_iommu_unmap_unpin_all(iommu);

				kfree(iommu->external_domain);
				iommu->external_domain = NULL;
			}
			goto detach_group_done;
		}
	}

	list_for_each_entry(domain, &iommu->domain_list, next) {
		group = find_iommu_group(domain, iommu_group);
		if (!group)
			continue;

		iommu_detach_group(domain->domain, iommu_group);
		list_del(&group->next);
		kfree(group);
		/*
		 * Group ownership provides privilege, if the group list is
		 * empty, the domain goes away.  If it's the last domain with
		 * an iommu and no external domain exists, then all the
		 * mappings go away too.  If it's the last domain with an
		 * iommu and an external domain does exist, update accounting.
		 */
		if (list_empty(&domain->group_list)) {
			if (list_is_singular(&iommu->domain_list)) {
				if (!iommu->external_domain)
					vfio_iommu_unmap_unpin_all(iommu);
				else
					vfio_iommu_unmap_unpin_reaccount(iommu);
			}
			iommu_domain_free(domain->domain);
			list_del(&domain->next);
			kfree(domain);
		}
		break;
	}

detach_group_done:
	mutex_unlock(&iommu->lock);
}

static void *vfio_iommu_type1_open(unsigned long arg)
{
	struct vfio_iommu *iommu;

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return ERR_PTR(-ENOMEM);

	switch (arg) {
	case VFIO_TYPE1_IOMMU:
		break;
	case VFIO_TYPE1_NESTING_IOMMU:
		iommu->nesting = true;
	case VFIO_TYPE1v2_IOMMU:
		iommu->v2 = true;
		break;
	default:
		kfree(iommu);
		return ERR_PTR(-EINVAL);
	}

	INIT_LIST_HEAD(&iommu->domain_list);
	iommu->dma_list = RB_ROOT;
	iommu->dma_avail = dma_entry_limit;
	mutex_init(&iommu->lock);
	BLOCKING_INIT_NOTIFIER_HEAD(&iommu->notifier);

	return iommu;
}

static void vfio_release_domain(struct vfio_domain *domain, bool external)
{
	struct vfio_group *group, *group_tmp;

	list_for_each_entry_safe(group, group_tmp,
				 &domain->group_list, next) {
		if (!external)
			iommu_detach_group(domain->domain, group->iommu_group);
		list_del(&group->next);
		kfree(group);
	}

	if (!external)
		iommu_domain_free(domain->domain);
}

static void vfio_iommu_type1_release(void *iommu_data)
{
	struct vfio_iommu *iommu = iommu_data;
	struct vfio_domain *domain, *domain_tmp;

	if (iommu->external_domain) {
		vfio_release_domain(iommu->external_domain, true);
		vfio_sanity_check_pfn_list(iommu);
		kfree(iommu->external_domain);
	}

	vfio_iommu_unmap_unpin_all(iommu);

	list_for_each_entry_safe(domain, domain_tmp,
				 &iommu->domain_list, next) {
		vfio_release_domain(domain, false);
		list_del(&domain->next);
		kfree(domain);
	}
	kfree(iommu);
}

static int vfio_domains_have_iommu_cache(struct vfio_iommu *iommu)
{
	struct vfio_domain *domain;
	int ret = 1;

	mutex_lock(&iommu->lock);
	list_for_each_entry(domain, &iommu->domain_list, next) {
		if (!(domain->prot & IOMMU_CACHE)) {
			ret = 0;
			break;
		}
	}
	mutex_unlock(&iommu->lock);

	return ret;
}

static long vfio_iommu_type1_ioctl(void *iommu_data,
				   unsigned int cmd, unsigned long arg)
{
	struct vfio_iommu *iommu = iommu_data;
	unsigned long minsz;

	if (cmd == VFIO_CHECK_EXTENSION) {
		switch (arg) {
		case VFIO_TYPE1_IOMMU:
		case VFIO_TYPE1v2_IOMMU:
		case VFIO_TYPE1_NESTING_IOMMU:
			return 1;
		case VFIO_DMA_CC_IOMMU:
			if (!iommu)
				return 0;
			return vfio_domains_have_iommu_cache(iommu);
		default:
			return 0;
		}
	} else if (cmd == VFIO_IOMMU_GET_INFO) {
		struct vfio_iommu_type1_info info;

		minsz = offsetofend(struct vfio_iommu_type1_info, iova_pgsizes);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		info.flags = VFIO_IOMMU_INFO_PGSIZES;

		info.iova_pgsizes = vfio_pgsize_bitmap(iommu);

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_IOMMU_MAP_DMA) {
		struct vfio_iommu_type1_dma_map map;
		uint32_t mask = VFIO_DMA_MAP_FLAG_READ |
				VFIO_DMA_MAP_FLAG_WRITE;

		minsz = offsetofend(struct vfio_iommu_type1_dma_map, size);

		if (copy_from_user(&map, (void __user *)arg, minsz))
			return -EFAULT;

		if (map.argsz < minsz || map.flags & ~mask)
			return -EINVAL;

		return vfio_dma_do_map(iommu, &map);

	} else if (cmd == VFIO_IOMMU_UNMAP_DMA) {
		struct vfio_iommu_type1_dma_unmap unmap;
		long ret;

		minsz = offsetofend(struct vfio_iommu_type1_dma_unmap, size);

		if (copy_from_user(&unmap, (void __user *)arg, minsz))
			return -EFAULT;

		if (unmap.argsz < minsz || unmap.flags)
			return -EINVAL;

		ret = vfio_dma_do_unmap(iommu, &unmap);
		if (ret)
			return ret;

		return copy_to_user((void __user *)arg, &unmap, minsz) ?
			-EFAULT : 0;
	}

	return -ENOTTY;
}

static int vfio_iommu_type1_register_notifier(void *iommu_data,
					      unsigned long *events,
					      struct notifier_block *nb)
{
	struct vfio_iommu *iommu = iommu_data;

	/* clear known events */
	*events &= ~VFIO_IOMMU_NOTIFY_DMA_UNMAP;

	/* refuse to register if still events remaining */
	if (*events)
		return -EINVAL;

	return blocking_notifier_chain_register(&iommu->notifier, nb);
}

static int vfio_iommu_type1_unregister_notifier(void *iommu_data,
						struct notifier_block *nb)
{
	struct vfio_iommu *iommu = iommu_data;

	return blocking_notifier_chain_unregister(&iommu->notifier, nb);
}

static const struct vfio_iommu_driver_ops vfio_iommu_driver_ops_type1 = {
	.name			= "vfio-iommu-type1",
	.owner			= THIS_MODULE,
	.open			= vfio_iommu_type1_open,
	.release		= vfio_iommu_type1_release,
	.ioctl			= vfio_iommu_type1_ioctl,
	.attach_group		= vfio_iommu_type1_attach_group,
	.detach_group		= vfio_iommu_type1_detach_group,
	.pin_pages		= vfio_iommu_type1_pin_pages,
	.unpin_pages		= vfio_iommu_type1_unpin_pages,
	.register_notifier	= vfio_iommu_type1_register_notifier,
	.unregister_notifier	= vfio_iommu_type1_unregister_notifier,
};

static int __init vfio_iommu_type1_init(void)
{
	return vfio_register_iommu_driver(&vfio_iommu_driver_ops_type1);
}

static void __exit vfio_iommu_type1_cleanup(void)
{
	vfio_unregister_iommu_driver(&vfio_iommu_driver_ops_type1);
}

module_init(vfio_iommu_type1_init);
module_exit(vfio_iommu_type1_cleanup);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);