// SPDX-License-Identifier: GPL-2.0-only
/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 */

#include <linux/acpi_iort.h>
#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>

struct iommu_dma_msi_page {
	struct list_head	list;
	dma_addr_t		iova;
	phys_addr_t		phys;
};

enum iommu_dma_cookie_type {
	IOMMU_DMA_IOVA_COOKIE,
	IOMMU_DMA_MSI_COOKIE,
};

struct iommu_dma_cookie {
	enum iommu_dma_cookie_type	type;
	union {
		/* Full allocator for IOMMU_DMA_IOVA_COOKIE */
		struct iova_domain	iovad;
		/* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
		dma_addr_t		msi_iova;
	};
	struct list_head		msi_page_list;
	spinlock_t			msi_lock;

	/* Domain for flush queue callback; NULL if flush queue not in use */
	struct iommu_domain		*fq_domain;
};

static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
{
	if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
		return cookie->iovad.granule;
	return PAGE_SIZE;
}

static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
{
	struct iommu_dma_cookie *cookie;

	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
	if (cookie) {
		spin_lock_init(&cookie->msi_lock);
		INIT_LIST_HEAD(&cookie->msi_page_list);
		cookie->type = type;
	}
	return cookie;
}

int iommu_dma_init(void)
{
	return iova_cache_get();
}

/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 *
 * IOMMU drivers should normally call this from their domain_alloc
 * callback when domain->type == IOMMU_DOMAIN_DMA.
 */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
	if (domain->iova_cookie)
		return -EEXIST;

	domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
	if (!domain->iova_cookie)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(iommu_get_dma_cookie);

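/*
 * Example (illustrative sketch only, not part of this file): a hypothetical
 * IOMMU driver acquiring the cookie from its domain_alloc callback, as the
 * kerneldoc above suggests. "struct my_domain" and the function name are
 * made-up names.
 *
 *	static struct iommu_domain *my_domain_alloc(unsigned int type)
 *	{
 *		struct my_domain *md = kzalloc(sizeof(*md), GFP_KERNEL);
 *
 *		if (!md)
 *			return NULL;
 *		if (type == IOMMU_DOMAIN_DMA &&
 *		    iommu_get_dma_cookie(&md->domain)) {
 *			kfree(md);
 *			return NULL;
 *		}
 *		return &md->domain;
 *	}
 */
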
/**
 * iommu_get_msi_cookie - Acquire just MSI remapping resources
 * @domain: IOMMU domain to prepare
 * @base: Start address of IOVA region for MSI mappings
 *
 * Users who manage their own IOVA allocation and do not want DMA API support,
 * but would still like to take advantage of automatic MSI remapping, can use
 * this to initialise their own domain appropriately. Users should reserve a
 * contiguous IOVA region, starting at @base, large enough to accommodate the
 * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
 * used by the devices attached to @domain.
 */
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
	struct iommu_dma_cookie *cookie;

	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;

	if (domain->iova_cookie)
		return -EEXIST;

	cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
	if (!cookie)
		return -ENOMEM;

	cookie->msi_iova = base;
	domain->iova_cookie = cookie;
	return 0;
}
EXPORT_SYMBOL(iommu_get_msi_cookie);

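/*
 * Example (illustrative sketch only): a user driving an unmanaged domain and
 * managing its own IOVA space hands a reserved window over for MSI remapping.
 * The base address and window size here are made up; the caller is assumed
 * to have kept that IOVA range free as the kerneldoc above requires.
 *
 *	#define MY_MSI_IOVA_BASE	0x8000000UL
 *
 *	ret = iommu_get_msi_cookie(domain, MY_MSI_IOVA_BASE);
 *	if (ret)
 *		goto out_detach;
 */
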
/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
 *          iommu_get_msi_cookie()
 *
 * IOMMU drivers should normally call this from their domain_free callback.
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi, *tmp;

	if (!cookie)
		return;

	if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
		put_iova_domain(&cookie->iovad);

	list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
		list_del(&msi->list);
		kfree(msi);
	}
	kfree(cookie);
	domain->iova_cookie = NULL;
}
EXPORT_SYMBOL(iommu_put_dma_cookie);

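/*
 * Example (illustrative sketch only): the counterpart of the domain_alloc
 * sketch above, releasing the cookie from a hypothetical driver's
 * domain_free callback before freeing the domain itself. to_my_domain() is
 * a made-up container_of() helper.
 *
 *	static void my_domain_free(struct iommu_domain *domain)
 *	{
 *		iommu_put_dma_cookie(domain);
 *		kfree(to_my_domain(domain));
 *	}
 */
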
/**
 * iommu_dma_get_resv_regions - Reserved region driver helper
 * @dev: Device from iommu_get_resv_regions()
 * @list: Reserved region list from iommu_get_resv_regions()
 *
 * IOMMU drivers can use this to implement their .get_resv_regions callback
 * for general non-IOMMU-specific reservations. Currently, this covers GICv3
 * ITS region reservation on ACPI based ARM platforms that may require HW MSI
 * reservation.
 */
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{
	if (!is_of_node(dev_iommu_fwspec_get(dev)->iommu_fwnode))
		iort_iommu_msi_get_resv_regions(dev, list);
}
EXPORT_SYMBOL(iommu_dma_get_resv_regions);

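/*
 * Example (illustrative sketch only): a hypothetical driver's
 * .get_resv_regions callback adding its own software-managed MSI window
 * before delegating the generic reservations to this helper. The base,
 * length and function name are made up.
 *
 *	static void my_get_resv_regions(struct device *dev,
 *					struct list_head *head)
 *	{
 *		struct iommu_resv_region *region;
 *
 *		region = iommu_alloc_resv_region(MY_MSI_IOVA_BASE, SZ_1M,
 *						 IOMMU_WRITE | IOMMU_NOEXEC |
 *						 IOMMU_MMIO, IOMMU_RESV_SW_MSI);
 *		if (region)
 *			list_add_tail(&region->list, head);
 *
 *		iommu_dma_get_resv_regions(dev, head);
 *	}
 */
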
static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
		phys_addr_t start, phys_addr_t end)
{
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_dma_msi_page *msi_page;
	int i, num_pages;

	start -= iova_offset(iovad, start);
	num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);

	msi_page = kcalloc(num_pages, sizeof(*msi_page), GFP_KERNEL);
	if (!msi_page)
		return -ENOMEM;

	for (i = 0; i < num_pages; i++) {
		msi_page[i].phys = start;
		msi_page[i].iova = start;
		INIT_LIST_HEAD(&msi_page[i].list);
		list_add(&msi_page[i].list, &cookie->msi_page_list);
		start += iovad->granule;
	}

	return 0;
}

static int iova_reserve_pci_windows(struct pci_dev *dev,
		struct iova_domain *iovad)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
	struct resource_entry *window;
	unsigned long lo, hi;
	phys_addr_t start = 0, end;

	resource_list_for_each_entry(window, &bridge->windows) {
		if (resource_type(window->res) != IORESOURCE_MEM)
			continue;

		lo = iova_pfn(iovad, window->res->start - window->offset);
		hi = iova_pfn(iovad, window->res->end - window->offset);
		reserve_iova(iovad, lo, hi);
	}

	/* Get reserved DMA windows from host bridge */
	resource_list_for_each_entry(window, &bridge->dma_ranges) {
		end = window->res->start - window->offset;
resv_iova:
		if (end > start) {
			lo = iova_pfn(iovad, start);
			hi = iova_pfn(iovad, end);
			reserve_iova(iovad, lo, hi);
		} else {
			/* dma_ranges list should be sorted */
			dev_err(&dev->dev, "Failed to reserve IOVA\n");
			return -EINVAL;
		}

		start = window->res->end - window->offset + 1;
		/* If window is last entry */
		if (window->node.next == &bridge->dma_ranges &&
		    end != ~(dma_addr_t)0) {
			end = ~(dma_addr_t)0;
			goto resv_iova;
		}
	}

	return 0;
}

static int iova_reserve_iommu_regions(struct device *dev,
		struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_resv_region *region;
	LIST_HEAD(resv_regions);
	int ret = 0;

	if (dev_is_pci(dev)) {
		ret = iova_reserve_pci_windows(to_pci_dev(dev), iovad);
		if (ret)
			return ret;
	}

	iommu_get_resv_regions(dev, &resv_regions);
	list_for_each_entry(region, &resv_regions, list) {
		unsigned long lo, hi;

		/* We ARE the software that manages these! */
		if (region->type == IOMMU_RESV_SW_MSI)
			continue;

		lo = iova_pfn(iovad, region->start);
		hi = iova_pfn(iovad, region->start + region->length - 1);
		reserve_iova(iovad, lo, hi);

		if (region->type == IOMMU_RESV_MSI)
			ret = cookie_init_hw_msi_region(cookie, region->start,
					region->start + region->length);
		if (ret)
			break;
	}
	iommu_put_resv_regions(dev, &resv_regions);

	return ret;
}

static void iommu_dma_flush_iotlb_all(struct iova_domain *iovad)
{
	struct iommu_dma_cookie *cookie;
	struct iommu_domain *domain;

	cookie = container_of(iovad, struct iommu_dma_cookie, iovad);
	domain = cookie->fq_domain;
	/*
	 * The IOMMU driver supporting DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE
	 * implies that ops->flush_iotlb_all must be non-NULL.
	 */
	domain->ops->flush_iotlb_all(domain);
}

/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @size: Size of IOVA space
 * @dev: Device the domain is being initialised for
 *
 * @base and @size should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
		u64 size, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long order, base_pfn;
	int attr;

	if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
		return -EINVAL;

	/* Use the smallest supported page size for IOVA granularity */
	order = __ffs(domain->pgsize_bitmap);
	base_pfn = max_t(unsigned long, 1, base >> order);

	/* Check the domain allows at least some access to the device... */
	if (domain->geometry.force_aperture) {
		if (base > domain->geometry.aperture_end ||
		    base + size <= domain->geometry.aperture_start) {
			pr_warn("specified DMA range outside IOMMU capability\n");
			return -EFAULT;
		}
		/* ...then finally give it a kicking to make sure it fits */
		base_pfn = max_t(unsigned long, base_pfn,
				domain->geometry.aperture_start >> order);
	}

	/* start_pfn is always nonzero for an already-initialised domain */
	if (iovad->start_pfn) {
		if (1UL << order != iovad->granule ||
		    base_pfn != iovad->start_pfn) {
			pr_warn("Incompatible range for DMA domain\n");
			return -EFAULT;
		}

		return 0;
	}

	init_iova_domain(iovad, 1UL << order, base_pfn);

	if (!cookie->fq_domain && !iommu_domain_get_attr(domain,
			DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) && attr) {
		cookie->fq_domain = domain;
		init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all, NULL);
	}

	if (!dev)
		return 0;

	return iova_reserve_iommu_regions(dev, domain);
}
EXPORT_SYMBOL(iommu_dma_init_domain);

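/*
 * Example (illustrative sketch only): architecture glue wiring a device up
 * to its DMA domain might initialise the cookie like this; everything except
 * the iommu_get_domain_for_dev() and iommu_dma_init_domain() calls is a
 * made-up name.
 *
 *	static bool my_setup_dma_ops(struct device *dev, u64 dma_base, u64 size)
 *	{
 *		struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
 *
 *		if (!domain || domain->type != IOMMU_DOMAIN_DMA)
 *			return false;
 *		if (iommu_dma_init_domain(domain, dma_base, size, dev))
 *			return false;
 *		return true;
 *	}
 */
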
/**
 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
 *                    page flags.
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 * @attrs: DMA attributes for the mapping
 *
 * Return: corresponding IOMMU API page protection flags
 */
int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
		     unsigned long attrs)
{
	int prot = coherent ? IOMMU_CACHE : 0;

	if (attrs & DMA_ATTR_PRIVILEGED)
		prot |= IOMMU_PRIV;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return 0;
	}
}

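/*
 * Example (follows directly from the switch above): a cache-coherent,
 * bidirectional streaming mapping with no special attributes translates as
 *
 *	dma_info_to_prot(DMA_BIDIRECTIONAL, true, 0)
 *		== IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE
 *
 * while a non-coherent DMA_TO_DEVICE mapping yields just IOMMU_READ.
 */
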
static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
		size_t size, dma_addr_t dma_limit, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long shift, iova_len, iova = 0;

	if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
		cookie->msi_iova += size;
		return cookie->msi_iova - size;
	}

	shift = iova_shift(iovad);
	iova_len = size >> shift;
	/*
	 * Freeing non-power-of-two-sized allocations back into the IOVA caches
	 * will come back to bite us badly, so we have to waste a bit of space
	 * rounding up anything cacheable to make sure that can't happen. The
	 * order of the unadjusted size will still match upon freeing.
	 */
	if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
		iova_len = roundup_pow_of_two(iova_len);

	if (dev->bus_dma_mask)
		dma_limit &= dev->bus_dma_mask;

	if (domain->geometry.force_aperture)
		dma_limit = min(dma_limit, domain->geometry.aperture_end);

	/* Try to get PCI devices a SAC address */
	if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
		iova = alloc_iova_fast(iovad, iova_len,
				       DMA_BIT_MASK(32) >> shift, false);

	if (!iova)
		iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
				       true);

	return (dma_addr_t)iova << shift;
}

static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
		dma_addr_t iova, size_t size)
{
	struct iova_domain *iovad = &cookie->iovad;

	/* The MSI case is only ever cleaning up its most recent allocation */
	if (cookie->type == IOMMU_DMA_MSI_COOKIE)
		cookie->msi_iova -= size;
	else if (cookie->fq_domain)	/* non-strict mode */
		queue_iova(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad), 0);
	else
		free_iova_fast(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad));
}

static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr,
		size_t size)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, dma_addr);

	dma_addr -= iova_off;
	size = iova_align(iovad, size + iova_off);

	WARN_ON(iommu_unmap_fast(domain, dma_addr, size) != size);
	if (!cookie->fq_domain)
		iommu_tlb_sync(domain);
	iommu_dma_free_iova(cookie, dma_addr, size);
}

static void __iommu_dma_free_pages(struct page **pages, int count)
{
	while (count--)
		__free_page(pages[count]);
	kvfree(pages);
}

static struct page **__iommu_dma_alloc_pages(struct device *dev,
		unsigned int count, unsigned long order_mask, gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0, nid = dev_to_node(dev);

	order_mask &= (2U << MAX_ORDER) - 1;
	if (!order_mask)
		return NULL;

	pages = kvzalloc(count * sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	/* IOMMU can map any pages, so highmem can also be used here */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	while (count) {
		struct page *page = NULL;
		unsigned int order_size;

		/*
		 * Higher-order allocations are a convenience rather
		 * than a necessity, hence using __GFP_NORETRY until
		 * falling back to minimum-order allocations.
		 */
		for (order_mask &= (2U << __fls(count)) - 1;
		     order_mask; order_mask &= ~order_size) {
			unsigned int order = __fls(order_mask);
			gfp_t alloc_flags = gfp;

			order_size = 1U << order;
			if (order_mask > order_size)
				alloc_flags |= __GFP_NORETRY;
			page = alloc_pages_node(nid, alloc_flags, order);
			if (!page)
				continue;
			if (!order)
				break;
			if (!PageCompound(page)) {
				split_page(page, order);
				break;
			} else if (!split_huge_page(page)) {
				break;
			}
			__free_pages(page, order);
		}
		if (!page) {
			__iommu_dma_free_pages(pages, i);
			return NULL;
		}
		count -= order_size;
		while (order_size--)
			pages[i++] = page++;
	}
	return pages;
}

/**
 * iommu_dma_free - Free a buffer allocated by iommu_dma_alloc()
 * @dev: Device which owns this buffer
 * @pages: Array of buffer pages as returned by iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @handle: DMA address of buffer
 *
 * Frees both the pages associated with the buffer, and the array
 * describing them
 */
void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
		dma_addr_t *handle)
{
	__iommu_dma_unmap(iommu_get_dma_domain(dev), *handle, size);
	__iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
	*handle = DMA_MAPPING_ERROR;
}

/**
 * iommu_dma_alloc - Allocate and map a buffer contiguous in IOVA space
 * @dev: Device to allocate memory for. Must be a real device
 *	 attached to an iommu_dma_domain
 * @size: Size of buffer in bytes
 * @gfp: Allocation flags
 * @attrs: DMA attributes for this allocation
 * @prot: IOMMU mapping flags
 * @handle: Out argument for allocated DMA handle
 * @flush_page: Arch callback which must ensure PAGE_SIZE bytes from the
 *		given VA/PA are visible to the given non-coherent device.
 *
 * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 *
 * Return: Array of struct page pointers describing the buffer,
 *	   or NULL on failure.
 */
struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
		unsigned long attrs, int prot, dma_addr_t *handle,
		void (*flush_page)(struct device *, const void *, phys_addr_t))
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct page **pages;
	struct sg_table sgt;
	dma_addr_t iova;
	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;

	*handle = DMA_MAPPING_ERROR;

	min_size = alloc_sizes & -alloc_sizes;
	if (min_size < PAGE_SIZE) {
		min_size = PAGE_SIZE;
		alloc_sizes |= PAGE_SIZE;
	} else {
		size = ALIGN(size, min_size);
	}
	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
		alloc_sizes = min_size;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	pages = __iommu_dma_alloc_pages(dev, count, alloc_sizes >> PAGE_SHIFT,
					gfp);
	if (!pages)
		return NULL;

	size = iova_align(iovad, size);
	iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
	if (!iova)
		goto out_free_pages;

	if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
		goto out_free_iova;

	if (!(prot & IOMMU_CACHE)) {
		struct sg_mapping_iter miter;
		/*
		 * The CPU-centric flushing implied by SG_MITER_TO_SG isn't
		 * sufficient here, so skip it by using the "wrong" direction.
		 */
		sg_miter_start(&miter, sgt.sgl, sgt.orig_nents, SG_MITER_FROM_SG);
		while (sg_miter_next(&miter))
			flush_page(dev, miter.addr, page_to_phys(miter.page));
		sg_miter_stop(&miter);
	}

	if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, prot)
			< size)
		goto out_free_sg;

	*handle = iova;
	sg_free_table(&sgt);
	return pages;

out_free_sg:
	sg_free_table(&sgt);
out_free_iova:
	iommu_dma_free_iova(cookie, iova, size);
out_free_pages:
	__iommu_dma_free_pages(pages, count);
	return NULL;
}

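/*
 * Example (illustrative sketch only): non-coherent architecture glue calling
 * iommu_dma_alloc() and supplying the flush_page callback described above.
 * my_flush_area() stands in for whatever cache maintenance the architecture
 * provides; it and my_flush_page() are made-up names.
 *
 *	static void my_flush_page(struct device *dev, const void *virt,
 *				  phys_addr_t phys)
 *	{
 *		my_flush_area(virt, PAGE_SIZE);
 *	}
 *
 *	pages = iommu_dma_alloc(dev, iosize, gfp, attrs,
 *				dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs),
 *				&dma_handle, my_flush_page);
 *	if (!pages)
 *		return NULL;
 */
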
/**
 * iommu_dma_mmap - Map a buffer into provided user VMA
 * @pages: Array representing buffer from iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @vma: VMA describing requested userspace mapping
 *
 * Maps the pages of the buffer in @pages into @vma. The caller is responsible
 * for verifying the correct size and protection of @vma beforehand.
 */
int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma)
{
	return vm_map_pages(vma, pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
}

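/*
 * Example (illustrative sketch only): an mmap implementation for a buffer
 * obtained from iommu_dma_alloc(), assuming the caller remapped the page
 * array into a vmalloc area and can recover it via find_vm_area().
 *
 *	struct vm_struct *area = find_vm_area(cpu_addr);
 *
 *	if (WARN_ON(!area || !area->pages))
 *		return -ENXIO;
 *	return iommu_dma_mmap(area->pages, size, vma);
 */
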
static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
		size_t size, int prot, struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	size_t iova_off = 0;
	dma_addr_t iova;

	if (cookie->type == IOMMU_DMA_IOVA_COOKIE) {
		iova_off = iova_offset(&cookie->iovad, phys);
		size = iova_align(&cookie->iovad, size + iova_off);
	}

	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
	if (!iova)
		return DMA_MAPPING_ERROR;

	if (iommu_map(domain, iova, phys - iova_off, size, prot)) {
		iommu_dma_free_iova(cookie, iova, size);
		return DMA_MAPPING_ERROR;
	}
	return iova + iova_off;
}

dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, int prot)
{
	return __iommu_dma_map(dev, page_to_phys(page) + offset, size, prot,
			iommu_get_dma_domain(dev));
}

void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(iommu_get_dma_domain(dev), handle, size);
}

/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
 *
 * At this point the segments are already laid out by iommu_dma_map_sg() to
 * avoid individually crossing any boundaries, so we merely need to check a
 * segment's start address to avoid concatenating across one.
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
		dma_addr_t dma_addr)
{
	struct scatterlist *s, *cur = sg;
	unsigned long seg_mask = dma_get_seg_boundary(dev);
	unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
	int i, count = 0;

	for_each_sg(sg, s, nents, i) {
		/* Restore this segment's original unaligned fields first */
		unsigned int s_iova_off = sg_dma_address(s);
		unsigned int s_length = sg_dma_len(s);
		unsigned int s_iova_len = s->length;

		s->offset += s_iova_off;
		s->length = s_length;
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;

		/*
		 * Now fill in the real DMA data. If...
		 * - there is a valid output segment to append to
		 * - and this segment starts on an IOVA page boundary
		 * - but doesn't fall at a segment boundary
		 * - and wouldn't make the resulting output segment too long
		 */
		if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
		    (cur_len + s_length <= max_len)) {
			/* ...then concatenate it with the previous one */
			cur_len += s_length;
		} else {
			/* Otherwise start the next output segment */
			if (i > 0)
				cur = sg_next(cur);
			cur_len = s_length;
			count++;

			sg_dma_address(cur) = dma_addr + s_iova_off;
		}

		sg_dma_len(cur) = cur_len;
		dma_addr += s_iova_len;

		if (s_length + s_iova_off < s_iova_len)
			cur_len = 0;
	}
	return count;
}

/*
 * If mapping failed, then just restore the original list,
 * but making sure the DMA fields are invalidated.
 */
static void __invalidate_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_address(s) != DMA_MAPPING_ERROR)
			s->offset += sg_dma_address(s);
		if (sg_dma_len(s))
			s->length = sg_dma_len(s);
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;
	}
}

/*
 * The DMA API client is passing in a scatterlist which could describe
 * any old buffer layout, but the IOMMU API requires everything to be
 * aligned to IOMMU pages. Hence the need for this complicated bit of
 * impedance-matching, to be able to hand off a suitably-aligned list,
 * but still preserve the original offsets and sizes for the caller.
 */
int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, int prot)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct scatterlist *s, *prev = NULL;
	dma_addr_t iova;
	size_t iova_len = 0;
	unsigned long mask = dma_get_seg_boundary(dev);
	int i;

	/*
	 * Work out how much IOVA space we need, and align the segments to
	 * IOVA granules for the IOMMU driver to handle. With some clever
	 * trickery we can modify the list in-place, but reversibly, by
	 * stashing the unaligned parts in the as-yet-unused DMA fields.
	 */
	for_each_sg(sg, s, nents, i) {
		size_t s_iova_off = iova_offset(iovad, s->offset);
		size_t s_length = s->length;
		size_t pad_len = (mask - iova_len + 1) & mask;

		sg_dma_address(s) = s_iova_off;
		sg_dma_len(s) = s_length;
		s->offset -= s_iova_off;
		s_length = iova_align(iovad, s_length + s_iova_off);
		s->length = s_length;

		/*
		 * Due to the alignment of our single IOVA allocation, we can
		 * depend on these assumptions about the segment boundary mask:
		 * - If mask size >= IOVA size, then the IOVA range cannot
		 *   possibly fall across a boundary, so we don't care.
		 * - If mask size < IOVA size, then the IOVA range must start
		 *   exactly on a boundary, therefore we can lay things out
		 *   based purely on segment lengths without needing to know
		 *   the actual addresses beforehand.
		 * - The mask must be a power of 2, so pad_len == 0 if
		 *   iova_len == 0, thus we cannot dereference prev the first
		 *   time through here (i.e. before it has a meaningful value).
		 */
		if (pad_len && pad_len < s_length - 1) {
			prev->length += pad_len;
			iova_len += pad_len;
		}

		iova_len += s_length;
		prev = s;
	}

	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
	if (!iova)
		goto out_restore_sg;

	/*
	 * We'll leave any physical concatenation to the IOMMU driver's
	 * implementation - it knows better than we do.
	 */
	if (iommu_map_sg(domain, iova, sg, nents, prot) < iova_len)
		goto out_free_iova;

	return __finalise_sg(dev, sg, nents, iova);

out_free_iova:
	iommu_dma_free_iova(cookie, iova, iova_len);
out_restore_sg:
	__invalidate_sg(sg, nents);
	return 0;
}

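/*
 * Example (illustrative sketch only): arch dma_map_ops glue driving the
 * scatterlist path. Cache maintenance for non-coherent devices is assumed to
 * be handled elsewhere; the wrapper name is made up and dev_is_dma_coherent()
 * is assumed to be available to the caller.
 *
 *	static int my_map_sg(struct device *dev, struct scatterlist *sgl,
 *			     int nents, enum dma_data_direction dir,
 *			     unsigned long attrs)
 *	{
 *		bool coherent = dev_is_dma_coherent(dev);
 *
 *		return iommu_dma_map_sg(dev, sgl, nents,
 *					dma_info_to_prot(dir, coherent, attrs));
 *	}
 */
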
void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t start, end;
	struct scatterlist *tmp;
	int i;

	/*
	 * The scatterlist segments are mapped into a single
	 * contiguous IOVA allocation, so this is incredibly easy.
	 */
	start = sg_dma_address(sg);
	for_each_sg(sg_next(sg), tmp, nents - 1, i) {
		if (sg_dma_len(tmp) == 0)
			break;
		sg = tmp;
	}
	end = sg_dma_address(sg) + sg_dma_len(sg);
	__iommu_dma_unmap(iommu_get_dma_domain(dev), start, end - start);
}

dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return __iommu_dma_map(dev, phys, size,
			dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO,
			iommu_get_dma_domain(dev));
}

void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(iommu_get_dma_domain(dev), handle, size);
}

static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
		phys_addr_t msi_addr, struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi_page;
	dma_addr_t iova;
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
	size_t size = cookie_msi_granule(cookie);

	msi_addr &= ~(phys_addr_t)(size - 1);
	list_for_each_entry(msi_page, &cookie->msi_page_list, list)
		if (msi_page->phys == msi_addr)
			return msi_page;

	msi_page = kzalloc(sizeof(*msi_page), GFP_ATOMIC);
	if (!msi_page)
		return NULL;

	iova = __iommu_dma_map(dev, msi_addr, size, prot, domain);
	if (iova == DMA_MAPPING_ERROR)
		goto out_free_page;

	INIT_LIST_HEAD(&msi_page->list);
	msi_page->phys = msi_addr;
	msi_page->iova = iova;
	list_add(&msi_page->list, &cookie->msi_page_list);
	return msi_page;

out_free_page:
	kfree(msi_page);
	return NULL;
}

int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
{
	struct device *dev = msi_desc_to_dev(desc);
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_cookie *cookie;
	struct iommu_dma_msi_page *msi_page;
	unsigned long flags;

	if (!domain || !domain->iova_cookie) {
		desc->iommu_cookie = NULL;
		return 0;
	}

	cookie = domain->iova_cookie;

	/*
	 * We disable IRQs to rule out a possible inversion against
	 * irq_desc_lock if, say, someone tries to retarget the affinity
	 * of an MSI from within an IPI handler.
	 */
	spin_lock_irqsave(&cookie->msi_lock, flags);
	msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
	spin_unlock_irqrestore(&cookie->msi_lock, flags);

	msi_desc_set_iommu_cookie(desc, msi_page);

	if (!msi_page)
		return -ENOMEM;
	return 0;
}

void iommu_dma_compose_msi_msg(struct msi_desc *desc,
			       struct msi_msg *msg)
{
	struct device *dev = msi_desc_to_dev(desc);
	const struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	const struct iommu_dma_msi_page *msi_page;

	msi_page = msi_desc_get_iommu_cookie(desc);

	if (!domain || !domain->iova_cookie || WARN_ON(!msi_page))
		return;

	msg->address_hi = upper_32_bits(msi_page->iova);
	msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1;
	msg->address_lo += lower_32_bits(msi_page->iova);
}

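/*
 * Example (illustrative sketch only): how an MSI controller driver might use
 * the two helpers above. The doorbell address and surrounding code are made
 * up; only the iommu_dma_*() calls are from this file.
 *
 *	// at MSI allocation time, map (or look up) the doorbell page
 *	err = iommu_dma_prepare_msi(desc, my_doorbell_phys);
 *	if (err)
 *		return err;
 *
 *	// later, when composing the message, start from the physical doorbell
 *	// address and let the helper rewrite it to the remapped IOVA
 *	msg->address_lo = lower_32_bits(my_doorbell_phys);
 *	msg->address_hi = upper_32_bits(my_doorbell_phys);
 *	iommu_dma_compose_msi_msg(desc, msg);
 */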