/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/acpi_iort.h>
#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>

#define IOMMU_MAPPING_ERROR	0

struct iommu_dma_msi_page {
	struct list_head	list;
	dma_addr_t		iova;
	phys_addr_t		phys;
};

enum iommu_dma_cookie_type {
	IOMMU_DMA_IOVA_COOKIE,
	IOMMU_DMA_MSI_COOKIE,
};

struct iommu_dma_cookie {
	enum iommu_dma_cookie_type	type;
	union {
		/* Full allocator for IOMMU_DMA_IOVA_COOKIE */
		struct iova_domain	iovad;
		/* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
		dma_addr_t		msi_iova;
	};
	struct list_head		msi_page_list;
	spinlock_t			msi_lock;
};

static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
{
	if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
		return cookie->iovad.granule;
	return PAGE_SIZE;
}

static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
{
	struct iommu_dma_cookie *cookie;

	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
	if (cookie) {
		spin_lock_init(&cookie->msi_lock);
		INIT_LIST_HEAD(&cookie->msi_page_list);
		cookie->type = type;
	}
	return cookie;
}

int iommu_dma_init(void)
{
	return iova_cache_get();
}

/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 *
 * IOMMU drivers should normally call this from their domain_alloc
 * callback when domain->type == IOMMU_DOMAIN_DMA.
 */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
	if (domain->iova_cookie)
		return -EEXIST;

	domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
	if (!domain->iova_cookie)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(iommu_get_dma_cookie);

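/*
 * Example (illustrative sketch only, not part of this file): an IOMMU
 * driver's domain_alloc callback might acquire a cookie along these lines,
 * where "struct my_domain" and my_init_hw_pgtable() are hypothetical
 * driver-specific stand-ins:
 *
 *	static struct iommu_domain *my_domain_alloc(unsigned type)
 *	{
 *		struct my_domain *dom;
 *
 *		if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
 *			return NULL;
 *
 *		dom = kzalloc(sizeof(*dom), GFP_KERNEL);
 *		if (!dom)
 *			return NULL;
 *
 *		if (type == IOMMU_DOMAIN_DMA &&
 *		    iommu_get_dma_cookie(&dom->domain)) {
 *			kfree(dom);
 *			return NULL;
 *		}
 *
 *		my_init_hw_pgtable(dom);
 *		return &dom->domain;
 *	}
 */
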
/**
 * iommu_get_msi_cookie - Acquire just MSI remapping resources
 * @domain: IOMMU domain to prepare
 * @base: Start address of IOVA region for MSI mappings
 *
 * Users who manage their own IOVA allocation and do not want DMA API support,
 * but would still like to take advantage of automatic MSI remapping, can use
 * this to initialise their own domain appropriately. Users should reserve a
 * contiguous IOVA region, starting at @base, large enough to accommodate the
 * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
 * used by the devices attached to @domain.
 */
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
	struct iommu_dma_cookie *cookie;

	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;

	if (domain->iova_cookie)
		return -EEXIST;

	cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
	if (!cookie)
		return -ENOMEM;

	cookie->msi_iova = base;
	domain->iova_cookie = cookie;
	return 0;
}
EXPORT_SYMBOL(iommu_get_msi_cookie);

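/*
 * Example (illustrative sketch only): a caller managing its own unmanaged
 * domain, VFIO-style, might set aside a software MSI region like this,
 * where MY_MSI_IOVA_BASE is a hypothetical base address reserved in the
 * caller's own IOVA allocator:
 *
 *	struct iommu_domain *dom = iommu_domain_alloc(&pci_bus_type);
 *	int ret;
 *
 *	if (!dom)
 *		return -ENOMEM;
 *
 *	ret = iommu_get_msi_cookie(dom, MY_MSI_IOVA_BASE);
 *	if (ret) {
 *		iommu_domain_free(dom);
 *		return ret;
 *	}
 */
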
/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
 *          iommu_get_msi_cookie()
 *
 * IOMMU drivers should normally call this from their domain_free callback.
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi, *tmp;

	if (!cookie)
		return;

	if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
		put_iova_domain(&cookie->iovad);

	list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
		list_del(&msi->list);
		kfree(msi);
	}
	kfree(cookie);
	domain->iova_cookie = NULL;
}
EXPORT_SYMBOL(iommu_put_dma_cookie);

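/*
 * Example (illustrative sketch only): the matching domain_free callback
 * for the domain_alloc sketch above; to_my_domain() and
 * my_free_hw_pgtable() are again hypothetical:
 *
 *	static void my_domain_free(struct iommu_domain *domain)
 *	{
 *		struct my_domain *dom = to_my_domain(domain);
 *
 *		iommu_put_dma_cookie(domain);
 *		my_free_hw_pgtable(dom);
 *		kfree(dom);
 *	}
 */
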
/**
 * iommu_dma_get_resv_regions - Reserved region driver helper
 * @dev: Device from iommu_get_resv_regions()
 * @list: Reserved region list from iommu_get_resv_regions()
 *
 * IOMMU drivers can use this to implement their .get_resv_regions callback
 * for general non-IOMMU-specific reservations. Currently, this covers GICv3
 * ITS region reservation on ACPI based ARM platforms that may require HW MSI
 * reservation.
 */
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{
	if (!is_of_node(dev->iommu_fwspec->iommu_fwnode))
		iort_iommu_msi_get_resv_regions(dev, list);
}
EXPORT_SYMBOL(iommu_dma_get_resv_regions);

static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
		phys_addr_t start, phys_addr_t end)
{
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_dma_msi_page *msi_page;
	int i, num_pages;

	start -= iova_offset(iovad, start);
	num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);

	msi_page = kcalloc(num_pages, sizeof(*msi_page), GFP_KERNEL);
	if (!msi_page)
		return -ENOMEM;

	for (i = 0; i < num_pages; i++) {
		msi_page[i].phys = start;
		msi_page[i].iova = start;
		INIT_LIST_HEAD(&msi_page[i].list);
		list_add(&msi_page[i].list, &cookie->msi_page_list);
		start += iovad->granule;
	}

	return 0;
}

static void iova_reserve_pci_windows(struct pci_dev *dev,
		struct iova_domain *iovad)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
	struct resource_entry *window;
	unsigned long lo, hi;

	resource_list_for_each_entry(window, &bridge->windows) {
		if (resource_type(window->res) != IORESOURCE_MEM)
			continue;

		lo = iova_pfn(iovad, window->res->start - window->offset);
		hi = iova_pfn(iovad, window->res->end - window->offset);
		reserve_iova(iovad, lo, hi);
	}
}

static int iova_reserve_iommu_regions(struct device *dev,
		struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_resv_region *region;
	LIST_HEAD(resv_regions);
	int ret = 0;

	if (dev_is_pci(dev))
		iova_reserve_pci_windows(to_pci_dev(dev), iovad);

	iommu_get_resv_regions(dev, &resv_regions);
	list_for_each_entry(region, &resv_regions, list) {
		unsigned long lo, hi;

		/* We ARE the software that manages these! */
		if (region->type == IOMMU_RESV_SW_MSI)
			continue;

		lo = iova_pfn(iovad, region->start);
		hi = iova_pfn(iovad, region->start + region->length - 1);
		reserve_iova(iovad, lo, hi);

		if (region->type == IOMMU_RESV_MSI)
			ret = cookie_init_hw_msi_region(cookie, region->start,
					region->start + region->length);
		if (ret)
			break;
	}
	iommu_put_resv_regions(dev, &resv_regions);

	return ret;
}

/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @size: Size of IOVA space
 * @dev: Device the domain is being initialised for
 *
 * @base and @size should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
		u64 size, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long order, base_pfn, end_pfn;

	if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
		return -EINVAL;

	/* Use the smallest supported page size for IOVA granularity */
	order = __ffs(domain->pgsize_bitmap);
	base_pfn = max_t(unsigned long, 1, base >> order);
	end_pfn = (base + size - 1) >> order;

	/* Check the domain allows at least some access to the device... */
	if (domain->geometry.force_aperture) {
		if (base > domain->geometry.aperture_end ||
		    base + size <= domain->geometry.aperture_start) {
			pr_warn("specified DMA range outside IOMMU capability\n");
			return -EFAULT;
		}
		/* ...then finally give it a kicking to make sure it fits */
		base_pfn = max_t(unsigned long, base_pfn,
				domain->geometry.aperture_start >> order);
	}

	/* start_pfn is always nonzero for an already-initialised domain */
	if (iovad->start_pfn) {
		if (1UL << order != iovad->granule ||
		    base_pfn != iovad->start_pfn) {
			pr_warn("Incompatible range for DMA domain\n");
			return -EFAULT;
		}

		return 0;
	}

	init_iova_domain(iovad, 1UL << order, base_pfn);
	if (!dev)
		return 0;

	return iova_reserve_iommu_regions(dev, domain);
}
EXPORT_SYMBOL(iommu_dma_init_domain);

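/*
 * Example (illustrative sketch only): arch code wiring a device up to its
 * default DMA domain might call this roughly as follows, here assuming a
 * 32-bit IOVA window starting at zero:
 *
 *	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
 *	u64 dma_base = 0, size = 1ULL << 32;
 *
 *	if (!domain || iommu_dma_init_domain(domain, dma_base, size, dev))
 *		pr_warn("IOMMU DMA setup failed for %s\n", dev_name(dev));
 */
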
/**
 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
 *                    page flags.
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 * @attrs: DMA attributes for the mapping
 *
 * Return: corresponding IOMMU API page protection flags
 */
int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
		unsigned long attrs)
{
	int prot = coherent ? IOMMU_CACHE : 0;

	if (attrs & DMA_ATTR_PRIVILEGED)
		prot |= IOMMU_PRIV;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return 0;
	}
}

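/*
 * Worked example: for a cache-coherent device doing a privileged
 * device-read (DMA_TO_DEVICE) transfer,
 *
 *	dma_info_to_prot(DMA_TO_DEVICE, true, DMA_ATTR_PRIVILEGED)
 *		== IOMMU_CACHE | IOMMU_PRIV | IOMMU_READ
 *
 * i.e. a cacheable mapping which the device may only read, and only from
 * a privileged context.
 */
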
static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
		size_t size, dma_addr_t dma_limit, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long shift, iova_len, iova = 0;

	if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
		cookie->msi_iova += size;
		return cookie->msi_iova - size;
	}

	shift = iova_shift(iovad);
	iova_len = size >> shift;
	/*
	 * Freeing non-power-of-two-sized allocations back into the IOVA caches
	 * will come back to bite us badly, so we have to waste a bit of space
	 * rounding up anything cacheable to make sure that can't happen. The
	 * order of the unadjusted size will still match upon freeing.
	 */
	if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
		iova_len = roundup_pow_of_two(iova_len);

	if (dev->bus_dma_mask)
		dma_limit &= dev->bus_dma_mask;

	if (domain->geometry.force_aperture)
		dma_limit = min(dma_limit, domain->geometry.aperture_end);

	/* Try to get PCI devices a SAC address */
	if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
		iova = alloc_iova_fast(iovad, iova_len,
				       DMA_BIT_MASK(32) >> shift, false);

	if (!iova)
		iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
				       true);

	return (dma_addr_t)iova << shift;
}

static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
		dma_addr_t iova, size_t size)
{
	struct iova_domain *iovad = &cookie->iovad;

	/* The MSI case is only ever cleaning up its most recent allocation */
	if (cookie->type == IOMMU_DMA_MSI_COOKIE)
		cookie->msi_iova -= size;
	else
		free_iova_fast(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad));
}

static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr,
		size_t size)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, dma_addr);

	dma_addr -= iova_off;
	size = iova_align(iovad, size + iova_off);

	WARN_ON(iommu_unmap(domain, dma_addr, size) != size);
	iommu_dma_free_iova(cookie, dma_addr, size);
}

static void __iommu_dma_free_pages(struct page **pages, int count)
{
	while (count--)
		__free_page(pages[count]);
	kvfree(pages);
}

static struct page **__iommu_dma_alloc_pages(unsigned int count,
		unsigned long order_mask, gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0, array_size = count * sizeof(*pages);

	order_mask &= (2U << MAX_ORDER) - 1;
	if (!order_mask)
		return NULL;

	if (array_size <= PAGE_SIZE)
		pages = kzalloc(array_size, GFP_KERNEL);
	else
		pages = vzalloc(array_size);
	if (!pages)
		return NULL;

	/* IOMMU can map any pages, so highmem can also be used here */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	while (count) {
		struct page *page = NULL;
		unsigned int order_size;

		/*
		 * Higher-order allocations are a convenience rather
		 * than a necessity, hence using __GFP_NORETRY until
		 * falling back to minimum-order allocations.
		 */
		for (order_mask &= (2U << __fls(count)) - 1;
		     order_mask; order_mask &= ~order_size) {
			unsigned int order = __fls(order_mask);

			order_size = 1U << order;
			page = alloc_pages((order_mask - order_size) ?
					   gfp | __GFP_NORETRY : gfp, order);
			if (!page)
				continue;
			if (!order)
				break;
			if (!PageCompound(page)) {
				split_page(page, order);
				break;
			} else if (!split_huge_page(page)) {
				break;
			}
			__free_pages(page, order);
		}
		if (!page) {
			__iommu_dma_free_pages(pages, i);
			return NULL;
		}
		count -= order_size;
		while (order_size--)
			pages[i++] = page++;
	}
	return pages;
}

/**
 * iommu_dma_free - Free a buffer allocated by iommu_dma_alloc()
 * @dev: Device which owns this buffer
 * @pages: Array of buffer pages as returned by iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @handle: DMA address of buffer
 *
 * Frees both the pages associated with the buffer, and the array
 * describing them
 */
void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
		dma_addr_t *handle)
{
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), *handle, size);
	__iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
	*handle = IOMMU_MAPPING_ERROR;
}

/**
 * iommu_dma_alloc - Allocate and map a buffer contiguous in IOVA space
 * @dev: Device to allocate memory for. Must be a real device
 *	 attached to an iommu_dma_domain
 * @size: Size of buffer in bytes
 * @gfp: Allocation flags
 * @attrs: DMA attributes for this allocation
 * @prot: IOMMU mapping flags
 * @handle: Out argument for allocated DMA handle
 * @flush_page: Arch callback which must ensure PAGE_SIZE bytes from the
 *		given VA/PA are visible to the given non-coherent device.
 *
 * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 *
 * Return: Array of struct page pointers describing the buffer,
 *	   or NULL on failure.
 */
struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
		unsigned long attrs, int prot, dma_addr_t *handle,
		void (*flush_page)(struct device *, const void *, phys_addr_t))
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct page **pages;
	struct sg_table sgt;
	dma_addr_t iova;
	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;

	*handle = IOMMU_MAPPING_ERROR;

	min_size = alloc_sizes & -alloc_sizes;
	if (min_size < PAGE_SIZE) {
		min_size = PAGE_SIZE;
		alloc_sizes |= PAGE_SIZE;
	} else {
		size = ALIGN(size, min_size);
	}
	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
		alloc_sizes = min_size;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	pages = __iommu_dma_alloc_pages(count, alloc_sizes >> PAGE_SHIFT, gfp);
	if (!pages)
		return NULL;

	size = iova_align(iovad, size);
	iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
	if (!iova)
		goto out_free_pages;

	if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
		goto out_free_iova;

	if (!(prot & IOMMU_CACHE)) {
		struct sg_mapping_iter miter;
		/*
		 * The CPU-centric flushing implied by SG_MITER_TO_SG isn't
		 * sufficient here, so skip it by using the "wrong" direction.
		 */
		sg_miter_start(&miter, sgt.sgl, sgt.orig_nents, SG_MITER_FROM_SG);
		while (sg_miter_next(&miter))
			flush_page(dev, miter.addr, page_to_phys(miter.page));
		sg_miter_stop(&miter);
	}

	if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, prot) < size)
		goto out_free_sg;

	*handle = iova;
	sg_free_table(&sgt);
	return pages;

out_free_sg:
	sg_free_table(&sgt);
out_free_iova:
	iommu_dma_free_iova(cookie, iova, size);
out_free_pages:
	__iommu_dma_free_pages(pages, count);
	return NULL;
}

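/*
 * Example (illustrative sketch only): a non-coherent arch allocator might
 * wrap this as below; __dma_flush_area() stands in for whatever cache
 * maintenance routine the architecture provides, and the PAGE_KERNEL
 * pgprot is a simplification (a non-coherent mapping would normally use a
 * writecombine or non-cacheable pgprot chosen by arch policy):
 *
 *	static void flush_page(struct device *dev, const void *virt,
 *			       phys_addr_t phys)
 *	{
 *		__dma_flush_area(virt, PAGE_SIZE);
 *	}
 *
 *	struct page **pages;
 *	dma_addr_t dma_handle;
 *	int prot = dma_info_to_prot(DMA_BIDIRECTIONAL, false, attrs);
 *
 *	pages = iommu_dma_alloc(dev, size, gfp, attrs, prot, &dma_handle,
 *				flush_page);
 *	if (!pages)
 *		return NULL;
 *	return dma_common_pages_remap(pages, size, VM_USERMAP, PAGE_KERNEL,
 *				      __builtin_return_address(0));
 */
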
/**
 * iommu_dma_mmap - Map a buffer into provided user VMA
 * @pages: Array representing buffer from iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @vma: VMA describing requested userspace mapping
 *
 * Maps the pages of the buffer in @pages into @vma. The caller is responsible
 * for verifying the correct size and protection of @vma beforehand.
 */
int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma)
{
	unsigned long uaddr = vma->vm_start;
	unsigned int i, count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	int ret = -ENXIO;

	for (i = vma->vm_pgoff; i < count && uaddr < vma->vm_end; i++) {
		ret = vm_insert_page(vma, uaddr, pages[i]);
		if (ret)
			break;
		uaddr += PAGE_SIZE;
	}
	return ret;
}

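/*
 * Example (illustrative sketch only): an arch .mmap callback can recover
 * the page array from the vmalloc area that the alloc path is assumed to
 * have remapped, then hand off to iommu_dma_mmap():
 *
 *	struct vm_struct *area = find_vm_area(cpu_addr);
 *
 *	if (!area || !area->pages)
 *		return -ENXIO;
 *	return iommu_dma_mmap(area->pages, size, vma);
 */
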
static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
		size_t size, int prot)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	size_t iova_off = 0;
	dma_addr_t iova;

	if (cookie->type == IOMMU_DMA_IOVA_COOKIE) {
		iova_off = iova_offset(&cookie->iovad, phys);
		size = iova_align(&cookie->iovad, size + iova_off);
	}

	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
	if (!iova)
		return IOMMU_MAPPING_ERROR;

	if (iommu_map(domain, iova, phys - iova_off, size, prot)) {
		iommu_dma_free_iova(cookie, iova, size);
		return IOMMU_MAPPING_ERROR;
	}
	return iova + iova_off;
}

dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, int prot)
{
	return __iommu_dma_map(dev, page_to_phys(page) + offset, size, prot);
}

void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle, size);
}

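/*
 * Example (illustrative sketch only): an arch .map_page callback is
 * typically a thin wrapper which derives the IOMMU prot flags and, for a
 * non-coherent device, performs CPU cache maintenance (elided here);
 * is_device_dma_coherent() stands in for however the arch tracks
 * coherency:
 *
 *	static dma_addr_t my_map_page(struct device *dev, struct page *page,
 *				      unsigned long offset, size_t size,
 *				      enum dma_data_direction dir,
 *				      unsigned long attrs)
 *	{
 *		bool coherent = is_device_dma_coherent(dev);
 *		int prot = dma_info_to_prot(dir, coherent, attrs);
 *
 *		return iommu_dma_map_page(dev, page, offset, size, prot);
 *	}
 */
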
/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
 *
 * At this point the segments are already laid out by iommu_dma_map_sg() to
 * avoid individually crossing any boundaries, so we merely need to check a
 * segment's start address to avoid concatenating across one.
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
		dma_addr_t dma_addr)
{
	struct scatterlist *s, *cur = sg;
	unsigned long seg_mask = dma_get_seg_boundary(dev);
	unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
	int i, count = 0;

	for_each_sg(sg, s, nents, i) {
		/* Restore this segment's original unaligned fields first */
		unsigned int s_iova_off = sg_dma_address(s);
		unsigned int s_length = sg_dma_len(s);
		unsigned int s_iova_len = s->length;

		s->offset += s_iova_off;
		s->length = s_length;
		sg_dma_address(s) = IOMMU_MAPPING_ERROR;
		sg_dma_len(s) = 0;

		/*
		 * Now fill in the real DMA data. If...
		 * - there is a valid output segment to append to
		 * - and this segment starts on an IOVA page boundary
		 * - but doesn't fall at a segment boundary
		 * - and wouldn't make the resulting output segment too long
		 */
		if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
		    (cur_len + s_length <= max_len)) {
			/* ...then concatenate it with the previous one */
			cur_len += s_length;
		} else {
			/* Otherwise start the next output segment */
			if (i > 0)
				cur = sg_next(cur);
			cur_len = s_length;
			count++;

			sg_dma_address(cur) = dma_addr + s_iova_off;
		}

		sg_dma_len(cur) = cur_len;
		dma_addr += s_iova_len;

		if (s_length + s_iova_off < s_iova_len)
			cur_len = 0;
	}
	return count;
}

/*
 * If mapping failed, then just restore the original list,
 * but making sure the DMA fields are invalidated.
 */
static void __invalidate_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_address(s) != IOMMU_MAPPING_ERROR)
			s->offset += sg_dma_address(s);
		if (sg_dma_len(s))
			s->length = sg_dma_len(s);
		sg_dma_address(s) = IOMMU_MAPPING_ERROR;
		sg_dma_len(s) = 0;
	}
}

/*
 * The DMA API client is passing in a scatterlist which could describe
 * any old buffer layout, but the IOMMU API requires everything to be
 * aligned to IOMMU pages. Hence the need for this complicated bit of
 * impedance-matching, to be able to hand off a suitably-aligned list,
 * but still preserve the original offsets and sizes for the caller.
 */
int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, int prot)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct scatterlist *s, *prev = NULL;
	dma_addr_t iova;
	size_t iova_len = 0;
	unsigned long mask = dma_get_seg_boundary(dev);
	int i;

	/*
	 * Work out how much IOVA space we need, and align the segments to
	 * IOVA granules for the IOMMU driver to handle. With some clever
	 * trickery we can modify the list in-place, but reversibly, by
	 * stashing the unaligned parts in the as-yet-unused DMA fields.
	 */
	for_each_sg(sg, s, nents, i) {
		size_t s_iova_off = iova_offset(iovad, s->offset);
		size_t s_length = s->length;
		size_t pad_len = (mask - iova_len + 1) & mask;

		sg_dma_address(s) = s_iova_off;
		sg_dma_len(s) = s_length;
		s->offset -= s_iova_off;
		s_length = iova_align(iovad, s_length + s_iova_off);
		s->length = s_length;

		/*
		 * Due to the alignment of our single IOVA allocation, we can
		 * depend on these assumptions about the segment boundary mask:
		 * - If mask size >= IOVA size, then the IOVA range cannot
		 *   possibly fall across a boundary, so we don't care.
		 * - If mask size < IOVA size, then the IOVA range must start
		 *   exactly on a boundary, therefore we can lay things out
		 *   based purely on segment lengths without needing to know
		 *   the actual addresses beforehand.
		 * - The mask must be a power of 2, so pad_len == 0 if
		 *   iova_len == 0, thus we cannot dereference prev the first
		 *   time through here (i.e. before it has a meaningful value).
		 */
		if (pad_len && pad_len < s_length - 1) {
			prev->length += pad_len;
			iova_len += pad_len;
		}

		iova_len += s_length;
		prev = s;
	}

	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
	if (!iova)
		goto out_restore_sg;

	/*
	 * We'll leave any physical concatenation to the IOMMU driver's
	 * implementation - it knows better than we do.
	 */
	if (iommu_map_sg(domain, iova, sg, nents, prot) < iova_len)
		goto out_free_iova;

	return __finalise_sg(dev, sg, nents, iova);

out_free_iova:
	iommu_dma_free_iova(cookie, iova, iova_len);
out_restore_sg:
	__invalidate_sg(sg, nents);
	return 0;
}

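/*
 * Example (illustrative sketch only): the corresponding arch .map_sg
 * callback, with my_sync_sg_for_device() as a hypothetical cache
 * maintenance helper:
 *
 *	static int my_map_sg(struct device *dev, struct scatterlist *sgl,
 *			     int nelems, enum dma_data_direction dir,
 *			     unsigned long attrs)
 *	{
 *		bool coherent = is_device_dma_coherent(dev);
 *
 *		if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
 *			my_sync_sg_for_device(dev, sgl, nelems, dir);
 *
 *		return iommu_dma_map_sg(dev, sgl, nelems,
 *					dma_info_to_prot(dir, coherent, attrs));
 *	}
 */
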
void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t start, end;
	struct scatterlist *tmp;
	int i;
	/*
	 * The scatterlist segments are mapped into a single
	 * contiguous IOVA allocation, so this is incredibly easy.
	 */
	start = sg_dma_address(sg);
	for_each_sg(sg_next(sg), tmp, nents - 1, i) {
		if (sg_dma_len(tmp) == 0)
			break;
		sg = tmp;
	}
	end = sg_dma_address(sg) + sg_dma_len(sg);
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), start, end - start);
}

dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return __iommu_dma_map(dev, phys, size,
			dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO);
}

void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle, size);
}

int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == IOMMU_MAPPING_ERROR;
}

static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
		phys_addr_t msi_addr, struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi_page;
	dma_addr_t iova;
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
	size_t size = cookie_msi_granule(cookie);

	msi_addr &= ~(phys_addr_t)(size - 1);
	list_for_each_entry(msi_page, &cookie->msi_page_list, list)
		if (msi_page->phys == msi_addr)
			return msi_page;

	msi_page = kzalloc(sizeof(*msi_page), GFP_ATOMIC);
	if (!msi_page)
		return NULL;

	iova = __iommu_dma_map(dev, msi_addr, size, prot);
	if (iommu_dma_mapping_error(dev, iova))
		goto out_free_page;

	INIT_LIST_HEAD(&msi_page->list);
	msi_page->phys = msi_addr;
	msi_page->iova = iova;
	list_add(&msi_page->list, &cookie->msi_page_list);
	return msi_page;

out_free_page:
	kfree(msi_page);
	return NULL;
}

void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg)
{
	struct device *dev = msi_desc_to_dev(irq_get_msi_desc(irq));
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_cookie *cookie;
	struct iommu_dma_msi_page *msi_page;
	phys_addr_t msi_addr = (u64)msg->address_hi << 32 | msg->address_lo;
	unsigned long flags;

	if (!domain || !domain->iova_cookie)
		return;

	cookie = domain->iova_cookie;

	/*
	 * We disable IRQs to rule out a possible inversion against
	 * irq_desc_lock if, say, someone tries to retarget the affinity
	 * of an MSI from within an IPI handler.
	 */
	spin_lock_irqsave(&cookie->msi_lock, flags);
	msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
	spin_unlock_irqrestore(&cookie->msi_lock, flags);

	if (WARN_ON(!msi_page)) {
		/*
		 * We're called from a void callback, so the best we can do is
		 * 'fail' by filling the message with obviously bogus values.
		 * Since we got this far due to an IOMMU being present, it's
		 * not like the existing address would have worked anyway...
		 */
		msg->address_hi = ~0U;
		msg->address_lo = ~0U;
		msg->data = ~0U;
	} else {
		msg->address_hi = upper_32_bits(msi_page->iova);
		msg->address_lo &= cookie_msi_granule(cookie) - 1;
		msg->address_lo += lower_32_bits(msi_page->iova);
	}
}
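
/*
 * Example (illustrative sketch only): an MSI irqchip's compose callback
 * fills in the physical doorbell address and then calls this last, so the
 * address is transparently rewritten to its IOVA alias whenever an IOMMU
 * sits in between; MY_DOORBELL_PHYS is a hypothetical doorbell address:
 *
 *	static void my_compose_msi_msg(struct irq_data *data,
 *				       struct msi_msg *msg)
 *	{
 *		msg->address_hi = upper_32_bits(MY_DOORBELL_PHYS);
 *		msg->address_lo = lower_32_bits(MY_DOORBELL_PHYS);
 *		msg->data = data->hwirq;
 *		iommu_dma_map_msi_msg(data->irq, msg);
 *	}
 */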