/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#define IOMMU_MAPPING_ERROR	0

struct iommu_dma_msi_page {
	struct list_head	list;
	dma_addr_t		iova;
	phys_addr_t		phys;
};

enum iommu_dma_cookie_type {
	IOMMU_DMA_IOVA_COOKIE,
	IOMMU_DMA_MSI_COOKIE,
};

struct iommu_dma_cookie {
	enum iommu_dma_cookie_type	type;
	union {
		/* Full allocator for IOMMU_DMA_IOVA_COOKIE */
		struct iova_domain	iovad;
		/* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
		dma_addr_t		msi_iova;
	};
	struct list_head		msi_page_list;
	spinlock_t			msi_lock;
};

static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
{
	if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
		return cookie->iovad.granule;
	return PAGE_SIZE;
}

static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
{
	struct iommu_dma_cookie *cookie;

	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
	if (cookie) {
		spin_lock_init(&cookie->msi_lock);
		INIT_LIST_HEAD(&cookie->msi_page_list);
		cookie->type = type;
	}
	return cookie;
}

int iommu_dma_init(void)
{
	return iova_cache_get();
}

/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 *
 * IOMMU drivers should normally call this from their domain_alloc
 * callback when domain->type == IOMMU_DOMAIN_DMA.
 */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
	if (domain->iova_cookie)
		return -EEXIST;

	domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
	if (!domain->iova_cookie)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(iommu_get_dma_cookie);

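/*
 * Example (illustrative sketch, not part of this file): a driver's
 * domain_alloc callback would typically pair a new DMA domain with a cookie
 * as below. Real drivers embed the iommu_domain in a driver-private struct;
 * a bare kzalloc'ed domain is used here only to keep the sketch
 * self-contained.
 */
static struct iommu_domain *example_domain_alloc(unsigned type)
{
	struct iommu_domain *domain = kzalloc(sizeof(*domain), GFP_KERNEL);

	if (!domain)
		return NULL;

	/* Only DMA-API domains need the full IOVA allocator cookie */
	if (type == IOMMU_DOMAIN_DMA && iommu_get_dma_cookie(domain)) {
		kfree(domain);
		return NULL;
	}
	return domain;
}
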
/**
 * iommu_get_msi_cookie - Acquire just MSI remapping resources
 * @domain: IOMMU domain to prepare
 * @base: Start address of IOVA region for MSI mappings
 *
 * Users who manage their own IOVA allocation and do not want DMA API support,
 * but would still like to take advantage of automatic MSI remapping, can use
 * this to initialise their own domain appropriately. Users should reserve a
 * contiguous IOVA region, starting at @base, large enough to accommodate the
 * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
 * used by the devices attached to @domain.
 */
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
	struct iommu_dma_cookie *cookie;

	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;

	if (domain->iova_cookie)
		return -EEXIST;

	cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
	if (!cookie)
		return -ENOMEM;

	cookie->msi_iova = base;
	domain->iova_cookie = cookie;
	return 0;
}
EXPORT_SYMBOL(iommu_get_msi_cookie);

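/*
 * Example (illustrative sketch): a user of an unmanaged domain that does its
 * own IOVA management, VFIO-style, only needs to set aside a linear region
 * for MSI doorbells. The base address here is hypothetical; in practice it
 * is chosen to avoid the regions the user maps itself.
 */
static int example_enable_msi_remapping(struct iommu_domain *domain)
{
	dma_addr_t msi_base = 0x8000000;	/* hypothetical reserved IOVA */

	return iommu_get_msi_cookie(domain, msi_base);
}
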
/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
 *          iommu_get_msi_cookie()
 *
 * IOMMU drivers should normally call this from their domain_free callback.
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi, *tmp;

	if (!cookie)
		return;

	if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
		put_iova_domain(&cookie->iovad);

	list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
		list_del(&msi->list);
		kfree(msi);
	}
	kfree(cookie);
	domain->iova_cookie = NULL;
}
EXPORT_SYMBOL(iommu_put_dma_cookie);

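/*
 * Example (illustrative sketch): the matching domain_free callback for
 * example_domain_alloc() above releases the cookie first; this is safe even
 * if no cookie was ever attached.
 */
static void example_domain_free(struct iommu_domain *domain)
{
	iommu_put_dma_cookie(domain);
	kfree(domain);
}
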
/**
 * iommu_dma_get_resv_regions - Reserved region driver helper
 * @dev: Device from iommu_get_resv_regions()
 * @list: Reserved region list from iommu_get_resv_regions()
 *
 * IOMMU drivers can use this to implement their .get_resv_regions callback
 * for general non-IOMMU-specific reservations. Currently, this covers host
 * bridge windows for PCI devices.
 */
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{
	struct pci_host_bridge *bridge;
	struct resource_entry *window;

	if (!dev_is_pci(dev))
		return;

	bridge = pci_find_host_bridge(to_pci_dev(dev)->bus);
	resource_list_for_each_entry(window, &bridge->windows) {
		struct iommu_resv_region *region;
		phys_addr_t start;
		size_t length;

		if (resource_type(window->res) != IORESOURCE_MEM)
			continue;

		start = window->res->start - window->offset;
		length = window->res->end - window->res->start + 1;
		region = iommu_alloc_resv_region(start, length, 0,
				IOMMU_RESV_RESERVED);
		if (!region)
			return;

		list_add_tail(&region->list, list);
	}
}
EXPORT_SYMBOL(iommu_dma_get_resv_regions);

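/*
 * Example (illustrative sketch): a driver with no extra firmware-specific
 * reservations can point its callback straight at the helper. The ops name
 * is hypothetical and only the relevant member is shown.
 */
static const struct iommu_ops example_iommu_ops = {
	.get_resv_regions	= iommu_dma_get_resv_regions,
};
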
static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
		phys_addr_t start, phys_addr_t end)
{
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_dma_msi_page *msi_page;
	int i, num_pages;

	start -= iova_offset(iovad, start);
	num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);

	msi_page = kcalloc(num_pages, sizeof(*msi_page), GFP_KERNEL);
	if (!msi_page)
		return -ENOMEM;

	for (i = 0; i < num_pages; i++) {
		msi_page[i].phys = start;
		msi_page[i].iova = start;
		INIT_LIST_HEAD(&msi_page[i].list);
		list_add(&msi_page[i].list, &cookie->msi_page_list);
		start += iovad->granule;
	}

	return 0;
}

static int iova_reserve_iommu_regions(struct device *dev,
		struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_resv_region *region;
	LIST_HEAD(resv_regions);
	int ret = 0;

	iommu_get_resv_regions(dev, &resv_regions);
	list_for_each_entry(region, &resv_regions, list) {
		unsigned long lo, hi;

		/* We ARE the software that manages these! */
		if (region->type == IOMMU_RESV_SW_MSI)
			continue;

		lo = iova_pfn(iovad, region->start);
		hi = iova_pfn(iovad, region->start + region->length - 1);
		reserve_iova(iovad, lo, hi);

		if (region->type == IOMMU_RESV_MSI)
			ret = cookie_init_hw_msi_region(cookie, region->start,
					region->start + region->length);
		if (ret)
			break;
	}
	iommu_put_resv_regions(dev, &resv_regions);

	return ret;
}

/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @size: Size of IOVA space
 * @dev: Device the domain is being initialised for
 *
 * @base and @size should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
		u64 size, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long order, base_pfn;

	if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
		return -EINVAL;

	/* Use the smallest supported page size for IOVA granularity */
	order = __ffs(domain->pgsize_bitmap);
	base_pfn = max_t(unsigned long, 1, base >> order);

	/* Check the domain allows at least some access to the device... */
	if (domain->geometry.force_aperture) {
		if (base > domain->geometry.aperture_end ||
		    base + size <= domain->geometry.aperture_start) {
			pr_warn("specified DMA range outside IOMMU capability\n");
			return -EFAULT;
		}
		/* ...then finally give it a kicking to make sure it fits */
		base_pfn = max_t(unsigned long, base_pfn,
				domain->geometry.aperture_start >> order);
	}

	/* start_pfn is always nonzero for an already-initialised domain */
	if (iovad->start_pfn) {
		if (1UL << order != iovad->granule ||
		    base_pfn != iovad->start_pfn) {
			pr_warn("Incompatible range for DMA domain\n");
			return -EFAULT;
		}

		return 0;
	}

	init_iova_domain(iovad, 1UL << order, base_pfn);
	if (!dev)
		return 0;

	return iova_reserve_iommu_regions(dev, domain);
}
EXPORT_SYMBOL(iommu_dma_init_domain);

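/*
 * Example (illustrative sketch): arch code wiring up per-device DMA ops
 * would initialise the domain over the device's usable IOVA window; the
 * dma_base and size values come from the bus or firmware description.
 */
static bool example_setup_dma_ops(struct device *dev, u64 dma_base, u64 size)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	if (!domain || domain->type != IOMMU_DOMAIN_DMA)
		return false;

	return iommu_dma_init_domain(domain, dma_base, size, dev) == 0;
}
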
/**
 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
 *                    page flags.
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 * @attrs: DMA attributes for the mapping
 *
 * Return: corresponding IOMMU API page protection flags
 */
int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
		     unsigned long attrs)
{
	int prot = coherent ? IOMMU_CACHE : 0;

	if (attrs & DMA_ATTR_PRIVILEGED)
		prot |= IOMMU_PRIV;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return 0;
	}
}

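/*
 * Example (illustrative): a cache-coherent device mapping a buffer for
 * device reads would use
 *
 *	int prot = dma_info_to_prot(DMA_TO_DEVICE, true, 0);
 *
 * yielding IOMMU_CACHE | IOMMU_READ, which is then passed on to
 * iommu_dma_map_page() or iommu_dma_map_sg() below.
 */
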
static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
		size_t size, dma_addr_t dma_limit, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long shift, iova_len, iova = 0;

	if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
		cookie->msi_iova += size;
		return cookie->msi_iova - size;
	}

	shift = iova_shift(iovad);
	iova_len = size >> shift;
	/*
	 * Freeing non-power-of-two-sized allocations back into the IOVA caches
	 * will come back to bite us badly, so we have to waste a bit of space
	 * rounding up anything cacheable to make sure that can't happen. The
	 * order of the unadjusted size will still match upon freeing.
	 */
	if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
		iova_len = roundup_pow_of_two(iova_len);
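	/* e.g. with a 4K granule, a 6-page request now occupies 8 pages of IOVA */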

	if (domain->geometry.force_aperture)
		dma_limit = min(dma_limit, domain->geometry.aperture_end);

	/* Try to get PCI devices a SAC address */
	if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
		iova = alloc_iova_fast(iovad, iova_len,
				       DMA_BIT_MASK(32) >> shift, false);

	if (!iova)
		iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
				       true);

	return (dma_addr_t)iova << shift;
}

static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
		dma_addr_t iova, size_t size)
{
	struct iova_domain *iovad = &cookie->iovad;

	/* The MSI case is only ever cleaning up its most recent allocation */
	if (cookie->type == IOMMU_DMA_MSI_COOKIE)
		cookie->msi_iova -= size;
	else
		free_iova_fast(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad));
}

static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr,
		size_t size)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, dma_addr);

	dma_addr -= iova_off;
	size = iova_align(iovad, size + iova_off);

	WARN_ON(iommu_unmap(domain, dma_addr, size) != size);
	iommu_dma_free_iova(cookie, dma_addr, size);
}

static void __iommu_dma_free_pages(struct page **pages, int count)
{
	while (count--)
		__free_page(pages[count]);
	kvfree(pages);
}

static struct page **__iommu_dma_alloc_pages(unsigned int count,
		unsigned long order_mask, gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0, array_size = count * sizeof(*pages);

	order_mask &= (2U << MAX_ORDER) - 1;
	if (!order_mask)
		return NULL;

	if (array_size <= PAGE_SIZE)
		pages = kzalloc(array_size, GFP_KERNEL);
	else
		pages = vzalloc(array_size);
	if (!pages)
		return NULL;

	/* IOMMU can map any pages, so highmem can also be used here */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	while (count) {
		struct page *page = NULL;
		unsigned int order_size;

		/*
		 * Higher-order allocations are a convenience rather
		 * than a necessity, hence using __GFP_NORETRY until
		 * falling back to minimum-order allocations.
		 */
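		/*
		 * e.g. count = 13 with orders {0,2,3} permitted: try order 3
		 * (8 pages) first, then order 2, then order 0, dropping each
		 * order from the mask once it has been attempted.
		 */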
		for (order_mask &= (2U << __fls(count)) - 1;
		     order_mask; order_mask &= ~order_size) {
			unsigned int order = __fls(order_mask);

			order_size = 1U << order;
			page = alloc_pages((order_mask - order_size) ?
					gfp | __GFP_NORETRY : gfp, order);
			if (!page)
				continue;
			if (!order)
				break;
			if (!PageCompound(page)) {
				split_page(page, order);
				break;
			} else if (!split_huge_page(page)) {
				break;
			}
			__free_pages(page, order);
		}
		if (!page) {
			__iommu_dma_free_pages(pages, i);
			return NULL;
		}
		count -= order_size;
		while (order_size--)
			pages[i++] = page++;
	}
	return pages;
}

/**
 * iommu_dma_free - Free a buffer allocated by iommu_dma_alloc()
 * @dev: Device which owns this buffer
 * @pages: Array of buffer pages as returned by iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @handle: DMA address of buffer
 *
 * Frees both the pages associated with the buffer, and the array
 * describing them
 */
void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
		dma_addr_t *handle)
{
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), *handle, size);
	__iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
	*handle = IOMMU_MAPPING_ERROR;
}

/**
 * iommu_dma_alloc - Allocate and map a buffer contiguous in IOVA space
 * @dev: Device to allocate memory for. Must be a real device
 *	 attached to an iommu_dma_domain
 * @size: Size of buffer in bytes
 * @gfp: Allocation flags
 * @attrs: DMA attributes for this allocation
 * @prot: IOMMU mapping flags
 * @handle: Out argument for allocated DMA handle
 * @flush_page: Arch callback which must ensure PAGE_SIZE bytes from the
 *		given VA/PA are visible to the given non-coherent device.
 *
 * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 *
 * Return: Array of struct page pointers describing the buffer,
 *	   or NULL on failure.
 */
struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
		unsigned long attrs, int prot, dma_addr_t *handle,
		void (*flush_page)(struct device *, const void *, phys_addr_t))
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct page **pages;
	struct sg_table sgt;
	dma_addr_t iova;
	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;

	*handle = IOMMU_MAPPING_ERROR;

	min_size = alloc_sizes & -alloc_sizes;
	if (min_size < PAGE_SIZE) {
		min_size = PAGE_SIZE;
		alloc_sizes |= PAGE_SIZE;
	} else {
		size = ALIGN(size, min_size);
	}
	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
		alloc_sizes = min_size;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	pages = __iommu_dma_alloc_pages(count, alloc_sizes >> PAGE_SHIFT, gfp);
	if (!pages)
		return NULL;

	size = iova_align(iovad, size);
	iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
	if (!iova)
		goto out_free_pages;

	if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
		goto out_free_iova;

	if (!(prot & IOMMU_CACHE)) {
		struct sg_mapping_iter miter;
		/*
		 * The CPU-centric flushing implied by SG_MITER_TO_SG isn't
		 * sufficient here, so skip it by using the "wrong" direction.
		 */
		sg_miter_start(&miter, sgt.sgl, sgt.orig_nents, SG_MITER_FROM_SG);
		while (sg_miter_next(&miter))
			flush_page(dev, miter.addr, page_to_phys(miter.page));
		sg_miter_stop(&miter);
	}

	if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, prot)
			< size)
		goto out_free_sg;

	*handle = iova;
	sg_free_table(&sgt);
	return pages;

out_free_sg:
	sg_free_table(&sgt);
out_free_iova:
	iommu_dma_free_iova(cookie, iova, size);
out_free_pages:
	__iommu_dma_free_pages(pages, count);
	return NULL;
}

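/*
 * Example (illustrative sketch): an arch allocator for a non-coherent device
 * would combine the above with dma_info_to_prot() and its own cache
 * maintenance. example_flush_page() stands in for arch-specific flushing
 * (e.g. arm64's __dma_flush_area()) and is hypothetical.
 */
static void example_flush_page(struct device *dev, const void *virt,
			       phys_addr_t phys)
{
	/* arch-specific: make these PAGE_SIZE bytes visible to the device */
}

static struct page **example_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
	int prot = dma_info_to_prot(DMA_BIDIRECTIONAL, false, attrs);

	return iommu_dma_alloc(dev, size, gfp, attrs, prot, handle,
			       example_flush_page);
}
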
/**
 * iommu_dma_mmap - Map a buffer into provided user VMA
 * @pages: Array representing buffer from iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @vma: VMA describing requested userspace mapping
 *
 * Maps the pages of the buffer in @pages into @vma. The caller is responsible
 * for verifying the correct size and protection of @vma beforehand.
 */
int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma)
{
	unsigned long uaddr = vma->vm_start;
	unsigned int i, count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	int ret = -ENXIO;

	for (i = vma->vm_pgoff; i < count && uaddr < vma->vm_end; i++) {
		ret = vm_insert_page(vma, uaddr, pages[i]);
		if (ret)
			break;
		uaddr += PAGE_SIZE;
	}
	return ret;
}

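/*
 * Example (illustrative sketch): an arch mmap op validates the VMA against
 * the buffer before handing over the page array; retrieving @pages from the
 * vmalloc area is arch-specific and elided here.
 */
static int example_mmap(struct vm_area_struct *vma, struct page **pages,
			size_t size)
{
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;

	if (vma_pages(vma) + vma->vm_pgoff > nr_pages)
		return -ENXIO;

	return iommu_dma_mmap(pages, size, vma);
}
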
static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
		size_t size, int prot)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	size_t iova_off = 0;
	dma_addr_t iova;

	if (cookie->type == IOMMU_DMA_IOVA_COOKIE) {
		iova_off = iova_offset(&cookie->iovad, phys);
		size = iova_align(&cookie->iovad, size + iova_off);
	}

	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
	if (!iova)
		return IOMMU_MAPPING_ERROR;

	if (iommu_map(domain, iova, phys - iova_off, size, prot)) {
		iommu_dma_free_iova(cookie, iova, size);
		return IOMMU_MAPPING_ERROR;
	}
	return iova + iova_off;
}

dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, int prot)
{
	return __iommu_dma_map(dev, page_to_phys(page) + offset, size, prot);
}

void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle, size);
}

/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
 *
 * At this point the segments are already laid out by iommu_dma_map_sg() to
 * avoid individually crossing any boundaries, so we merely need to check a
 * segment's start address to avoid concatenating across one.
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
		dma_addr_t dma_addr)
{
	struct scatterlist *s, *cur = sg;
	unsigned long seg_mask = dma_get_seg_boundary(dev);
	unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
	int i, count = 0;

	for_each_sg(sg, s, nents, i) {
		/* Restore this segment's original unaligned fields first */
		unsigned int s_iova_off = sg_dma_address(s);
		unsigned int s_length = sg_dma_len(s);
		unsigned int s_iova_len = s->length;

		s->offset += s_iova_off;
		s->length = s_length;
		sg_dma_address(s) = IOMMU_MAPPING_ERROR;
		sg_dma_len(s) = 0;

		/*
		 * Now fill in the real DMA data. If...
		 * - there is a valid output segment to append to
		 * - and this segment starts on an IOVA page boundary
		 * - but doesn't fall at a segment boundary
		 * - and wouldn't make the resulting output segment too long
		 */
		if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
		    (cur_len + s_length <= max_len)) {
			/* ...then concatenate it with the previous one */
			cur_len += s_length;
		} else {
			/* Otherwise start the next output segment */
			if (i > 0)
				cur = sg_next(cur);
			cur_len = s_length;
			count++;

			sg_dma_address(cur) = dma_addr + s_iova_off;
		}

		sg_dma_len(cur) = cur_len;
		dma_addr += s_iova_len;

		if (s_length + s_iova_off < s_iova_len)
			cur_len = 0;
	}
	return count;
}

/*
 * If mapping failed, then just restore the original list,
 * but making sure the DMA fields are invalidated.
 */
static void __invalidate_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_address(s) != IOMMU_MAPPING_ERROR)
			s->offset += sg_dma_address(s);
		if (sg_dma_len(s))
			s->length = sg_dma_len(s);
		sg_dma_address(s) = IOMMU_MAPPING_ERROR;
		sg_dma_len(s) = 0;
	}
}

/*
 * The DMA API client is passing in a scatterlist which could describe
 * any old buffer layout, but the IOMMU API requires everything to be
 * aligned to IOMMU pages. Hence the need for this complicated bit of
 * impedance-matching, to be able to hand off a suitably-aligned list,
 * but still preserve the original offsets and sizes for the caller.
 */
int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, int prot)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct scatterlist *s, *prev = NULL;
	dma_addr_t iova;
	size_t iova_len = 0;
	unsigned long mask = dma_get_seg_boundary(dev);
	int i;

	/*
	 * Work out how much IOVA space we need, and align the segments to
	 * IOVA granules for the IOMMU driver to handle. With some clever
	 * trickery we can modify the list in-place, but reversibly, by
	 * stashing the unaligned parts in the as-yet-unused DMA fields.
	 */
	for_each_sg(sg, s, nents, i) {
		size_t s_iova_off = iova_offset(iovad, s->offset);
		size_t s_length = s->length;
		size_t pad_len = (mask - iova_len + 1) & mask;

		sg_dma_address(s) = s_iova_off;
		sg_dma_len(s) = s_length;
		s->offset -= s_iova_off;
		s_length = iova_align(iovad, s_length + s_iova_off);
		s->length = s_length;

		/*
		 * Due to the alignment of our single IOVA allocation, we can
		 * depend on these assumptions about the segment boundary mask:
		 * - If mask size >= IOVA size, then the IOVA range cannot
		 *   possibly fall across a boundary, so we don't care.
		 * - If mask size < IOVA size, then the IOVA range must start
		 *   exactly on a boundary, therefore we can lay things out
		 *   based purely on segment lengths without needing to know
		 *   the actual addresses beforehand.
		 * - The mask must be a power of 2, so pad_len == 0 if
		 *   iova_len == 0, thus we cannot dereference prev the first
		 *   time through here (i.e. before it has a meaningful value).
		 */
		if (pad_len && pad_len < s_length - 1) {
			prev->length += pad_len;
			iova_len += pad_len;
		}

		iova_len += s_length;
		prev = s;
	}

	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
	if (!iova)
		goto out_restore_sg;

	/*
	 * We'll leave any physical concatenation to the IOMMU driver's
	 * implementation - it knows better than we do.
	 */
	if (iommu_map_sg(domain, iova, sg, nents, prot) < iova_len)
		goto out_free_iova;

	return __finalise_sg(dev, sg, nents, iova);

out_free_iova:
	iommu_dma_free_iova(cookie, iova, iova_len);
out_restore_sg:
	__invalidate_sg(sg, nents);
	return 0;
}

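/*
 * Example (illustrative sketch): an arch map_sg op layers protection flags
 * (and, for non-coherent devices, cache maintenance, elided here) on top of
 * the helper above.
 */
static int example_map_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	bool coherent = true;	/* assume a cache-coherent device for brevity */

	return iommu_dma_map_sg(dev, sgl, nents,
			dma_info_to_prot(dir, coherent, attrs));
}
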
void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t start, end;
	struct scatterlist *tmp;
	int i;
	/*
	 * The scatterlist segments are mapped into a single
	 * contiguous IOVA allocation, so this is incredibly easy.
	 */
	start = sg_dma_address(sg);
	for_each_sg(sg_next(sg), tmp, nents - 1, i) {
		if (sg_dma_len(tmp) == 0)
			break;
		sg = tmp;
	}
	end = sg_dma_address(sg) + sg_dma_len(sg);
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), start, end - start);
}

dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return __iommu_dma_map(dev, phys, size,
			dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO);
}

void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle, size);
}

int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == IOMMU_MAPPING_ERROR;
}

static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
		phys_addr_t msi_addr, struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi_page;
	dma_addr_t iova;
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
	size_t size = cookie_msi_granule(cookie);

	msi_addr &= ~(phys_addr_t)(size - 1);
	list_for_each_entry(msi_page, &cookie->msi_page_list, list)
		if (msi_page->phys == msi_addr)
			return msi_page;

	msi_page = kzalloc(sizeof(*msi_page), GFP_ATOMIC);
	if (!msi_page)
		return NULL;

	iova = __iommu_dma_map(dev, msi_addr, size, prot);
	if (iommu_dma_mapping_error(dev, iova))
		goto out_free_page;

	INIT_LIST_HEAD(&msi_page->list);
	msi_page->phys = msi_addr;
	msi_page->iova = iova;
	list_add(&msi_page->list, &cookie->msi_page_list);
	return msi_page;

out_free_page:
	kfree(msi_page);
	return NULL;
}

void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg)
{
	struct device *dev = msi_desc_to_dev(irq_get_msi_desc(irq));
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_cookie *cookie;
	struct iommu_dma_msi_page *msi_page;
	phys_addr_t msi_addr = (u64)msg->address_hi << 32 | msg->address_lo;
	unsigned long flags;

	if (!domain || !domain->iova_cookie)
		return;

	cookie = domain->iova_cookie;

	/*
	 * We disable IRQs to rule out a possible inversion against
	 * irq_desc_lock if, say, someone tries to retarget the affinity
	 * of an MSI from within an IPI handler.
	 */
	spin_lock_irqsave(&cookie->msi_lock, flags);
	msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
	spin_unlock_irqrestore(&cookie->msi_lock, flags);

	if (WARN_ON(!msi_page)) {
		/*
		 * We're called from a void callback, so the best we can do is
		 * 'fail' by filling the message with obviously bogus values.
		 * Since we got this far due to an IOMMU being present, it's
		 * not like the existing address would have worked anyway...
		 */
		msg->address_hi = ~0U;
		msg->address_lo = ~0U;
		msg->data = ~0U;
	} else {
		msg->address_hi = upper_32_bits(msi_page->iova);
		msg->address_lo &= cookie_msi_granule(cookie) - 1;
		msg->address_lo += lower_32_bits(msi_page->iova);
	}
}
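
/*
 * Example (illustrative sketch): an MSI irqchip's compose callback hands its
 * message over for remapping once the physical doorbell address is filled
 * in, in the style of the GICv3 ITS driver. The doorbell address and data
 * below are hypothetical.
 */
static void example_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
	phys_addr_t doorbell = 0xfee00000;	/* hypothetical doorbell address */

	msg->address_lo = lower_32_bits(doorbell);
	msg->address_hi = upper_32_bits(doorbell);
	msg->data = d->hwirq;
	iommu_dma_map_msi_msg(d->irq, msg);
}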