/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>

struct iommu_dma_msi_page {
	struct list_head	list;
	dma_addr_t		iova;
	phys_addr_t		phys;
};

struct iommu_dma_cookie {
	struct iova_domain	iovad;
	struct list_head	msi_page_list;
	spinlock_t		msi_lock;
};

static inline struct iova_domain *cookie_iovad(struct iommu_domain *domain)
{
	return &((struct iommu_dma_cookie *)domain->iova_cookie)->iovad;
}

int iommu_dma_init(void)
{
	return iova_cache_get();
}

/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 *
 * IOMMU drivers should normally call this from their domain_alloc
 * callback when domain->type == IOMMU_DOMAIN_DMA.
 */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie;

	if (domain->iova_cookie)
		return -EEXIST;

	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
	if (!cookie)
		return -ENOMEM;

	spin_lock_init(&cookie->msi_lock);
	INIT_LIST_HEAD(&cookie->msi_page_list);
	domain->iova_cookie = cookie;
	return 0;
}
EXPORT_SYMBOL(iommu_get_dma_cookie);

/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 *
 * IOMMU drivers should normally call this from their domain_free callback.
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi, *tmp;

	if (!cookie)
		return;

	if (cookie->iovad.granule)
		put_iova_domain(&cookie->iovad);

	list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
		list_del(&msi->list);
		kfree(msi);
	}
	kfree(cookie);
	domain->iova_cookie = NULL;
}
EXPORT_SYMBOL(iommu_put_dma_cookie);

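/*
 * Illustrative sketch (not part of this file): a hypothetical IOMMU driver
 * would typically pair the two calls above in its domain lifecycle callbacks.
 * "my_iommu_domain", "to_my_domain()" and the surrounding helpers are
 * invented names for the example only.
 *
 *	static struct iommu_domain *my_iommu_domain_alloc(unsigned type)
 *	{
 *		struct my_iommu_domain *dom;
 *
 *		dom = kzalloc(sizeof(*dom), GFP_KERNEL);
 *		if (!dom)
 *			return NULL;
 *
 *		if (type == IOMMU_DOMAIN_DMA &&
 *		    iommu_get_dma_cookie(&dom->domain)) {
 *			kfree(dom);
 *			return NULL;
 *		}
 *		return &dom->domain;
 *	}
 *
 *	static void my_iommu_domain_free(struct iommu_domain *domain)
 *	{
 *		iommu_put_dma_cookie(domain);
 *		kfree(to_my_domain(domain));
 *	}
 */
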
/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @size: Size of IOVA space
 *
 * @base and @size should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, u64 size)
{
	struct iova_domain *iovad = cookie_iovad(domain);
	unsigned long order, base_pfn, end_pfn;

	if (!iovad)
		return -ENODEV;

	/* Use the smallest supported page size for IOVA granularity */
	order = __ffs(domain->pgsize_bitmap);
	base_pfn = max_t(unsigned long, 1, base >> order);
	end_pfn = (base + size - 1) >> order;

	/* Check the domain allows at least some access to the device... */
	if (domain->geometry.force_aperture) {
		if (base > domain->geometry.aperture_end ||
		    base + size <= domain->geometry.aperture_start) {
			pr_warn("specified DMA range outside IOMMU capability\n");
			return -EFAULT;
		}
		/* ...then finally give it a kicking to make sure it fits */
		base_pfn = max_t(unsigned long, base_pfn,
				domain->geometry.aperture_start >> order);
		end_pfn = min_t(unsigned long, end_pfn,
				domain->geometry.aperture_end >> order);
	}

	/* All we can safely do with an existing domain is enlarge it */
	if (iovad->start_pfn) {
		if (1UL << order != iovad->granule ||
		    base_pfn != iovad->start_pfn ||
		    end_pfn < iovad->dma_32bit_pfn) {
			pr_warn("Incompatible range for DMA domain\n");
			return -EFAULT;
		}
		iovad->dma_32bit_pfn = end_pfn;
	} else {
		init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn);
	}
	return 0;
}
EXPORT_SYMBOL(iommu_dma_init_domain);

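/*
 * Illustrative sketch (assumption, not part of this file): architecture code
 * attaching a device to a DMA domain would typically size the IOVA space from
 * the bus/DMA information it already holds, e.g.:
 *
 *	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
 *
 *	if (iommu_dma_init_domain(domain, dma_base, dma_size))
 *		pr_warn("Failed to initialise IOVA space for %s\n",
 *			dev_name(dev));
 *
 * where dma_base/dma_size come from firmware (e.g. dma-ranges) and are
 * already aligned to the IOMMU page granularity, as required above.
 */
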
/**
 * dma_direction_to_prot - Translate DMA API directions to IOMMU API page flags
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 *
 * Return: corresponding IOMMU API page protection flags
 */
int dma_direction_to_prot(enum dma_data_direction dir, bool coherent)
{
	int prot = coherent ? IOMMU_CACHE : 0;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return 0;
	}
}

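/*
 * Illustrative sketch (assumption): an arch dma_map_ops implementation built
 * on this layer would typically derive the IOMMU prot flags like so before
 * mapping, e.g. in its .map_page callback:
 *
 *	int prot = dma_direction_to_prot(dir, is_device_dma_coherent(dev));
 *	dma_addr_t ret = iommu_dma_map_page(dev, page, offset, size, prot);
 *
 * so a DMA_TO_DEVICE transfer on a coherent master yields
 * IOMMU_READ | IOMMU_CACHE, while DMA_FROM_DEVICE on a non-coherent one
 * yields just IOMMU_WRITE.
 */
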
static struct iova *__alloc_iova(struct iommu_domain *domain, size_t size,
		dma_addr_t dma_limit)
{
	struct iova_domain *iovad = cookie_iovad(domain);
	unsigned long shift = iova_shift(iovad);
	unsigned long length = iova_align(iovad, size) >> shift;

	if (domain->geometry.force_aperture)
		dma_limit = min(dma_limit, domain->geometry.aperture_end);
	/*
	 * Enforce size-alignment to be safe - there could perhaps be an
	 * attribute to control this per-device, or at least per-domain...
	 */
	return alloc_iova(iovad, length, dma_limit >> shift, true);
}

/* The IOVA allocator knows what we mapped, so just unmap whatever that was */
static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr)
{
	struct iova_domain *iovad = cookie_iovad(domain);
	unsigned long shift = iova_shift(iovad);
	unsigned long pfn = dma_addr >> shift;
	struct iova *iova = find_iova(iovad, pfn);
	size_t size;

	if (WARN_ON(!iova))
		return;

	size = iova_size(iova) << shift;
	size -= iommu_unmap(domain, pfn << shift, size);
	/* ...and if we can't, then something is horribly, horribly wrong */
	WARN_ON(size > 0);
	__free_iova(iovad, iova);
}

static void __iommu_dma_free_pages(struct page **pages, int count)
{
	while (count--)
		__free_page(pages[count]);
	kvfree(pages);
}

static struct page **__iommu_dma_alloc_pages(unsigned int count,
		unsigned long order_mask, gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0, array_size = count * sizeof(*pages);

	order_mask &= (2U << MAX_ORDER) - 1;
	if (!order_mask)
		return NULL;

	if (array_size <= PAGE_SIZE)
		pages = kzalloc(array_size, GFP_KERNEL);
	else
		pages = vzalloc(array_size);
	if (!pages)
		return NULL;

	/* IOMMU can map any pages, so highmem can also be used here */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	while (count) {
		struct page *page = NULL;
		unsigned int order_size;

		/*
		 * Higher-order allocations are a convenience rather
		 * than a necessity, hence using __GFP_NORETRY until
		 * falling back to minimum-order allocations.
		 */
		for (order_mask &= (2U << __fls(count)) - 1;
		     order_mask; order_mask &= ~order_size) {
			unsigned int order = __fls(order_mask);

			order_size = 1U << order;
			page = alloc_pages((order_mask - order_size) ?
					gfp | __GFP_NORETRY : gfp, order);
			if (!page)
				continue;
			if (!order)
				break;
			if (!PageCompound(page)) {
				split_page(page, order);
				break;
			} else if (!split_huge_page(page)) {
				break;
			}
			__free_pages(page, order);
		}
		if (!page) {
			__iommu_dma_free_pages(pages, i);
			return NULL;
		}
		count -= order_size;
		while (order_size--)
			pages[i++] = page++;
	}
	return pages;
}

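/*
 * Worked example (illustrative, assuming 4K CPU pages): for a 24-page request
 * with order_mask 0x11, i.e. orders {4, 0} from an IOMMU supporting 64K and
 * 4K page sizes, the loop above first clamps the mask to orders no larger
 * than the remaining count, then tries the largest candidate first:
 *
 *	count = 24, order_mask = 0x11	-> order 4 fits, allocate 16 pages
 *	count = 8,  order_mask = 0x01	-> order 4 no longer fits, so the
 *					   remaining 8 pages are allocated
 *					   individually at order 0
 *
 * A successful allocation is thus one 16-page block followed by eight single
 * pages, all exposed to the caller as a flat array of order-0 pages.
 */
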
/**
 * iommu_dma_free - Free a buffer allocated by iommu_dma_alloc()
 * @dev: Device which owns this buffer
 * @pages: Array of buffer pages as returned by iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @handle: DMA address of buffer
 *
 * Frees both the pages associated with the buffer, and the array
 * describing them
 */
void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
		dma_addr_t *handle)
{
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), *handle);
	__iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
	*handle = DMA_ERROR_CODE;
}

/**
 * iommu_dma_alloc - Allocate and map a buffer contiguous in IOVA space
 * @dev: Device to allocate memory for. Must be a real device
 *	 attached to an iommu_dma_domain
 * @size: Size of buffer in bytes
 * @gfp: Allocation flags
 * @attrs: DMA attributes for this allocation
 * @prot: IOMMU mapping flags
 * @handle: Out argument for allocated DMA handle
 * @flush_page: Arch callback which must ensure PAGE_SIZE bytes from the
 *		given VA/PA are visible to the given non-coherent device.
 *
 * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 *
 * Return: Array of struct page pointers describing the buffer,
 *	   or NULL on failure.
 */
struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
		unsigned long attrs, int prot, dma_addr_t *handle,
		void (*flush_page)(struct device *, const void *, phys_addr_t))
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iova_domain *iovad = cookie_iovad(domain);
	struct iova *iova;
	struct page **pages;
	struct sg_table sgt;
	dma_addr_t dma_addr;
	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;

	*handle = DMA_ERROR_CODE;

	min_size = alloc_sizes & -alloc_sizes;
	if (min_size < PAGE_SIZE) {
		min_size = PAGE_SIZE;
		alloc_sizes |= PAGE_SIZE;
	} else {
		size = ALIGN(size, min_size);
	}
	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
		alloc_sizes = min_size;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	pages = __iommu_dma_alloc_pages(count, alloc_sizes >> PAGE_SHIFT, gfp);
	if (!pages)
		return NULL;

	iova = __alloc_iova(domain, size, dev->coherent_dma_mask);
	if (!iova)
		goto out_free_pages;

	size = iova_align(iovad, size);
	if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
		goto out_free_iova;

	if (!(prot & IOMMU_CACHE)) {
		struct sg_mapping_iter miter;
		/*
		 * The CPU-centric flushing implied by SG_MITER_TO_SG isn't
		 * sufficient here, so skip it by using the "wrong" direction.
		 */
		sg_miter_start(&miter, sgt.sgl, sgt.orig_nents, SG_MITER_FROM_SG);
		while (sg_miter_next(&miter))
			flush_page(dev, miter.addr, page_to_phys(miter.page));
		sg_miter_stop(&miter);
	}

	dma_addr = iova_dma_addr(iovad, iova);
	if (iommu_map_sg(domain, dma_addr, sgt.sgl, sgt.orig_nents, prot)
			< size)
		goto out_free_sg;

	*handle = dma_addr;
	sg_free_table(&sgt);
	return pages;

out_free_sg:
	sg_free_table(&sgt);
out_free_iova:
	__free_iova(iovad, iova);
out_free_pages:
	__iommu_dma_free_pages(pages, count);
	return NULL;
}

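/*
 * Illustrative sketch (assumption, not part of this file): a non-coherent
 * arch would typically wrap the above in its dma_alloc path, passing a small
 * cache-maintenance callback. All "my_*" names here are invented for the
 * example:
 *
 *	static void my_flush_page(struct device *dev, const void *virt,
 *				  phys_addr_t phys)
 *	{
 *		my_arch_clean_dcache(virt, PAGE_SIZE);	// make page visible to device
 *	}
 *
 *	prot = dma_direction_to_prot(DMA_BIDIRECTIONAL, false);
 *	pages = iommu_dma_alloc(dev, size, gfp, attrs, prot, &dma_handle,
 *				my_flush_page);
 *	if (!pages)
 *		return NULL;
 *
 * The arch is then free to vmap() the page array for a kernel mapping with
 * whatever pgprot it needs, and must hand the same size and handle back to
 * iommu_dma_free() when the buffer is released.
 */
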
/**
 * iommu_dma_mmap - Map a buffer into provided user VMA
 * @pages: Array representing buffer from iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @vma: VMA describing requested userspace mapping
 *
 * Maps the pages of the buffer in @pages into @vma. The caller is responsible
 * for verifying the correct size and protection of @vma beforehand.
 */
int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma)
{
	unsigned long uaddr = vma->vm_start;
	unsigned int i, count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	int ret = -ENXIO;

	for (i = vma->vm_pgoff; i < count && uaddr < vma->vm_end; i++) {
		ret = vm_insert_page(vma, uaddr, pages[i]);
		if (ret)
			break;
		uaddr += PAGE_SIZE;
	}
	return ret;
}

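/*
 * Illustrative sketch (assumption): an arch .mmap callback would validate the
 * VMA against the buffer before handing off, e.g.:
 *
 *	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
 *
 *	if (vma->vm_pgoff >= nr_pages ||
 *	    vma_pages(vma) > nr_pages - vma->vm_pgoff)
 *		return -ENXIO;
 *
 *	return iommu_dma_mmap(pages, size, vma);
 *
 * where "pages" is the array previously returned by iommu_dma_alloc().
 */
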
dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, int prot)
{
	dma_addr_t dma_addr;
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iova_domain *iovad = cookie_iovad(domain);
	phys_addr_t phys = page_to_phys(page) + offset;
	size_t iova_off = iova_offset(iovad, phys);
	size_t len = iova_align(iovad, size + iova_off);
	struct iova *iova = __alloc_iova(domain, len, dma_get_mask(dev));

	if (!iova)
		return DMA_ERROR_CODE;

	dma_addr = iova_dma_addr(iovad, iova);
	if (iommu_map(domain, dma_addr, phys - iova_off, len, prot)) {
		__free_iova(iovad, iova);
		return DMA_ERROR_CODE;
	}
	return dma_addr + iova_off;
}

void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle);
}

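/*
 * Illustrative sketch (assumption): the pair above slots straight into an
 * arch's dma_map_ops, with the arch adding any cache maintenance of its own
 * for non-coherent devices. "my_*" names are invented for the example:
 *
 *	static dma_addr_t my_map_page(struct device *dev, struct page *page,
 *				      unsigned long offset, size_t size,
 *				      enum dma_data_direction dir,
 *				      unsigned long attrs)
 *	{
 *		int prot = dma_direction_to_prot(dir, is_device_dma_coherent(dev));
 *		dma_addr_t addr = iommu_dma_map_page(dev, page, offset, size, prot);
 *
 *		if (addr != DMA_ERROR_CODE && !is_device_dma_coherent(dev))
 *			my_sync_for_device(page, offset, size, dir);	// arch cache op
 *		return addr;
 *	}
 */
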
/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
 *
 * At this point the segments are already laid out by iommu_dma_map_sg() to
 * avoid individually crossing any boundaries, so we merely need to check a
 * segment's start address to avoid concatenating across one.
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
		dma_addr_t dma_addr)
{
	struct scatterlist *s, *cur = sg;
	unsigned long seg_mask = dma_get_seg_boundary(dev);
	unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
	int i, count = 0;

	for_each_sg(sg, s, nents, i) {
		/* Restore this segment's original unaligned fields first */
		unsigned int s_iova_off = sg_dma_address(s);
		unsigned int s_length = sg_dma_len(s);
		unsigned int s_iova_len = s->length;

		s->offset += s_iova_off;
		s->length = s_length;
		sg_dma_address(s) = DMA_ERROR_CODE;
		sg_dma_len(s) = 0;

		/*
		 * Now fill in the real DMA data. If...
		 * - there is a valid output segment to append to
		 * - and this segment starts on an IOVA page boundary
		 * - but doesn't fall at a segment boundary
		 * - and wouldn't make the resulting output segment too long
		 */
		if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
		    (cur_len + s_length <= max_len)) {
			/* ...then concatenate it with the previous one */
			cur_len += s_length;
		} else {
			/* Otherwise start the next output segment */
			if (i > 0)
				cur = sg_next(cur);
			cur_len = s_length;
			count++;

			sg_dma_address(cur) = dma_addr + s_iova_off;
		}

		sg_dma_len(cur) = cur_len;
		dma_addr += s_iova_len;

		if (s_length + s_iova_off < s_iova_len)
			cur_len = 0;
	}
	return count;
}

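/*
 * Worked example (illustrative, 4K IOVA granule): two input segments of
 * 0x1000 bytes each, both starting at offset 0 within their pages, map to
 * contiguous IOVA. Provided the second one does not start exactly on the
 * device's segment boundary and 0x2000 <= dma_get_max_seg_size(dev),
 * __finalise_sg() hands back a single DMA segment of length 0x2000;
 * otherwise the two segments are returned separately, each with its own
 * dma_address.
 */
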
/*
 * If mapping failed, then just restore the original list,
 * while making sure the DMA fields are invalidated.
 */
static void __invalidate_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_address(s) != DMA_ERROR_CODE)
			s->offset += sg_dma_address(s);
		if (sg_dma_len(s))
			s->length = sg_dma_len(s);
		sg_dma_address(s) = DMA_ERROR_CODE;
		sg_dma_len(s) = 0;
	}
}

/*
 * The DMA API client is passing in a scatterlist which could describe
 * any old buffer layout, but the IOMMU API requires everything to be
 * aligned to IOMMU pages. Hence the need for this complicated bit of
 * impedance-matching, to be able to hand off a suitably-aligned list,
 * but still preserve the original offsets and sizes for the caller.
 */
int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, int prot)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iova_domain *iovad = cookie_iovad(domain);
	struct iova *iova;
	struct scatterlist *s, *prev = NULL;
	dma_addr_t dma_addr;
	size_t iova_len = 0;
	unsigned long mask = dma_get_seg_boundary(dev);
	int i;

	/*
	 * Work out how much IOVA space we need, and align the segments to
	 * IOVA granules for the IOMMU driver to handle. With some clever
	 * trickery we can modify the list in-place, but reversibly, by
	 * stashing the unaligned parts in the as-yet-unused DMA fields.
	 */
	for_each_sg(sg, s, nents, i) {
		size_t s_iova_off = iova_offset(iovad, s->offset);
		size_t s_length = s->length;
		size_t pad_len = (mask - iova_len + 1) & mask;

		sg_dma_address(s) = s_iova_off;
		sg_dma_len(s) = s_length;
		s->offset -= s_iova_off;
		s_length = iova_align(iovad, s_length + s_iova_off);
		s->length = s_length;

		/*
		 * Due to the alignment of our single IOVA allocation, we can
		 * depend on these assumptions about the segment boundary mask:
		 * - If mask size >= IOVA size, then the IOVA range cannot
		 *   possibly fall across a boundary, so we don't care.
		 * - If mask size < IOVA size, then the IOVA range must start
		 *   exactly on a boundary, therefore we can lay things out
		 *   based purely on segment lengths without needing to know
		 *   the actual addresses beforehand.
		 * - The mask must be a power of 2, so pad_len == 0 if
		 *   iova_len == 0, thus we cannot dereference prev the first
		 *   time through here (i.e. before it has a meaningful value).
		 */
		if (pad_len && pad_len < s_length - 1) {
			prev->length += pad_len;
			iova_len += pad_len;
		}

		iova_len += s_length;
		prev = s;
	}

	iova = __alloc_iova(domain, iova_len, dma_get_mask(dev));
	if (!iova)
		goto out_restore_sg;

	/*
	 * We'll leave any physical concatenation to the IOMMU driver's
	 * implementation - it knows better than we do.
	 */
	dma_addr = iova_dma_addr(iovad, iova);
	if (iommu_map_sg(domain, dma_addr, sg, nents, prot) < iova_len)
		goto out_free_iova;

	return __finalise_sg(dev, sg, nents, dma_addr);

out_free_iova:
	__free_iova(iovad, iova);
out_restore_sg:
	__invalidate_sg(sg, nents);
	return 0;
}

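/*
 * Illustrative sketch (assumption): an arch .map_sg callback would combine
 * the helpers above in the obvious way (invented names again):
 *
 *	static int my_map_sg(struct device *dev, struct scatterlist *sgl,
 *			     int nents, enum dma_data_direction dir,
 *			     unsigned long attrs)
 *	{
 *		int prot = dma_direction_to_prot(dir, is_device_dma_coherent(dev));
 *
 *		return iommu_dma_map_sg(dev, sgl, nents, prot);
 *	}
 *
 * A return value of 0 means the mapping failed and the scatterlist has been
 * restored to its original state; any non-coherent cache maintenance is the
 * arch's own responsibility, as with map_page above.
 */
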
void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	/*
	 * The scatterlist segments are mapped into a single
	 * contiguous IOVA allocation, so this is incredibly easy.
	 */
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), sg_dma_address(sg));
}

int iommu_dma_supported(struct device *dev, u64 mask)
{
	/*
	 * 'Special' IOMMUs which don't have the same addressing capability
	 * as the CPU will have to wait until we have some way to query that
	 * before they'll be able to use this framework.
	 */
	return 1;
}

int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == DMA_ERROR_CODE;
}

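/*
 * Illustrative sketch (assumption): putting it all together, an arch would
 * typically expose this layer through a dma_map_ops table along these lines,
 * where the "my_*" entries are the kind of thin shims sketched above:
 *
 *	static struct dma_map_ops my_iommu_dma_ops = {
 *		.alloc		= my_alloc,
 *		.free		= my_free,
 *		.mmap		= my_mmap,
 *		.map_page	= my_map_page,
 *		.unmap_page	= iommu_dma_unmap_page,
 *		.map_sg		= my_map_sg,
 *		.unmap_sg	= iommu_dma_unmap_sg,
 *		.dma_supported	= iommu_dma_supported,
 *		.mapping_error	= iommu_dma_mapping_error,
 *	};
 *
 * Whether a given hook can be used directly or needs an arch wrapper depends
 * on the architecture's cache maintenance requirements.
 */
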
static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
		phys_addr_t msi_addr, struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi_page;
	struct iova_domain *iovad = &cookie->iovad;
	struct iova *iova;
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;

	msi_addr &= ~(phys_addr_t)iova_mask(iovad);
	list_for_each_entry(msi_page, &cookie->msi_page_list, list)
		if (msi_page->phys == msi_addr)
			return msi_page;

	msi_page = kzalloc(sizeof(*msi_page), GFP_ATOMIC);
	if (!msi_page)
		return NULL;

	iova = __alloc_iova(domain, iovad->granule, dma_get_mask(dev));
	if (!iova)
		goto out_free_page;

	msi_page->phys = msi_addr;
	msi_page->iova = iova_dma_addr(iovad, iova);
	if (iommu_map(domain, msi_page->iova, msi_addr, iovad->granule, prot))
		goto out_free_iova;

	INIT_LIST_HEAD(&msi_page->list);
	list_add(&msi_page->list, &cookie->msi_page_list);
	return msi_page;

out_free_iova:
	__free_iova(iovad, iova);
out_free_page:
	kfree(msi_page);
	return NULL;
}

void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg)
{
	struct device *dev = msi_desc_to_dev(irq_get_msi_desc(irq));
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_cookie *cookie;
	struct iommu_dma_msi_page *msi_page;
	phys_addr_t msi_addr = (u64)msg->address_hi << 32 | msg->address_lo;
	unsigned long flags;

	if (!domain || !domain->iova_cookie)
		return;

	cookie = domain->iova_cookie;

	/*
	 * We disable IRQs to rule out a possible inversion against
	 * irq_desc_lock if, say, someone tries to retarget the affinity
	 * of an MSI from within an IPI handler.
	 */
	spin_lock_irqsave(&cookie->msi_lock, flags);
	msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
	spin_unlock_irqrestore(&cookie->msi_lock, flags);

	if (WARN_ON(!msi_page)) {
		/*
		 * We're called from a void callback, so the best we can do is
		 * 'fail' by filling the message with obviously bogus values.
		 * Since we got this far due to an IOMMU being present, it's
		 * not like the existing address would have worked anyway...
		 */
		msg->address_hi = ~0U;
		msg->address_lo = ~0U;
		msg->data = ~0U;
	} else {
		msg->address_hi = upper_32_bits(msi_page->iova);
		msg->address_lo &= iova_mask(&cookie->iovad);
		msg->address_lo += lower_32_bits(msi_page->iova);
	}
}
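
/*
 * Illustrative sketch (assumption): an MSI controller driver whose doorbell
 * sits behind the IOMMU would call the helper above when composing a message,
 * e.g. from its irq_chip's .irq_compose_msi_msg callback (invented names):
 *
 *	static void my_msi_compose_msg(struct irq_data *d, struct msi_msg *msg)
 *	{
 *		msg->address_hi = upper_32_bits(MY_DOORBELL_PHYS);
 *		msg->address_lo = lower_32_bits(MY_DOORBELL_PHYS);
 *		msg->data = d->hwirq;
 *
 *		iommu_dma_map_msi_msg(d->irq, msg);
 *	}
 *
 * so that the address the device actually writes to is the remapped IOVA of
 * the doorbell page rather than its physical address.
 */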