arch/arm64/mm/dma-mapping.c
/*
 * SWIOTLB-based DMA API implementation
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/gfp.h>
#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/cache.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/genalloc.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>
#include <linux/pci.h>

#include <asm/cacheflush.h>

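/* Non-zero when the SWIOTLB bounce buffer may be needed; set in arm64_dma_init(). */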
static int swiotlb __ro_after_init;

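/*
 * Non-coherent devices (and DMA_ATTR_WRITE_COMBINE requests) get a Normal
 * non-cacheable mapping; otherwise the caller's pgprot is used unchanged.
 */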
static pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot,
				 bool coherent)
{
	if (!coherent || (attrs & DMA_ATTR_WRITE_COMBINE))
		return pgprot_writecombine(prot);
	return prot;
}

static struct gen_pool *atomic_pool;

#define DEFAULT_DMA_COHERENT_POOL_SIZE	SZ_256K
static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE;

static int __init early_coherent_pool(char *p)
{
	atomic_pool_size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);

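/*
 * Helpers for the uncached atomic pool, used when a non-coherent device
 * needs coherent memory in a context that cannot sleep or remap.
 */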
static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
{
	unsigned long val;
	void *ptr = NULL;

	if (!atomic_pool) {
		WARN(1, "coherent pool not initialised!\n");
		return NULL;
	}

	val = gen_pool_alloc(atomic_pool, size);
	if (val) {
		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);

		*ret_page = phys_to_page(phys);
		ptr = (void *)val;
		memset(ptr, 0, size);
	}

	return ptr;
}

static bool __in_atomic_pool(void *start, size_t size)
{
	return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
}

static int __free_from_pool(void *start, size_t size)
{
	if (!__in_atomic_pool(start, size))
		return 0;

	gen_pool_free(atomic_pool, (unsigned long)start, size);

	return 1;
}

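/*
 * Backend allocator for coherent buffers: CMA when available and the caller
 * may block, otherwise the SWIOTLB coherent allocator.
 */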
static void *__dma_alloc_coherent(struct device *dev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flags,
				  unsigned long attrs)
{
	if (IS_ENABLED(CONFIG_ZONE_DMA) &&
	    dev->coherent_dma_mask <= DMA_BIT_MASK(32))
		flags |= GFP_DMA;
	if (dev_get_cma_area(dev) && gfpflags_allow_blocking(flags)) {
		struct page *page;
		void *addr;

		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
						 get_order(size), flags);
		if (!page)
			return NULL;

		*dma_handle = phys_to_dma(dev, page_to_phys(page));
		addr = page_address(page);
		memset(addr, 0, size);
		return addr;
	} else {
		return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
	}
}

static void __dma_free_coherent(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle,
				unsigned long attrs)
{
	bool freed;
	phys_addr_t paddr = dma_to_phys(dev, dma_handle);

	freed = dma_release_from_contiguous(dev,
					    phys_to_page(paddr),
					    size >> PAGE_SHIFT);
	if (!freed)
		swiotlb_free_coherent(dev, size, vaddr, dma_handle);
}

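/*
 * dma_map_ops .alloc/.free: non-blocking requests for non-coherent devices
 * are served from the atomic pool; everything else gets a coherent buffer,
 * remapped non-cacheable when the device is not coherent.
 */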
static void *__dma_alloc(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t flags,
			 unsigned long attrs)
{
	struct page *page;
	void *ptr, *coherent_ptr;
	bool coherent = is_device_dma_coherent(dev);
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, false);

	size = PAGE_ALIGN(size);

	if (!coherent && !gfpflags_allow_blocking(flags)) {
		struct page *page = NULL;
		void *addr = __alloc_from_pool(size, &page, flags);

		if (addr)
			*dma_handle = phys_to_dma(dev, page_to_phys(page));

		return addr;
	}

	ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs);
	if (!ptr)
		goto no_mem;

	/* no need for non-cacheable mapping if coherent */
	if (coherent)
		return ptr;

	/* remove any dirty cache lines on the kernel alias */
	__dma_flush_area(ptr, size);

	/* create a coherent mapping */
	page = virt_to_page(ptr);
	coherent_ptr = dma_common_contiguous_remap(page, size, VM_USERMAP,
						   prot, NULL);
	if (!coherent_ptr)
		goto no_map;

	return coherent_ptr;

no_map:
	__dma_free_coherent(dev, size, ptr, *dma_handle, attrs);
no_mem:
	return NULL;
}

static void __dma_free(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle,
		       unsigned long attrs)
{
	void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));

	size = PAGE_ALIGN(size);

	if (!is_device_dma_coherent(dev)) {
		if (__free_from_pool(vaddr, size))
			return;
		vunmap(vaddr);
	}
	__dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs);
}

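/*
 * Streaming DMA wrappers around the generic SWIOTLB ops: for non-coherent
 * devices, perform the CPU cache maintenance that SWIOTLB itself does not.
 */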
static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction dir,
				     unsigned long attrs)
{
	dma_addr_t dev_addr;

	dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs);
	if (!is_device_dma_coherent(dev) &&
	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);

	return dev_addr;
}

static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
				 size_t size, enum dma_data_direction dir,
				 unsigned long attrs)
{
	if (!is_device_dma_coherent(dev) &&
	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
	swiotlb_unmap_page(dev, dev_addr, size, dir, attrs);
}

static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
				  int nelems, enum dma_data_direction dir,
				  unsigned long attrs)
{
	struct scatterlist *sg;
	int i, ret;

	ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs);
	if (!is_device_dma_coherent(dev) &&
	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		for_each_sg(sgl, sg, ret, i)
			__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
				       sg->length, dir);

	return ret;
}

static void __swiotlb_unmap_sg_attrs(struct device *dev,
				     struct scatterlist *sgl, int nelems,
				     enum dma_data_direction dir,
				     unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	if (!is_device_dma_coherent(dev) &&
	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		for_each_sg(sgl, sg, nelems, i)
			__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
					 sg->length, dir);
	swiotlb_unmap_sg_attrs(dev, sgl, nelems, dir, attrs);
}

static void __swiotlb_sync_single_for_cpu(struct device *dev,
					  dma_addr_t dev_addr, size_t size,
					  enum dma_data_direction dir)
{
	if (!is_device_dma_coherent(dev))
		__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
	swiotlb_sync_single_for_cpu(dev, dev_addr, size, dir);
}

static void __swiotlb_sync_single_for_device(struct device *dev,
					      dma_addr_t dev_addr, size_t size,
					      enum dma_data_direction dir)
{
	swiotlb_sync_single_for_device(dev, dev_addr, size, dir);
	if (!is_device_dma_coherent(dev))
		__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
}

static void __swiotlb_sync_sg_for_cpu(struct device *dev,
				      struct scatterlist *sgl, int nelems,
				      enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (!is_device_dma_coherent(dev))
		for_each_sg(sgl, sg, nelems, i)
			__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
					 sg->length, dir);
	swiotlb_sync_sg_for_cpu(dev, sgl, nelems, dir);
}

static void __swiotlb_sync_sg_for_device(struct device *dev,
					 struct scatterlist *sgl, int nelems,
					 enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	swiotlb_sync_sg_for_device(dev, sgl, nelems, dir);
	if (!is_device_dma_coherent(dev))
		for_each_sg(sgl, sg, nelems, i)
			__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
				       sg->length, dir);
}

static int __swiotlb_mmap_pfn(struct vm_area_struct *vma,
			      unsigned long pfn, size_t size)
{
	int ret = -ENXIO;
	unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >>
					PAGE_SHIFT;
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;

	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      vma->vm_end - vma->vm_start,
				      vma->vm_page_prot);
	}

	return ret;
}

static int __swiotlb_mmap(struct device *dev,
			  struct vm_area_struct *vma,
			  void *cpu_addr, dma_addr_t dma_addr, size_t size,
			  unsigned long attrs)
{
	int ret;
	unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT;

	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
					     is_device_dma_coherent(dev));

	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	return __swiotlb_mmap_pfn(vma, pfn, size);
}

static int __swiotlb_get_sgtable_page(struct sg_table *sgt,
				      struct page *page, size_t size)
{
	int ret = sg_alloc_table(sgt, 1, GFP_KERNEL);

	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);

	return ret;
}

static int __swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
				 void *cpu_addr, dma_addr_t handle, size_t size,
				 unsigned long attrs)
{
	struct page *page = phys_to_page(dma_to_phys(dev, handle));

	return __swiotlb_get_sgtable_page(sgt, page, size);
}

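/*
 * Only consult the SWIOTLB core when the bounce buffer is actually in use;
 * otherwise any mask is acceptable and no mapping can fail.
 */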
static int __swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	if (swiotlb)
		return swiotlb_dma_supported(hwdev, mask);
	return 1;
}

static int __swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t addr)
{
	if (swiotlb)
		return swiotlb_dma_mapping_error(hwdev, addr);
	return 0;
}

static const struct dma_map_ops swiotlb_dma_ops = {
	.alloc = __dma_alloc,
	.free = __dma_free,
	.mmap = __swiotlb_mmap,
	.get_sgtable = __swiotlb_get_sgtable,
	.map_page = __swiotlb_map_page,
	.unmap_page = __swiotlb_unmap_page,
	.map_sg = __swiotlb_map_sg_attrs,
	.unmap_sg = __swiotlb_unmap_sg_attrs,
	.sync_single_for_cpu = __swiotlb_sync_single_for_cpu,
	.sync_single_for_device = __swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = __swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = __swiotlb_sync_sg_for_device,
	.dma_supported = __swiotlb_dma_supported,
	.mapping_error = __swiotlb_dma_mapping_error,
};

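/*
 * Carve out the "coherent_pool" (256 KiB by default) at boot, remap it
 * non-cacheable and hand it to a gen_pool so that atomic allocations can
 * be served without sleeping.
 */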
static int __init atomic_pool_init(void)
{
	pgprot_t prot = __pgprot(PROT_NORMAL_NC);
	unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT;
	struct page *page;
	void *addr;
	unsigned int pool_size_order = get_order(atomic_pool_size);

	if (dev_get_cma_area(NULL))
		page = dma_alloc_from_contiguous(NULL, nr_pages,
						 pool_size_order, GFP_KERNEL);
	else
		page = alloc_pages(GFP_DMA, pool_size_order);

	if (page) {
		int ret;
		void *page_addr = page_address(page);

		memset(page_addr, 0, atomic_pool_size);
		__dma_flush_area(page_addr, atomic_pool_size);

		atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
		if (!atomic_pool)
			goto free_page;

		addr = dma_common_contiguous_remap(page, atomic_pool_size,
					VM_USERMAP, prot, atomic_pool_init);

		if (!addr)
			goto destroy_genpool;

		ret = gen_pool_add_virt(atomic_pool, (unsigned long)addr,
					page_to_phys(page),
					atomic_pool_size, -1);
		if (ret)
			goto remove_mapping;

		gen_pool_set_algo(atomic_pool,
				  gen_pool_first_fit_order_align,
				  (void *)PAGE_SHIFT);

		pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n",
			atomic_pool_size / 1024);
		return 0;
	}
	goto out;

remove_mapping:
	dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP);
destroy_genpool:
	gen_pool_destroy(atomic_pool);
	atomic_pool = NULL;
free_page:
	if (!dma_release_from_contiguous(NULL, page, nr_pages))
		__free_pages(page, pool_size_order);
out:
	pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
	       atomic_pool_size / 1024);
	return -ENOMEM;
}

/********************************************
 * The following APIs are for dummy DMA ops *
 ********************************************/

static void *__dummy_alloc(struct device *dev, size_t size,
			   dma_addr_t *dma_handle, gfp_t flags,
			   unsigned long attrs)
{
	return NULL;
}

static void __dummy_free(struct device *dev, size_t size,
			 void *vaddr, dma_addr_t dma_handle,
			 unsigned long attrs)
{
}

static int __dummy_mmap(struct device *dev,
			struct vm_area_struct *vma,
			void *cpu_addr, dma_addr_t dma_addr, size_t size,
			unsigned long attrs)
{
	return -ENXIO;
}

static dma_addr_t __dummy_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	return 0;
}

static void __dummy_unmap_page(struct device *dev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs)
{
}

static int __dummy_map_sg(struct device *dev, struct scatterlist *sgl,
			  int nelems, enum dma_data_direction dir,
			  unsigned long attrs)
{
	return 0;
}

static void __dummy_unmap_sg(struct device *dev,
			     struct scatterlist *sgl, int nelems,
			     enum dma_data_direction dir,
			     unsigned long attrs)
{
}

static void __dummy_sync_single(struct device *dev,
				dma_addr_t dev_addr, size_t size,
				enum dma_data_direction dir)
{
}

static void __dummy_sync_sg(struct device *dev,
			    struct scatterlist *sgl, int nelems,
			    enum dma_data_direction dir)
{
}

static int __dummy_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
	return 1;
}

static int __dummy_dma_supported(struct device *hwdev, u64 mask)
{
	return 0;
}

const struct dma_map_ops dummy_dma_ops = {
	.alloc = __dummy_alloc,
	.free = __dummy_free,
	.mmap = __dummy_mmap,
	.map_page = __dummy_map_page,
	.unmap_page = __dummy_unmap_page,
	.map_sg = __dummy_map_sg,
	.unmap_sg = __dummy_unmap_sg,
	.sync_single_for_cpu = __dummy_sync_single,
	.sync_single_for_device = __dummy_sync_single,
	.sync_sg_for_cpu = __dummy_sync_sg,
	.sync_sg_for_device = __dummy_sync_sg,
	.mapping_error = __dummy_mapping_error,
	.dma_supported = __dummy_dma_supported,
};
EXPORT_SYMBOL(dummy_dma_ops);

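/*
 * Decide at boot whether the SWIOTLB bounce buffer can ever be needed
 * (forced on the command line, or RAM extends beyond the ZONE_DMA limit),
 * then set up the atomic coherent pool.
 */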
static int __init arm64_dma_init(void)
{
	if (swiotlb_force == SWIOTLB_FORCE ||
	    max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
		swiotlb = 1;

	return atomic_pool_init();
}
arch_initcall(arm64_dma_init);

#define PREALLOC_DMA_DEBUG_ENTRIES	4096

static int __init dma_debug_do_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_debug_do_init);

#ifdef CONFIG_IOMMU_DMA
#include <linux/dma-iommu.h>
#include <linux/platform_device.h>
#include <linux/amba/bus.h>

/* Thankfully, all cache ops are by VA so we can ignore phys here */
static void flush_page(struct device *dev, const void *virt, phys_addr_t phys)
{
	__dma_flush_area(virt, PAGE_SIZE);
}

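/*
 * IOMMU-backed .alloc: atomic requests come from the atomic pool (or plain
 * pages for coherent devices), DMA_ATTR_FORCE_CONTIGUOUS requests from CMA,
 * and everything else from iommu_dma_alloc() with a vmalloc remap.
 */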
static void *__iommu_alloc_attrs(struct device *dev, size_t size,
				 dma_addr_t *handle, gfp_t gfp,
				 unsigned long attrs)
{
	bool coherent = is_device_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	size_t iosize = size;
	void *addr;

	if (WARN(!dev, "cannot create IOMMU mapping for unknown device\n"))
		return NULL;

	size = PAGE_ALIGN(size);

	/*
	 * Some drivers rely on this, and we probably don't want the
	 * possibility of stale kernel data being read by devices anyway.
	 */
	gfp |= __GFP_ZERO;

	if (!gfpflags_allow_blocking(gfp)) {
		struct page *page;
		/*
		 * In atomic context we can't remap anything, so we'll only
		 * get the virtually contiguous buffer we need by way of a
		 * physically contiguous allocation.
		 */
		if (coherent) {
			page = alloc_pages(gfp, get_order(size));
			addr = page ? page_address(page) : NULL;
		} else {
			addr = __alloc_from_pool(size, &page, gfp);
		}
		if (!addr)
			return NULL;

		*handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
		if (iommu_dma_mapping_error(dev, *handle)) {
			if (coherent)
				__free_pages(page, get_order(size));
			else
				__free_from_pool(addr, size);
			addr = NULL;
		}
	} else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
		struct page *page;

		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
						 get_order(size), gfp);
		if (!page)
			return NULL;

		*handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
		if (iommu_dma_mapping_error(dev, *handle)) {
			dma_release_from_contiguous(dev, page,
						    size >> PAGE_SHIFT);
			return NULL;
		}
		if (!coherent)
			__dma_flush_area(page_to_virt(page), iosize);

		addr = dma_common_contiguous_remap(page, size, VM_USERMAP,
						   prot,
						   __builtin_return_address(0));
		if (!addr) {
			iommu_dma_unmap_page(dev, *handle, iosize, 0, attrs);
			dma_release_from_contiguous(dev, page,
						    size >> PAGE_SHIFT);
		}
	} else {
		pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
		struct page **pages;

		pages = iommu_dma_alloc(dev, iosize, gfp, attrs, ioprot,
					handle, flush_page);
		if (!pages)
			return NULL;

		addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
					      __builtin_return_address(0));
		if (!addr)
			iommu_dma_free(dev, pages, iosize, handle);
	}
	return addr;
}

static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
			       dma_addr_t handle, unsigned long attrs)
{
	size_t iosize = size;

	size = PAGE_ALIGN(size);
	/*
	 * @cpu_addr will be one of 4 things depending on how it was allocated:
	 * - A remapped array of pages for contiguous allocations.
	 * - A remapped array of pages from iommu_dma_alloc(), for all
	 *   non-atomic allocations.
	 * - A non-cacheable alias from the atomic pool, for atomic
	 *   allocations by non-coherent devices.
	 * - A normal lowmem address, for atomic allocations by
	 *   coherent devices.
	 * Hence how dodgy the below logic looks...
	 */
	if (__in_atomic_pool(cpu_addr, size)) {
		iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
		__free_from_pool(cpu_addr, size);
	} else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		struct page *page = vmalloc_to_page(cpu_addr);

		iommu_dma_unmap_page(dev, handle, iosize, 0, attrs);
		dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
		dma_common_free_remap(cpu_addr, size, VM_USERMAP);
	} else if (is_vmalloc_addr(cpu_addr)) {
		struct vm_struct *area = find_vm_area(cpu_addr);

		if (WARN_ON(!area || !area->pages))
			return;
		iommu_dma_free(dev, area->pages, iosize, &handle);
		dma_common_free_remap(cpu_addr, size, VM_USERMAP);
	} else {
		iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
		__free_pages(virt_to_page(cpu_addr), get_order(size));
	}
}

static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr, size_t size,
			      unsigned long attrs)
{
	struct vm_struct *area;
	int ret;

	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
					     is_device_dma_coherent(dev));

	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		/*
		 * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
		 * hence in the vmalloc space.
		 */
		unsigned long pfn = vmalloc_to_pfn(cpu_addr);
		return __swiotlb_mmap_pfn(vma, pfn, size);
	}

	area = find_vm_area(cpu_addr);
	if (WARN_ON(!area || !area->pages))
		return -ENXIO;

	return iommu_dma_mmap(area->pages, size, vma);
}

static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
			       void *cpu_addr, dma_addr_t dma_addr,
			       size_t size, unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct vm_struct *area = find_vm_area(cpu_addr);

	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		/*
		 * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
		 * hence in the vmalloc space.
		 */
		struct page *page = vmalloc_to_page(cpu_addr);
		return __swiotlb_get_sgtable_page(sgt, page, size);
	}

	if (WARN_ON(!area || !area->pages))
		return -ENXIO;

	return sg_alloc_table_from_pages(sgt, area->pages, count, 0, size,
					 GFP_KERNEL);
}

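/*
 * With an IOMMU in the way, the DMA address must first be translated back
 * to a physical address through the device's IOMMU domain before the CPU
 * caches can be maintained for non-coherent devices.
 */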
static void __iommu_sync_single_for_cpu(struct device *dev,
					dma_addr_t dev_addr, size_t size,
					enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (is_device_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_domain_for_dev(dev), dev_addr);
	__dma_unmap_area(phys_to_virt(phys), size, dir);
}

static void __iommu_sync_single_for_device(struct device *dev,
					   dma_addr_t dev_addr, size_t size,
					   enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (is_device_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_domain_for_dev(dev), dev_addr);
	__dma_map_area(phys_to_virt(phys), size, dir);
}

static dma_addr_t __iommu_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	bool coherent = is_device_dma_coherent(dev);
	int prot = dma_info_to_prot(dir, coherent, attrs);
	dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot);

	if (!iommu_dma_mapping_error(dev, dev_addr) &&
	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_single_for_device(dev, dev_addr, size, dir);

	return dev_addr;
}

static void __iommu_unmap_page(struct device *dev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs)
{
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_single_for_cpu(dev, dev_addr, size, dir);

	iommu_dma_unmap_page(dev, dev_addr, size, dir, attrs);
}

static void __iommu_sync_sg_for_cpu(struct device *dev,
				    struct scatterlist *sgl, int nelems,
				    enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (is_device_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		__dma_unmap_area(sg_virt(sg), sg->length, dir);
}

static void __iommu_sync_sg_for_device(struct device *dev,
				       struct scatterlist *sgl, int nelems,
				       enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (is_device_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		__dma_map_area(sg_virt(sg), sg->length, dir);
}

static int __iommu_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
				int nelems, enum dma_data_direction dir,
				unsigned long attrs)
{
	bool coherent = is_device_dma_coherent(dev);

	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_sg_for_device(dev, sgl, nelems, dir);

	return iommu_dma_map_sg(dev, sgl, nelems,
				dma_info_to_prot(dir, coherent, attrs));
}

static void __iommu_unmap_sg_attrs(struct device *dev,
				   struct scatterlist *sgl, int nelems,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_sg_for_cpu(dev, sgl, nelems, dir);

	iommu_dma_unmap_sg(dev, sgl, nelems, dir, attrs);
}

static const struct dma_map_ops iommu_dma_ops = {
	.alloc = __iommu_alloc_attrs,
	.free = __iommu_free_attrs,
	.mmap = __iommu_mmap_attrs,
	.get_sgtable = __iommu_get_sgtable,
	.map_page = __iommu_map_page,
	.unmap_page = __iommu_unmap_page,
	.map_sg = __iommu_map_sg_attrs,
	.unmap_sg = __iommu_unmap_sg_attrs,
	.sync_single_for_cpu = __iommu_sync_single_for_cpu,
	.sync_single_for_device = __iommu_sync_single_for_device,
	.sync_sg_for_cpu = __iommu_sync_sg_for_cpu,
	.sync_sg_for_device = __iommu_sync_sg_for_device,
	.map_resource = iommu_dma_map_resource,
	.unmap_resource = iommu_dma_unmap_resource,
	.mapping_error = iommu_dma_mapping_error,
};

static int __init __iommu_dma_init(void)
{
	return iommu_dma_init();
}
arch_initcall(__iommu_dma_init);

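/*
 * Switch a device over to iommu_dma_ops if the IOMMU core has given it a
 * default DMA domain that the dma-iommu layer can drive.
 */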
static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
				  const struct iommu_ops *ops)
{
	struct iommu_domain *domain;

	if (!ops)
		return;

	/*
	 * The IOMMU core code allocates the default DMA domain, which the
	 * underlying IOMMU driver needs to support via the dma-iommu layer.
	 */
	domain = iommu_get_domain_for_dev(dev);

	if (!domain)
		goto out_err;

	if (domain->type == IOMMU_DOMAIN_DMA) {
		if (iommu_dma_init_domain(domain, dma_base, size, dev))
			goto out_err;

		dev->dma_ops = &iommu_dma_ops;
	}

	return;

out_err:
	pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
		dev_name(dev));
}

void arch_teardown_dma_ops(struct device *dev)
{
	dev->dma_ops = NULL;
}

#else

static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
				  const struct iommu_ops *iommu)
{ }

#endif	/* CONFIG_IOMMU_DMA */

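/*
 * Per-device DMA setup, invoked by the bus/firmware code (e.g. the OF and
 * ACPI DMA configuration paths): default to the SWIOTLB ops, record the
 * device's coherency, then let the IOMMU path override the ops if possible.
 */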
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
{
	if (!dev->dma_ops)
		dev->dma_ops = &swiotlb_dma_ops;

	dev->archdata.dma_coherent = coherent;
	__iommu_setup_dma_ops(dev, dma_base, size, iommu);

#ifdef CONFIG_XEN
	if (xen_initial_domain()) {
		dev->archdata.dev_dma_ops = dev->dma_ops;
		dev->dma_ops = xen_dma_ops;
	}
#endif
}