/*
 * SWIOTLB-based DMA API implementation
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/genalloc.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>

#include <asm/cacheflush.h>
struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);
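/*
 * Pick the page protection for a DMA buffer mapping: fully coherent devices
 * keep the cacheable protection passed in, while non-coherent devices (or an
 * explicit DMA_ATTR_WRITE_COMBINE request) get a write-combining,
 * non-cacheable mapping.
 */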
static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot,
                                 bool coherent)
{
        if (!coherent || dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
                return pgprot_writecombine(prot);
        return prot;
}
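/*
 * Small pool of pre-mapped, non-cacheable memory used to satisfy atomic
 * (non-blocking) coherent allocations for non-coherent devices, since we can
 * neither remap pages nor sleep in that context. Its size can be overridden
 * with the "coherent_pool=" early parameter below.
 */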
static struct gen_pool *atomic_pool;

#define DEFAULT_DMA_COHERENT_POOL_SIZE  SZ_256K
static size_t atomic_pool_size = DEFAULT_DMA_COHERENT_POOL_SIZE;
static int __init early_coherent_pool(char *p)
{
        atomic_pool_size = memparse(p, &p);
        return 0;
}
early_param("coherent_pool", early_coherent_pool);
static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
{
        unsigned long val;
        void *ptr = NULL;

        if (!atomic_pool) {
                WARN(1, "coherent pool not initialised!\n");
                return NULL;
        }

        val = gen_pool_alloc(atomic_pool, size);
        if (val) {
                phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);

                *ret_page = phys_to_page(phys);
                ptr = (void *)val;
                memset(ptr, 0, size);
        }

        return ptr;
}
static bool __in_atomic_pool(void *start, size_t size)
{
        return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
}
static int __free_from_pool(void *start, size_t size)
{
        if (!__in_atomic_pool(start, size))
                return 0;

        gen_pool_free(atomic_pool, (unsigned long)start, size);

        return 1;
}
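/*
 * Low-level backend for the coherent allocator: use CMA when a contiguous
 * area is available and we may block, otherwise fall back to the swiotlb
 * allocator. The returned memory is zeroed but still mapped cacheable; the
 * callers below take care of any non-cacheable alias that is needed.
 */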
static void *__dma_alloc_coherent(struct device *dev, size_t size,
                                  dma_addr_t *dma_handle, gfp_t flags,
                                  struct dma_attrs *attrs)
{
        if (dev == NULL) {
                WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
                return NULL;
        }

        if (IS_ENABLED(CONFIG_ZONE_DMA) &&
            dev->coherent_dma_mask <= DMA_BIT_MASK(32))
                flags |= GFP_DMA;
        if (dev_get_cma_area(dev) && gfpflags_allow_blocking(flags)) {
                struct page *page;
                void *addr;

                page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
                                                 get_order(size));
                if (!page)
                        return NULL;

                *dma_handle = phys_to_dma(dev, page_to_phys(page));
                addr = page_address(page);
                memset(addr, 0, size);
                return addr;
        } else {
                return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
        }
}
static void __dma_free_coherent(struct device *dev, size_t size,
                                void *vaddr, dma_addr_t dma_handle,
                                struct dma_attrs *attrs)
{
        bool freed;
        phys_addr_t paddr = dma_to_phys(dev, dma_handle);

        if (dev == NULL) {
                WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
                return;
        }

        freed = dma_release_from_contiguous(dev,
                                            phys_to_page(paddr),
                                            size >> PAGE_SHIFT);
        if (!freed)
                swiotlb_free_coherent(dev, size, vaddr, dma_handle);
}
static void *__dma_alloc(struct device *dev, size_t size,
                         dma_addr_t *dma_handle, gfp_t flags,
                         struct dma_attrs *attrs)
{
        struct page *page;
        void *ptr, *coherent_ptr;
        bool coherent = is_device_dma_coherent(dev);
        pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, false);

        size = PAGE_ALIGN(size);

        if (!coherent && !gfpflags_allow_blocking(flags)) {
                struct page *page = NULL;
                void *addr = __alloc_from_pool(size, &page, flags);

                if (addr)
                        *dma_handle = phys_to_dma(dev, page_to_phys(page));

                return addr;
        }

        ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs);
        if (!ptr)
                goto no_mem;

        /* no need for non-cacheable mapping if coherent */
        if (coherent)
                return ptr;

        /* remove any dirty cache lines on the kernel alias */
        __dma_flush_range(ptr, ptr + size);

        /* create a coherent mapping */
        page = virt_to_page(ptr);
        coherent_ptr = dma_common_contiguous_remap(page, size, VM_USERMAP,
                                                   prot, NULL);
        if (!coherent_ptr)
                goto no_map;

        return coherent_ptr;

no_map:
        __dma_free_coherent(dev, size, ptr, *dma_handle, attrs);
no_mem:
        *dma_handle = DMA_ERROR_CODE;
        return NULL;
}
static void __dma_free(struct device *dev, size_t size,
                       void *vaddr, dma_addr_t dma_handle,
                       struct dma_attrs *attrs)
{
        void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));

        size = PAGE_ALIGN(size);

        if (!is_device_dma_coherent(dev)) {
                if (__free_from_pool(vaddr, size))
                        return;
                vunmap(vaddr);
        }
        __dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs);
}
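/*
 * Streaming DMA: wrap the generic swiotlb ops with the cache maintenance
 * needed for non-coherent devices. __dma_map_area() makes a buffer safe to
 * hand to the device, __dma_unmap_area() makes it safe for the CPU to read
 * again; fully coherent devices skip both.
 */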
static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
                                     unsigned long offset, size_t size,
                                     enum dma_data_direction dir,
                                     struct dma_attrs *attrs)
{
        dma_addr_t dev_addr;

        dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs);
        if (!is_device_dma_coherent(dev))
                __dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);

        return dev_addr;
}
static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
                                 size_t size, enum dma_data_direction dir,
                                 struct dma_attrs *attrs)
{
        if (!is_device_dma_coherent(dev))
                __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
        swiotlb_unmap_page(dev, dev_addr, size, dir, attrs);
}
static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
                                  int nelems, enum dma_data_direction dir,
                                  struct dma_attrs *attrs)
{
        struct scatterlist *sg;
        int i, ret;

        ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs);
        if (!is_device_dma_coherent(dev))
                for_each_sg(sgl, sg, ret, i)
                        __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
                                       sg->length, dir);

        return ret;
}
static void __swiotlb_unmap_sg_attrs(struct device *dev,
                                     struct scatterlist *sgl, int nelems,
                                     enum dma_data_direction dir,
                                     struct dma_attrs *attrs)
{
        struct scatterlist *sg;
        int i;

        if (!is_device_dma_coherent(dev))
                for_each_sg(sgl, sg, nelems, i)
                        __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
                                         sg->length, dir);
        swiotlb_unmap_sg_attrs(dev, sgl, nelems, dir, attrs);
}
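/*
 * The sync hooks follow the same pattern as map/unmap: the generic swiotlb
 * sync handles any bounce-buffer copying, while __dma_map_area() and
 * __dma_unmap_area() perform the direction-appropriate cache maintenance on
 * the device-visible buffer for non-coherent devices.
 */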
static void __swiotlb_sync_single_for_cpu(struct device *dev,
                                          dma_addr_t dev_addr, size_t size,
                                          enum dma_data_direction dir)
{
        if (!is_device_dma_coherent(dev))
                __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
        swiotlb_sync_single_for_cpu(dev, dev_addr, size, dir);
}
static void __swiotlb_sync_single_for_device(struct device *dev,
                                             dma_addr_t dev_addr, size_t size,
                                             enum dma_data_direction dir)
{
        swiotlb_sync_single_for_device(dev, dev_addr, size, dir);
        if (!is_device_dma_coherent(dev))
                __dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
}
static void __swiotlb_sync_sg_for_cpu(struct device *dev,
                                      struct scatterlist *sgl, int nelems,
                                      enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        if (!is_device_dma_coherent(dev))
                for_each_sg(sgl, sg, nelems, i)
                        __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
                                         sg->length, dir);
        swiotlb_sync_sg_for_cpu(dev, sgl, nelems, dir);
}
static void __swiotlb_sync_sg_for_device(struct device *dev,
                                         struct scatterlist *sgl, int nelems,
                                         enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        swiotlb_sync_sg_for_device(dev, sgl, nelems, dir);
        if (!is_device_dma_coherent(dev))
                for_each_sg(sgl, sg, nelems, i)
                        __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
                                       sg->length, dir);
}
static int __swiotlb_mmap(struct device *dev,
                          struct vm_area_struct *vma,
                          void *cpu_addr, dma_addr_t dma_addr, size_t size,
                          struct dma_attrs *attrs)
{
        int ret = -ENXIO;
        unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >>
                                        PAGE_SHIFT;
        unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
        unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT;
        unsigned long off = vma->vm_pgoff;

        vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
                                             is_device_dma_coherent(dev));

        if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
                return ret;

        if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
                ret = remap_pfn_range(vma, vma->vm_start,
                                      pfn + off,
                                      vma->vm_end - vma->vm_start,
                                      vma->vm_page_prot);
        }

        return ret;
}
static int __swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
                                 void *cpu_addr, dma_addr_t handle, size_t size,
                                 struct dma_attrs *attrs)
{
        int ret = sg_alloc_table(sgt, 1, GFP_KERNEL);

        if (!ret)
                sg_set_page(sgt->sgl, phys_to_page(dma_to_phys(dev, handle)),
                            PAGE_ALIGN(size), 0);

        return ret;
}
static struct dma_map_ops swiotlb_dma_ops = {
        .alloc = __dma_alloc,
        .free = __dma_free,
        .mmap = __swiotlb_mmap,
        .get_sgtable = __swiotlb_get_sgtable,
        .map_page = __swiotlb_map_page,
        .unmap_page = __swiotlb_unmap_page,
        .map_sg = __swiotlb_map_sg_attrs,
        .unmap_sg = __swiotlb_unmap_sg_attrs,
        .sync_single_for_cpu = __swiotlb_sync_single_for_cpu,
        .sync_single_for_device = __swiotlb_sync_single_for_device,
        .sync_sg_for_cpu = __swiotlb_sync_sg_for_cpu,
        .sync_sg_for_device = __swiotlb_sync_sg_for_device,
        .dma_supported = swiotlb_dma_supported,
        .mapping_error = swiotlb_dma_mapping_error,
};
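/*
 * Carve out the atomic pool at boot: grab a physically contiguous chunk
 * (from CMA if available, GFP_DMA pages otherwise), flush the cacheable
 * alias, remap it non-cacheable and hand it to a genpool so that
 * __alloc_from_pool()/__free_from_pool() can serve atomic requests.
 */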
static int __init atomic_pool_init(void)
{
        pgprot_t prot = __pgprot(PROT_NORMAL_NC);
        unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT;
        struct page *page;
        void *addr;
        unsigned int pool_size_order = get_order(atomic_pool_size);

        if (dev_get_cma_area(NULL))
                page = dma_alloc_from_contiguous(NULL, nr_pages,
                                                 pool_size_order);
        else
                page = alloc_pages(GFP_DMA, pool_size_order);

        if (page) {
                int ret;
                void *page_addr = page_address(page);

                memset(page_addr, 0, atomic_pool_size);
                __dma_flush_range(page_addr, page_addr + atomic_pool_size);

                atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
                if (!atomic_pool)
                        goto free_page;

                addr = dma_common_contiguous_remap(page, atomic_pool_size,
                                        VM_USERMAP, prot, atomic_pool_init);

                if (!addr)
                        goto destroy_genpool;

                ret = gen_pool_add_virt(atomic_pool, (unsigned long)addr,
                                        page_to_phys(page),
                                        atomic_pool_size, -1);
                if (ret)
                        goto remove_mapping;

                gen_pool_set_algo(atomic_pool,
                                  gen_pool_first_fit_order_align,
                                  (void *)PAGE_SHIFT);

                pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n",
                        atomic_pool_size / 1024);
                return 0;
        }
        goto out;

remove_mapping:
        dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP);
destroy_genpool:
        gen_pool_destroy(atomic_pool);
        atomic_pool = NULL;
free_page:
        if (!dma_release_from_contiguous(NULL, page, nr_pages))
                __free_pages(page, pool_size_order);
out:
        pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
               atomic_pool_size / 1024);
        return -ENOMEM;
}
/********************************************
 * The following APIs are for dummy DMA ops *
 ********************************************/
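/*
 * These ops deliberately fail every request. They are intended as a safe
 * fallback for devices that never had real DMA ops installed, so that a
 * missing setup shows up as a clean allocation/mapping error rather than
 * silent memory corruption.
 */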
static void *__dummy_alloc(struct device *dev, size_t size,
                           dma_addr_t *dma_handle, gfp_t flags,
                           struct dma_attrs *attrs)
{
        return NULL;
}

static void __dummy_free(struct device *dev, size_t size,
                         void *vaddr, dma_addr_t dma_handle,
                         struct dma_attrs *attrs)
{
}

static int __dummy_mmap(struct device *dev,
                        struct vm_area_struct *vma,
                        void *cpu_addr, dma_addr_t dma_addr, size_t size,
                        struct dma_attrs *attrs)
{
        return -ENXIO;
}

static dma_addr_t __dummy_map_page(struct device *dev, struct page *page,
                                   unsigned long offset, size_t size,
                                   enum dma_data_direction dir,
                                   struct dma_attrs *attrs)
{
        return DMA_ERROR_CODE;
}

static void __dummy_unmap_page(struct device *dev, dma_addr_t dev_addr,
                               size_t size, enum dma_data_direction dir,
                               struct dma_attrs *attrs)
{
}

static int __dummy_map_sg(struct device *dev, struct scatterlist *sgl,
                          int nelems, enum dma_data_direction dir,
                          struct dma_attrs *attrs)
{
        return 0;
}

static void __dummy_unmap_sg(struct device *dev,
                             struct scatterlist *sgl, int nelems,
                             enum dma_data_direction dir,
                             struct dma_attrs *attrs)
{
}

static void __dummy_sync_single(struct device *dev,
                                dma_addr_t dev_addr, size_t size,
                                enum dma_data_direction dir)
{
}

static void __dummy_sync_sg(struct device *dev,
                            struct scatterlist *sgl, int nelems,
                            enum dma_data_direction dir)
{
}

static int __dummy_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
        return 1;
}

static int __dummy_dma_supported(struct device *hwdev, u64 mask)
{
        return 0;
}
struct dma_map_ops dummy_dma_ops = {
        .alloc = __dummy_alloc,
        .free = __dummy_free,
        .mmap = __dummy_mmap,
        .map_page = __dummy_map_page,
        .unmap_page = __dummy_unmap_page,
        .map_sg = __dummy_map_sg,
        .unmap_sg = __dummy_unmap_sg,
        .sync_single_for_cpu = __dummy_sync_single,
        .sync_single_for_device = __dummy_sync_single,
        .sync_sg_for_cpu = __dummy_sync_sg,
        .sync_sg_for_device = __dummy_sync_sg,
        .mapping_error = __dummy_mapping_error,
        .dma_supported = __dummy_dma_supported,
};
EXPORT_SYMBOL(dummy_dma_ops);
static int __init arm64_dma_init(void)
{
        int ret;

        dma_ops = &swiotlb_dma_ops;

        ret = atomic_pool_init();

        return ret;
}
arch_initcall(arm64_dma_init);
#define PREALLOC_DMA_DEBUG_ENTRIES      4096

static int __init dma_debug_do_init(void)
{
        dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
        return 0;
}
fs_initcall(dma_debug_do_init);
#ifdef CONFIG_IOMMU_DMA
#include <linux/dma-iommu.h>
#include <linux/platform_device.h>
#include <linux/amba/bus.h>
/* Thankfully, all cache ops are by VA so we can ignore phys here */
static void flush_page(struct device *dev, const void *virt, phys_addr_t phys)
{
        __dma_flush_range(virt, virt + PAGE_SIZE);
}
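/*
 * Two allocation strategies below: when we may block, let iommu_dma_alloc()
 * build a (possibly non-contiguous) page array and remap it into a
 * contiguous kernel VA range; in atomic context we instead need a
 * physically contiguous buffer (from the atomic pool for non-coherent
 * devices, plain pages otherwise) mapped through the IOMMU in one go.
 */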
static void *__iommu_alloc_attrs(struct device *dev, size_t size,
                                 dma_addr_t *handle, gfp_t gfp,
                                 struct dma_attrs *attrs)
{
        bool coherent = is_device_dma_coherent(dev);
        int ioprot = dma_direction_to_prot(DMA_BIDIRECTIONAL, coherent);
        void *addr;

        if (WARN(!dev, "cannot create IOMMU mapping for unknown device\n"))
                return NULL;
        /*
         * Some drivers rely on this, and we probably don't want the
         * possibility of stale kernel data being read by devices anyway.
         */
        gfp |= __GFP_ZERO;

        if (gfp & __GFP_WAIT) {
                struct page **pages;
                pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);

                pages = iommu_dma_alloc(dev, size, gfp, ioprot, handle,
                                        flush_page);
                if (!pages)
                        return NULL;

                addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
                                              __builtin_return_address(0));
                if (!addr)
                        iommu_dma_free(dev, pages, size, handle);
        } else {
                struct page *page;
                /*
                 * In atomic context we can't remap anything, so we'll only
                 * get the virtually contiguous buffer we need by way of a
                 * physically contiguous allocation.
                 */
                if (coherent) {
                        page = alloc_pages(gfp, get_order(size));
                        addr = page ? page_address(page) : NULL;
                } else {
                        addr = __alloc_from_pool(size, &page, gfp);
                }
                if (!addr)
                        return NULL;

                *handle = iommu_dma_map_page(dev, page, 0, size, ioprot);
                if (iommu_dma_mapping_error(dev, *handle)) {
                        if (coherent)
                                __free_pages(page, get_order(size));
                        else
                                __free_from_pool(addr, size);
                        addr = NULL;
                }
        }
        return addr;
}
static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
                               dma_addr_t handle, struct dma_attrs *attrs)
{
        /*
         * @cpu_addr will be one of 3 things depending on how it was allocated:
         * - A remapped array of pages from iommu_dma_alloc(), for all
         *   non-atomic allocations.
         * - A non-cacheable alias from the atomic pool, for atomic
         *   allocations by non-coherent devices.
         * - A normal lowmem address, for atomic allocations by
         *   coherent devices.
         * Hence how dodgy the below logic looks...
         */
        if (__in_atomic_pool(cpu_addr, size)) {
                iommu_dma_unmap_page(dev, handle, size, 0, NULL);
                __free_from_pool(cpu_addr, size);
        } else if (is_vmalloc_addr(cpu_addr)) {
                struct vm_struct *area = find_vm_area(cpu_addr);

                if (WARN_ON(!area || !area->pages))
                        return;
                iommu_dma_free(dev, area->pages, size, &handle);
                dma_common_free_remap(cpu_addr, size, VM_USERMAP);
        } else {
                iommu_dma_unmap_page(dev, handle, size, 0, NULL);
                __free_pages(virt_to_page(cpu_addr), get_order(size));
        }
}
static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
                              void *cpu_addr, dma_addr_t dma_addr, size_t size,
                              struct dma_attrs *attrs)
{
        struct vm_struct *area;
        int ret;

        vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
                                             is_device_dma_coherent(dev));

        if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
                return ret;

        area = find_vm_area(cpu_addr);
        if (WARN_ON(!area || !area->pages))
                return -ENXIO;

        return iommu_dma_mmap(area->pages, size, vma);
}
static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
                               void *cpu_addr, dma_addr_t dma_addr,
                               size_t size, struct dma_attrs *attrs)
{
        unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        struct vm_struct *area = find_vm_area(cpu_addr);

        if (WARN_ON(!area || !area->pages))
                return -ENXIO;

        return sg_alloc_table_from_pages(sgt, area->pages, count, 0, size,
                                         GFP_KERNEL);
}
static void __iommu_sync_single_for_cpu(struct device *dev,
                                        dma_addr_t dev_addr, size_t size,
                                        enum dma_data_direction dir)
{
        phys_addr_t phys;

        if (is_device_dma_coherent(dev))
                return;

        phys = iommu_iova_to_phys(iommu_get_domain_for_dev(dev), dev_addr);
        __dma_unmap_area(phys_to_virt(phys), size, dir);
}
static void __iommu_sync_single_for_device(struct device *dev,
                                           dma_addr_t dev_addr, size_t size,
                                           enum dma_data_direction dir)
{
        phys_addr_t phys;

        if (is_device_dma_coherent(dev))
                return;

        phys = iommu_iova_to_phys(iommu_get_domain_for_dev(dev), dev_addr);
        __dma_map_area(phys_to_virt(phys), size, dir);
}
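/*
 * map/unmap defer their cache maintenance to the sync_single helpers above;
 * it is skipped when the caller passes DMA_ATTR_SKIP_CPU_SYNC, and on the
 * map side also when the IOMMU mapping itself failed.
 */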
static dma_addr_t __iommu_map_page(struct device *dev, struct page *page,
                                   unsigned long offset, size_t size,
                                   enum dma_data_direction dir,
                                   struct dma_attrs *attrs)
{
        bool coherent = is_device_dma_coherent(dev);
        int prot = dma_direction_to_prot(dir, coherent);
        dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot);

        if (!iommu_dma_mapping_error(dev, dev_addr) &&
            !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
                __iommu_sync_single_for_device(dev, dev_addr, size, dir);

        return dev_addr;
}
static void __iommu_unmap_page(struct device *dev, dma_addr_t dev_addr,
                               size_t size, enum dma_data_direction dir,
                               struct dma_attrs *attrs)
{
        if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
                __iommu_sync_single_for_cpu(dev, dev_addr, size, dir);

        iommu_dma_unmap_page(dev, dev_addr, size, dir, attrs);
}
static void __iommu_sync_sg_for_cpu(struct device *dev,
                                    struct scatterlist *sgl, int nelems,
                                    enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        if (is_device_dma_coherent(dev))
                return;

        for_each_sg(sgl, sg, nelems, i)
                __dma_unmap_area(sg_virt(sg), sg->length, dir);
}
static void __iommu_sync_sg_for_device(struct device *dev,
                                       struct scatterlist *sgl, int nelems,
                                       enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        if (is_device_dma_coherent(dev))
                return;

        for_each_sg(sgl, sg, nelems, i)
                __dma_map_area(sg_virt(sg), sg->length, dir);
}
static int __iommu_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
                                int nelems, enum dma_data_direction dir,
                                struct dma_attrs *attrs)
{
        bool coherent = is_device_dma_coherent(dev);

        if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
                __iommu_sync_sg_for_device(dev, sgl, nelems, dir);

        return iommu_dma_map_sg(dev, sgl, nelems,
                                dma_direction_to_prot(dir, coherent));
}
static void __iommu_unmap_sg_attrs(struct device *dev,
                                   struct scatterlist *sgl, int nelems,
                                   enum dma_data_direction dir,
                                   struct dma_attrs *attrs)
{
        if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
                __iommu_sync_sg_for_cpu(dev, sgl, nelems, dir);

        iommu_dma_unmap_sg(dev, sgl, nelems, dir, attrs);
}
static struct dma_map_ops iommu_dma_ops = {
        .alloc = __iommu_alloc_attrs,
        .free = __iommu_free_attrs,
        .mmap = __iommu_mmap_attrs,
        .get_sgtable = __iommu_get_sgtable,
        .map_page = __iommu_map_page,
        .unmap_page = __iommu_unmap_page,
        .map_sg = __iommu_map_sg_attrs,
        .unmap_sg = __iommu_unmap_sg_attrs,
        .sync_single_for_cpu = __iommu_sync_single_for_cpu,
        .sync_single_for_device = __iommu_sync_single_for_device,
        .sync_sg_for_cpu = __iommu_sync_sg_for_cpu,
        .sync_sg_for_device = __iommu_sync_sg_for_device,
        .dma_supported = iommu_dma_supported,
        .mapping_error = iommu_dma_mapping_error,
};
/*
 * TODO: Right now __iommu_setup_dma_ops() gets called too early to do
 * everything it needs to - the device is only partially created and the
 * IOMMU driver hasn't seen it yet, so it can't have a group. Thus we
 * need this delayed attachment dance. Once IOMMU probe ordering is sorted
 * to move the arch_setup_dma_ops() call later, all the notifier bits below
 * become unnecessary, and will go away.
 */
struct iommu_dma_notifier_data {
        struct list_head list;
        struct device *dev;
        const struct iommu_ops *ops;
        u64 dma_base;
        u64 size;
};
static LIST_HEAD(iommu_dma_masters);
static DEFINE_MUTEX(iommu_dma_notifier_lock);
/*
 * Temporarily "borrow" a domain feature flag to tell if we had to resort
 * to creating our own domain here, in case we need to clean it up again.
 */
#define __IOMMU_DOMAIN_FAKE_DEFAULT             (1U << 31)
static bool do_iommu_attach(struct device *dev, const struct iommu_ops *ops,
                            u64 dma_base, u64 size)
{
        struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

        /*
         * Best case: The device is either part of a group which was
         * already attached to a domain in a previous call, or it's
         * been put in a default DMA domain by the IOMMU core.
         */
        if (!domain) {
                /*
                 * Urgh. The IOMMU core isn't going to do default domains
                 * for non-PCI devices anyway, until it has some means of
                 * abstracting the entirely implementation-specific
                 * sideband data/SoC topology/unicorn dust that may or
                 * may not differentiate upstream masters.
                 * So until then, HORRIBLE HACKS!
                 */
                domain = ops->domain_alloc(IOMMU_DOMAIN_DMA);
                if (!domain)
                        goto out_no_domain;

                domain->ops = ops;
                domain->type = IOMMU_DOMAIN_DMA | __IOMMU_DOMAIN_FAKE_DEFAULT;

                if (iommu_attach_device(domain, dev))
                        goto out_put_domain;
        }

        if (iommu_dma_init_domain(domain, dma_base, size))
                goto out_detach;

        dev->archdata.dma_ops = &iommu_dma_ops;
        return true;

out_detach:
        iommu_detach_device(domain, dev);
out_put_domain:
        if (domain->type & __IOMMU_DOMAIN_FAKE_DEFAULT)
                iommu_domain_free(domain);
out_no_domain:
        pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
                dev_name(dev));
        return false;
}
static void queue_iommu_attach(struct device *dev, const struct iommu_ops *ops,
                               u64 dma_base, u64 size)
{
        struct iommu_dma_notifier_data *iommudata;

        iommudata = kzalloc(sizeof(*iommudata), GFP_KERNEL);
        if (!iommudata)
                return;

        iommudata->dev = dev;
        iommudata->ops = ops;
        iommudata->dma_base = dma_base;
        iommudata->size = size;

        mutex_lock(&iommu_dma_notifier_lock);
        list_add(&iommudata->list, &iommu_dma_masters);
        mutex_unlock(&iommu_dma_notifier_lock);
}
static int __iommu_attach_notifier(struct notifier_block *nb,
                                   unsigned long action, void *data)
{
        struct iommu_dma_notifier_data *master, *tmp;

        if (action != BUS_NOTIFY_ADD_DEVICE)
                return 0;

        mutex_lock(&iommu_dma_notifier_lock);
        list_for_each_entry_safe(master, tmp, &iommu_dma_masters, list) {
                if (do_iommu_attach(master->dev, master->ops,
                                    master->dma_base, master->size)) {
                        list_del(&master->list);
                        kfree(master);
                }
        }
        mutex_unlock(&iommu_dma_notifier_lock);
        return 0;
}
static int register_iommu_dma_ops_notifier(struct bus_type *bus)
{
        struct notifier_block *nb = kzalloc(sizeof(*nb), GFP_KERNEL);
        int ret;

        if (!nb)
                return -ENOMEM;
        /*
         * The device must be attached to a domain before the driver probe
         * routine gets a chance to start allocating DMA buffers. However,
         * the IOMMU driver also needs a chance to configure the iommu_group
         * via its add_device callback first, so we need to make the attach
         * happen between those two points. Since the IOMMU core uses a bus
         * notifier with default priority for add_device, do the same but
         * with a lower priority to ensure the appropriate ordering.
         */
        nb->notifier_call = __iommu_attach_notifier;
        nb->priority = -100;

        ret = bus_register_notifier(bus, nb);
        if (ret) {
                pr_warn("Failed to register DMA domain notifier; IOMMU DMA ops unavailable on bus '%s'\n",
                        bus->name);
                kfree(nb);
        }

        return ret;
}
static int __init __iommu_dma_init(void)
{
        int ret;

        ret = iommu_dma_init();
        if (!ret)
                ret = register_iommu_dma_ops_notifier(&platform_bus_type);
        if (!ret)
                ret = register_iommu_dma_ops_notifier(&amba_bustype);
        return ret;
}
arch_initcall(__iommu_dma_init);
static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                                  const struct iommu_ops *ops)
{
        struct iommu_group *group;

        if (!ops)
                return;
        /*
         * TODO: As a concession to the future, we're ready to handle being
         * called both early and late (i.e. after bus_add_device). Once all
         * the platform bus code is reworked to call us late and the notifier
         * junk above goes away, move the body of do_iommu_attach here.
         */
        group = iommu_group_get(dev);
        if (group) {
                do_iommu_attach(dev, ops, dma_base, size);
                iommu_group_put(group);
        } else {
                queue_iommu_attach(dev, ops, dma_base, size);
        }
}
void arch_teardown_dma_ops(struct device *dev)
{
        struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

        if (domain) {
                iommu_detach_device(domain, dev);
                if (domain->type & __IOMMU_DOMAIN_FAKE_DEFAULT)
                        iommu_domain_free(domain);
        }

        dev->archdata.dma_ops = NULL;
}
#else

static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                                  struct iommu_ops *iommu)
{ }

#endif  /* CONFIG_IOMMU_DMA */
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                        struct iommu_ops *iommu, bool coherent)
{
        if (!acpi_disabled && !dev->archdata.dma_ops)
                dev->archdata.dma_ops = dma_ops;

        dev->archdata.dma_coherent = coherent;
        __iommu_setup_dma_ops(dev, dma_base, size, iommu);
}