/*
 * SWIOTLB-based DMA API implementation
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/gfp.h>
#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/cache.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/genalloc.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>
#include <linux/pci.h>

#include <asm/cacheflush.h>
static int swiotlb __ro_after_init;
static pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot,
                                 bool coherent)
{
        if (!coherent || (attrs & DMA_ATTR_WRITE_COMBINE))
                return pgprot_writecombine(prot);
        return prot;
}
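/*
 * Pool of pre-allocated, non-cacheable memory used to satisfy atomic
 * (non-blocking) coherent allocations for non-coherent devices. The size
 * defaults to DEFAULT_DMA_COHERENT_POOL_SIZE and can be overridden with
 * the "coherent_pool=" kernel parameter handled below.
 */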
static struct gen_pool *atomic_pool __ro_after_init;

#define DEFAULT_DMA_COHERENT_POOL_SIZE  SZ_256K
static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE;
static int __init early_coherent_pool(char *p)
{
        atomic_pool_size = memparse(p, &p);
        return 0;
}
early_param("coherent_pool", early_coherent_pool);
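/*
 * Helpers for carving allocations out of (and returning them to) the
 * atomic pool set up by atomic_pool_init() below.
 */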
static void *__alloc_from_pool(size_t size, struct page **ret_page,
                               gfp_t flags)
{
        unsigned long val;
        void *ptr = NULL;

        if (!atomic_pool) {
                WARN(1, "coherent pool not initialised!\n");
                return NULL;
        }

        val = gen_pool_alloc(atomic_pool, size);
        if (val) {
                phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);

                *ret_page = phys_to_page(phys);
                ptr = (void *)val;
                memset(ptr, 0, size);
        }

        return ptr;
}
static bool __in_atomic_pool(void *start, size_t size)
{
        return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
}
static int __free_from_pool(void *start, size_t size)
{
        if (!__in_atomic_pool(start, size))
                return 0;

        gen_pool_free(atomic_pool, (unsigned long)start, size);

        return 1;
}
static void *__dma_alloc_coherent(struct device *dev, size_t size,
                                  dma_addr_t *dma_handle, gfp_t flags,
                                  unsigned long attrs)
{
        if (IS_ENABLED(CONFIG_ZONE_DMA) &&
            dev->coherent_dma_mask <= DMA_BIT_MASK(32))
                flags |= GFP_DMA;
        if (dev_get_cma_area(dev) && gfpflags_allow_blocking(flags)) {
                struct page *page;
                void *addr;

                page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
                                                 get_order(size), flags);
                if (!page)
                        return NULL;

                *dma_handle = phys_to_dma(dev, page_to_phys(page));
                addr = page_address(page);
                memset(addr, 0, size);
                return addr;
        } else {
                return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
        }
}
static void __dma_free_coherent(struct device *dev, size_t size,
                                void *vaddr, dma_addr_t dma_handle,
                                unsigned long attrs)
{
        bool freed;
        phys_addr_t paddr = dma_to_phys(dev, dma_handle);

        freed = dma_release_from_contiguous(dev,
                                        phys_to_page(paddr),
                                        size >> PAGE_SHIFT);
        if (!freed)
                swiotlb_free_coherent(dev, size, vaddr, dma_handle);
}
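/*
 * Top-level coherent allocator: non-blocking requests from non-coherent
 * devices are served straight from the atomic pool; everything else goes
 * through __dma_alloc_coherent() and, for non-coherent devices, is
 * additionally remapped with non-cacheable attributes.
 */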
static void *__dma_alloc(struct device *dev, size_t size,
                         dma_addr_t *dma_handle, gfp_t flags,
                         unsigned long attrs)
{
        struct page *page;
        void *ptr, *coherent_ptr;
        bool coherent = is_device_dma_coherent(dev);
        pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, false);

        size = PAGE_ALIGN(size);

        if (!coherent && !gfpflags_allow_blocking(flags)) {
                struct page *page = NULL;
                void *addr = __alloc_from_pool(size, &page, flags);

                if (addr)
                        *dma_handle = phys_to_dma(dev, page_to_phys(page));

                return addr;
        }

        ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs);
        if (!ptr)
                goto no_mem;

        /* no need for non-cacheable mapping if coherent */
        if (coherent)
                return ptr;

        /* remove any dirty cache lines on the kernel alias */
        __dma_flush_area(ptr, size);

        /* create a coherent mapping */
        page = virt_to_page(ptr);
        coherent_ptr = dma_common_contiguous_remap(page, size, VM_USERMAP,
                                                   prot, __builtin_return_address(0));
        if (!coherent_ptr)
                goto no_map;

        return coherent_ptr;

no_map:
        __dma_free_coherent(dev, size, ptr, *dma_handle, attrs);
no_mem:
        return NULL;
}
static void __dma_free(struct device *dev, size_t size,
                       void *vaddr, dma_addr_t dma_handle,
                       unsigned long attrs)
{
        void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));

        size = PAGE_ALIGN(size);

        if (!is_device_dma_coherent(dev)) {
                if (__free_from_pool(vaddr, size))
                        return;
                vunmap(vaddr);
        }
        __dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs);
}
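/*
 * Streaming DMA operations: each wrapper calls the corresponding swiotlb_*
 * helper and, for non-coherent devices, performs cache maintenance on the
 * kernel linear alias of the buffer (the map/unmap paths honour
 * DMA_ATTR_SKIP_CPU_SYNC).
 */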
static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
                                     unsigned long offset, size_t size,
                                     enum dma_data_direction dir,
                                     unsigned long attrs)
{
        dma_addr_t dev_addr;

        dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs);
        if (!is_device_dma_coherent(dev) &&
            (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                __dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);

        return dev_addr;
}
static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
                                 size_t size, enum dma_data_direction dir,
                                 unsigned long attrs)
{
        if (!is_device_dma_coherent(dev) &&
            (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
        swiotlb_unmap_page(dev, dev_addr, size, dir, attrs);
}
static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
                                  int nelems, enum dma_data_direction dir,
                                  unsigned long attrs)
{
        struct scatterlist *sg;
        int i, ret;

        ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs);
        if (!is_device_dma_coherent(dev) &&
            (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                for_each_sg(sgl, sg, ret, i)
                        __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
                                       sg->length, dir);

        return ret;
}
static void __swiotlb_unmap_sg_attrs(struct device *dev,
                                     struct scatterlist *sgl, int nelems,
                                     enum dma_data_direction dir,
                                     unsigned long attrs)
{
        struct scatterlist *sg;
        int i;

        if (!is_device_dma_coherent(dev) &&
            (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                for_each_sg(sgl, sg, nelems, i)
                        __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
                                         sg->length, dir);
        swiotlb_unmap_sg_attrs(dev, sgl, nelems, dir, attrs);
}
static void __swiotlb_sync_single_for_cpu(struct device *dev,
                                          dma_addr_t dev_addr, size_t size,
                                          enum dma_data_direction dir)
{
        if (!is_device_dma_coherent(dev))
                __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
        swiotlb_sync_single_for_cpu(dev, dev_addr, size, dir);
}
static void __swiotlb_sync_single_for_device(struct device *dev,
                                             dma_addr_t dev_addr, size_t size,
                                             enum dma_data_direction dir)
{
        swiotlb_sync_single_for_device(dev, dev_addr, size, dir);
        if (!is_device_dma_coherent(dev))
                __dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
}
static void __swiotlb_sync_sg_for_cpu(struct device *dev,
                                      struct scatterlist *sgl, int nelems,
                                      enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        if (!is_device_dma_coherent(dev))
                for_each_sg(sgl, sg, nelems, i)
                        __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
                                         sg->length, dir);
        swiotlb_sync_sg_for_cpu(dev, sgl, nelems, dir);
}
static void __swiotlb_sync_sg_for_device(struct device *dev,
                                         struct scatterlist *sgl, int nelems,
                                         enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        swiotlb_sync_sg_for_device(dev, sgl, nelems, dir);
        if (!is_device_dma_coherent(dev))
                for_each_sg(sgl, sg, nelems, i)
                        __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
                                       sg->length, dir);
}
static int __swiotlb_mmap_pfn(struct vm_area_struct *vma,
                              unsigned long pfn, size_t size)
{
        int ret = -ENXIO;
        unsigned long nr_vma_pages = vma_pages(vma);
        unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
        unsigned long off = vma->vm_pgoff;

        if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
                ret = remap_pfn_range(vma, vma->vm_start,
                                      pfn + off,
                                      vma->vm_end - vma->vm_start,
                                      vma->vm_page_prot);
        }

        return ret;
}
static int __swiotlb_mmap(struct device *dev,
                          struct vm_area_struct *vma,
                          void *cpu_addr, dma_addr_t dma_addr, size_t size,
                          unsigned long attrs)
{
        int ret;
        unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT;

        vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
                                             is_device_dma_coherent(dev));

        if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
                return ret;

        return __swiotlb_mmap_pfn(vma, pfn, size);
}
static int __swiotlb_get_sgtable_page(struct sg_table *sgt,
                                      struct page *page, size_t size)
{
        int ret = sg_alloc_table(sgt, 1, GFP_KERNEL);

        if (!ret)
                sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);

        return ret;
}
static int __swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
                                 void *cpu_addr, dma_addr_t handle, size_t size,
                                 unsigned long attrs)
{
        struct page *page = phys_to_page(dma_to_phys(dev, handle));

        return __swiotlb_get_sgtable_page(sgt, page, size);
}
static int __swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
        if (swiotlb)
                return swiotlb_dma_supported(hwdev, mask);
        return 1;
}
static int __swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t addr)
{
        if (swiotlb)
                return swiotlb_dma_mapping_error(hwdev, addr);
        return 0;
}
static const struct dma_map_ops swiotlb_dma_ops = {
        .alloc = __dma_alloc,
        .free = __dma_free,
        .mmap = __swiotlb_mmap,
        .get_sgtable = __swiotlb_get_sgtable,
        .map_page = __swiotlb_map_page,
        .unmap_page = __swiotlb_unmap_page,
        .map_sg = __swiotlb_map_sg_attrs,
        .unmap_sg = __swiotlb_unmap_sg_attrs,
        .sync_single_for_cpu = __swiotlb_sync_single_for_cpu,
        .sync_single_for_device = __swiotlb_sync_single_for_device,
        .sync_sg_for_cpu = __swiotlb_sync_sg_for_cpu,
        .sync_sg_for_device = __swiotlb_sync_sg_for_device,
        .dma_supported = __swiotlb_dma_supported,
        .mapping_error = __swiotlb_dma_mapping_error,
};
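/*
 * Set up the atomic pool at boot: grab a physically contiguous chunk
 * (from CMA if available, otherwise GFP_DMA pages), clean it via the
 * linear mapping, remap it non-cacheable and hand it to a genalloc pool.
 */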
static int __init atomic_pool_init(void)
{
        pgprot_t prot = __pgprot(PROT_NORMAL_NC);
        unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT;
        struct page *page;
        void *addr;
        unsigned int pool_size_order = get_order(atomic_pool_size);

        if (dev_get_cma_area(NULL))
                page = dma_alloc_from_contiguous(NULL, nr_pages,
                                                 pool_size_order, GFP_KERNEL);
        else
                page = alloc_pages(GFP_DMA, pool_size_order);

        if (page) {
                int ret;
                void *page_addr = page_address(page);

                memset(page_addr, 0, atomic_pool_size);
                __dma_flush_area(page_addr, atomic_pool_size);

                atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
                if (!atomic_pool)
                        goto free_page;

                addr = dma_common_contiguous_remap(page, atomic_pool_size,
                                        VM_USERMAP, prot, atomic_pool_init);

                if (!addr)
                        goto destroy_genpool;

                ret = gen_pool_add_virt(atomic_pool, (unsigned long)addr,
                                        page_to_phys(page),
                                        atomic_pool_size, -1);
                if (ret)
                        goto remove_mapping;

                gen_pool_set_algo(atomic_pool,
                                  gen_pool_first_fit_order_align,
                                  (void *)PAGE_SHIFT);

                pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n",
                        atomic_pool_size / 1024);
                return 0;
        }
        goto out;

remove_mapping:
        dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP);
destroy_genpool:
        gen_pool_destroy(atomic_pool);
        atomic_pool = NULL;
free_page:
        if (!dma_release_from_contiguous(NULL, page, nr_pages))
                __free_pages(page, pool_size_order);
out:
        pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
               atomic_pool_size / 1024);
        return -ENOMEM;
}
/********************************************
 * The following APIs are for dummy DMA ops *
 ********************************************/
static void *__dummy_alloc(struct device *dev, size_t size,
                           dma_addr_t *dma_handle, gfp_t flags,
                           unsigned long attrs)
{
        return NULL;
}

static void __dummy_free(struct device *dev, size_t size,
                         void *vaddr, dma_addr_t dma_handle,
                         unsigned long attrs)
{
}

static int __dummy_mmap(struct device *dev,
                        struct vm_area_struct *vma,
                        void *cpu_addr, dma_addr_t dma_addr, size_t size,
                        unsigned long attrs)
{
        return -ENXIO;
}

static dma_addr_t __dummy_map_page(struct device *dev, struct page *page,
                                   unsigned long offset, size_t size,
                                   enum dma_data_direction dir,
                                   unsigned long attrs)
{
        return 0;
}

static void __dummy_unmap_page(struct device *dev, dma_addr_t dev_addr,
                               size_t size, enum dma_data_direction dir,
                               unsigned long attrs)
{
}

static int __dummy_map_sg(struct device *dev, struct scatterlist *sgl,
                          int nelems, enum dma_data_direction dir,
                          unsigned long attrs)
{
        return 0;
}

static void __dummy_unmap_sg(struct device *dev,
                             struct scatterlist *sgl, int nelems,
                             enum dma_data_direction dir,
                             unsigned long attrs)
{
}

static void __dummy_sync_single(struct device *dev,
                                dma_addr_t dev_addr, size_t size,
                                enum dma_data_direction dir)
{
}

static void __dummy_sync_sg(struct device *dev,
                            struct scatterlist *sgl, int nelems,
                            enum dma_data_direction dir)
{
}

static int __dummy_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
        return 1;
}

static int __dummy_dma_supported(struct device *hwdev, u64 mask)
{
        return 0;
}
const struct dma_map_ops dummy_dma_ops = {
        .alloc = __dummy_alloc,
        .free = __dummy_free,
        .mmap = __dummy_mmap,
        .map_page = __dummy_map_page,
        .unmap_page = __dummy_unmap_page,
        .map_sg = __dummy_map_sg,
        .unmap_sg = __dummy_unmap_sg,
        .sync_single_for_cpu = __dummy_sync_single,
        .sync_single_for_device = __dummy_sync_single,
        .sync_sg_for_cpu = __dummy_sync_sg,
        .sync_sg_for_device = __dummy_sync_sg,
        .mapping_error = __dummy_mapping_error,
        .dma_supported = __dummy_dma_supported,
};
EXPORT_SYMBOL(dummy_dma_ops);
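/*
 * Only enable swiotlb bounce buffering when it may actually be needed,
 * i.e. when forced on the command line or when RAM extends beyond
 * arm64_dma_phys_limit.
 */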
static int __init arm64_dma_init(void)
{
        if (swiotlb_force == SWIOTLB_FORCE ||
            max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
                swiotlb = 1;

        return atomic_pool_init();
}
arch_initcall(arm64_dma_init);
#define PREALLOC_DMA_DEBUG_ENTRIES      4096

static int __init dma_debug_do_init(void)
{
        dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
        return 0;
}
fs_initcall(dma_debug_do_init);
#ifdef CONFIG_IOMMU_DMA
#include <linux/dma-iommu.h>
#include <linux/platform_device.h>
#include <linux/amba/bus.h>

/* Thankfully, all cache ops are by VA so we can ignore phys here */
static void flush_page(struct device *dev, const void *virt, phys_addr_t phys)
{
        __dma_flush_area(virt, PAGE_SIZE);
}
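/*
 * IOMMU-backed coherent allocation takes one of three paths: a physically
 * contiguous buffer for atomic (non-blocking) requests, a CMA buffer when
 * DMA_ATTR_FORCE_CONTIGUOUS is set, or scattered pages from
 * iommu_dma_alloc() remapped into a contiguous VA range.
 */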
static void *__iommu_alloc_attrs(struct device *dev, size_t size,
                                 dma_addr_t *handle, gfp_t gfp,
                                 unsigned long attrs)
{
        bool coherent = is_device_dma_coherent(dev);
        int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
        size_t iosize = size;
        void *addr;

        if (WARN(!dev, "cannot create IOMMU mapping for unknown device\n"))
                return NULL;

        size = PAGE_ALIGN(size);

        /*
         * Some drivers rely on this, and we probably don't want the
         * possibility of stale kernel data being read by devices anyway.
         */
        gfp |= __GFP_ZERO;

        if (!gfpflags_allow_blocking(gfp)) {
                struct page *page;
                /*
                 * In atomic context we can't remap anything, so we'll only
                 * get the virtually contiguous buffer we need by way of a
                 * physically contiguous allocation.
                 */
                if (coherent) {
                        page = alloc_pages(gfp, get_order(size));
                        addr = page ? page_address(page) : NULL;
                } else {
                        addr = __alloc_from_pool(size, &page, gfp);
                }
                if (!addr)
                        return NULL;

                *handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
                if (iommu_dma_mapping_error(dev, *handle)) {
                        if (coherent)
                                __free_pages(page, get_order(size));
                        else
                                __free_from_pool(addr, size);
                        addr = NULL;
                }
        } else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
                pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
                struct page *page;

                page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
                                                 get_order(size), gfp);
                if (!page)
                        return NULL;

                *handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
                if (iommu_dma_mapping_error(dev, *handle)) {
                        dma_release_from_contiguous(dev, page,
                                                    size >> PAGE_SHIFT);
                        return NULL;
                }
                addr = dma_common_contiguous_remap(page, size, VM_USERMAP,
                                                   prot,
                                                   __builtin_return_address(0));
                if (addr) {
                        if (!coherent)
                                __dma_flush_area(page_to_virt(page), iosize);
                        memset(addr, 0, size);
                } else {
                        iommu_dma_unmap_page(dev, *handle, iosize, 0, attrs);
                        dma_release_from_contiguous(dev, page,
                                                    size >> PAGE_SHIFT);
                }
        } else {
                pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
                struct page **pages;

                pages = iommu_dma_alloc(dev, iosize, gfp, attrs, ioprot,
                                        handle, flush_page);
                if (!pages)
                        return NULL;

                addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
                                              __builtin_return_address(0));
                if (!addr)
                        iommu_dma_free(dev, pages, iosize, handle);
        }
        return addr;
}
static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
                               dma_addr_t handle, unsigned long attrs)
{
        size_t iosize = size;

        size = PAGE_ALIGN(size);
        /*
         * @cpu_addr will be one of 4 things depending on how it was allocated:
         * - A remapped array of pages for contiguous allocations.
         * - A remapped array of pages from iommu_dma_alloc(), for all
         *   non-atomic allocations.
         * - A non-cacheable alias from the atomic pool, for atomic
         *   allocations by non-coherent devices.
         * - A normal lowmem address, for atomic allocations by
         *   coherent devices.
         * Hence how dodgy the below logic looks...
         */
        if (__in_atomic_pool(cpu_addr, size)) {
                iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
                __free_from_pool(cpu_addr, size);
        } else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
                struct page *page = vmalloc_to_page(cpu_addr);

                iommu_dma_unmap_page(dev, handle, iosize, 0, attrs);
                dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
                dma_common_free_remap(cpu_addr, size, VM_USERMAP);
        } else if (is_vmalloc_addr(cpu_addr)) {
                struct vm_struct *area = find_vm_area(cpu_addr);

                if (WARN_ON(!area || !area->pages))
                        return;
                iommu_dma_free(dev, area->pages, iosize, &handle);
                dma_common_free_remap(cpu_addr, size, VM_USERMAP);
        } else {
                iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
                __free_pages(virt_to_page(cpu_addr), get_order(size));
        }
}
static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
                              void *cpu_addr, dma_addr_t dma_addr, size_t size,
                              unsigned long attrs)
{
        struct vm_struct *area;
        int ret;

        vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
                                             is_device_dma_coherent(dev));

        if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
                return ret;

        if (!is_vmalloc_addr(cpu_addr)) {
                unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr));
                return __swiotlb_mmap_pfn(vma, pfn, size);
        }

        if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
                /*
                 * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
                 * hence in the vmalloc space.
                 */
                unsigned long pfn = vmalloc_to_pfn(cpu_addr);
                return __swiotlb_mmap_pfn(vma, pfn, size);
        }

        area = find_vm_area(cpu_addr);
        if (WARN_ON(!area || !area->pages))
                return -ENXIO;

        return iommu_dma_mmap(area->pages, size, vma);
}
static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
                               void *cpu_addr, dma_addr_t dma_addr,
                               size_t size, unsigned long attrs)
{
        unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        struct vm_struct *area = find_vm_area(cpu_addr);

        if (!is_vmalloc_addr(cpu_addr)) {
                struct page *page = virt_to_page(cpu_addr);
                return __swiotlb_get_sgtable_page(sgt, page, size);
        }

        if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
                /*
                 * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
                 * hence in the vmalloc space.
                 */
                struct page *page = vmalloc_to_page(cpu_addr);
                return __swiotlb_get_sgtable_page(sgt, page, size);
        }

        if (WARN_ON(!area || !area->pages))
                return -ENXIO;

        return sg_alloc_table_from_pages(sgt, area->pages, count, 0, size,
                                         GFP_KERNEL);
}
static void __iommu_sync_single_for_cpu(struct device *dev,
                                        dma_addr_t dev_addr, size_t size,
                                        enum dma_data_direction dir)
{
        phys_addr_t phys;

        if (is_device_dma_coherent(dev))
                return;

        phys = iommu_iova_to_phys(iommu_get_domain_for_dev(dev), dev_addr);
        __dma_unmap_area(phys_to_virt(phys), size, dir);
}
static void __iommu_sync_single_for_device(struct device *dev,
                                           dma_addr_t dev_addr, size_t size,
                                           enum dma_data_direction dir)
{
        phys_addr_t phys;

        if (is_device_dma_coherent(dev))
                return;

        phys = iommu_iova_to_phys(iommu_get_domain_for_dev(dev), dev_addr);
        __dma_map_area(phys_to_virt(phys), size, dir);
}
static dma_addr_t __iommu_map_page(struct device *dev, struct page *page,
                                   unsigned long offset, size_t size,
                                   enum dma_data_direction dir,
                                   unsigned long attrs)
{
        bool coherent = is_device_dma_coherent(dev);
        int prot = dma_info_to_prot(dir, coherent, attrs);
        dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot);

        if (!iommu_dma_mapping_error(dev, dev_addr) &&
            (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                __iommu_sync_single_for_device(dev, dev_addr, size, dir);

        return dev_addr;
}
static void __iommu_unmap_page(struct device *dev, dma_addr_t dev_addr,
                               size_t size, enum dma_data_direction dir,
                               unsigned long attrs)
{
        if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                __iommu_sync_single_for_cpu(dev, dev_addr, size, dir);

        iommu_dma_unmap_page(dev, dev_addr, size, dir, attrs);
}
static void __iommu_sync_sg_for_cpu(struct device *dev,
                                    struct scatterlist *sgl, int nelems,
                                    enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        if (is_device_dma_coherent(dev))
                return;

        for_each_sg(sgl, sg, nelems, i)
                __dma_unmap_area(sg_virt(sg), sg->length, dir);
}
static void __iommu_sync_sg_for_device(struct device *dev,
                                       struct scatterlist *sgl, int nelems,
                                       enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        if (is_device_dma_coherent(dev))
                return;

        for_each_sg(sgl, sg, nelems, i)
                __dma_map_area(sg_virt(sg), sg->length, dir);
}
static int __iommu_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
                                int nelems, enum dma_data_direction dir,
                                unsigned long attrs)
{
        bool coherent = is_device_dma_coherent(dev);

        if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                __iommu_sync_sg_for_device(dev, sgl, nelems, dir);

        return iommu_dma_map_sg(dev, sgl, nelems,
                                dma_info_to_prot(dir, coherent, attrs));
}
static void __iommu_unmap_sg_attrs(struct device *dev,
                                   struct scatterlist *sgl, int nelems,
                                   enum dma_data_direction dir,
                                   unsigned long attrs)
{
        if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                __iommu_sync_sg_for_cpu(dev, sgl, nelems, dir);

        iommu_dma_unmap_sg(dev, sgl, nelems, dir, attrs);
}
static const struct dma_map_ops iommu_dma_ops = {
        .alloc = __iommu_alloc_attrs,
        .free = __iommu_free_attrs,
        .mmap = __iommu_mmap_attrs,
        .get_sgtable = __iommu_get_sgtable,
        .map_page = __iommu_map_page,
        .unmap_page = __iommu_unmap_page,
        .map_sg = __iommu_map_sg_attrs,
        .unmap_sg = __iommu_unmap_sg_attrs,
        .sync_single_for_cpu = __iommu_sync_single_for_cpu,
        .sync_single_for_device = __iommu_sync_single_for_device,
        .sync_sg_for_cpu = __iommu_sync_sg_for_cpu,
        .sync_sg_for_device = __iommu_sync_sg_for_device,
        .map_resource = iommu_dma_map_resource,
        .unmap_resource = iommu_dma_unmap_resource,
        .mapping_error = iommu_dma_mapping_error,
};
static int __init __iommu_dma_init(void)
{
        return iommu_dma_init();
}
arch_initcall(__iommu_dma_init);
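/*
 * Install iommu_dma_ops for a device only if the IOMMU core gave it a
 * default domain of type IOMMU_DOMAIN_DMA and the dma-iommu layer can
 * initialise that domain for the requested DMA window; otherwise keep
 * whatever ops the device already has.
 */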
static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                                  const struct iommu_ops *ops)
{
        struct iommu_domain *domain;

        if (!ops)
                return;

        /*
         * The IOMMU core code allocates the default DMA domain, which the
         * underlying IOMMU driver needs to support via the dma-iommu layer.
         */
        domain = iommu_get_domain_for_dev(dev);

        if (!domain)
                goto out_err;

        if (domain->type == IOMMU_DOMAIN_DMA) {
                if (iommu_dma_init_domain(domain, dma_base, size, dev))
                        goto out_err;

                dev->dma_ops = &iommu_dma_ops;
        }

        return;

out_err:
        pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
                dev_name(dev));
}

void arch_teardown_dma_ops(struct device *dev)
{
        dev->dma_ops = NULL;
}
#else

static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                                  const struct iommu_ops *iommu)
{ }

#endif  /* CONFIG_IOMMU_DMA */
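/*
 * Default to the swiotlb-based ops, let the IOMMU path override them where
 * applicable, and finally let Xen (in the initial domain) interpose its own
 * ops on top of whatever was chosen.
 */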
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                        const struct iommu_ops *iommu, bool coherent)
{
        if (!dev->dma_ops)
                dev->dma_ops = &swiotlb_dma_ops;

        dev->archdata.dma_coherent = coherent;
        __iommu_setup_dma_ops(dev, dma_base, size, iommu);

#ifdef CONFIG_XEN
        if (xen_initial_domain()) {
                dev->archdata.dev_dma_ops = dev->dma_ops;
                dev->dma_ops = xen_dma_ops;