/*
 *  linux/arch/arm/mm/dma-mapping.c
 *
 *  Copyright (C) 2000-2004 Russell King
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 *
 *  DMA uncached mapping support.
 */
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/genalloc.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/sizes.h>
#include <linux/cma.h>

#include <asm/memory.h>
#include <asm/highmem.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/mach/arch.h>
#include <asm/dma-iommu.h>
#include <asm/mach/map.h>
#include <asm/system_info.h>
#include <asm/dma-contiguous.h>

#include "mm.h"
struct arm_dma_alloc_args {
	struct device *dev;
	size_t size;
	gfp_t gfp;
	pgprot_t prot;
	const void *caller;
	bool want_vaddr;
	int coherent_flag;
};

struct arm_dma_free_args {
	struct device *dev;
	size_t size;
	void *cpu_addr;
	struct page *page;
	bool want_vaddr;
};

#define NORMAL	    0
#define COHERENT    1

struct arm_dma_allocator {
	void *(*alloc)(struct arm_dma_alloc_args *args,
		       struct page **ret_page);
	void (*free)(struct arm_dma_free_args *args);
};

struct arm_dma_buffer {
	struct list_head list;
	void *virt;
	struct arm_dma_allocator *allocator;
};
static LIST_HEAD(arm_dma_bufs);
static DEFINE_SPINLOCK(arm_dma_bufs_lock);

static struct arm_dma_buffer *arm_dma_buffer_find(void *virt)
{
	struct arm_dma_buffer *buf, *found = NULL;
	unsigned long flags;

	spin_lock_irqsave(&arm_dma_bufs_lock, flags);
	list_for_each_entry(buf, &arm_dma_bufs, list) {
		if (buf->virt == virt) {
			list_del(&buf->list);
			found = buf;
			break;
		}
	}
	spin_unlock_irqrestore(&arm_dma_bufs_lock, flags);
	return found;
}
/*
 * The DMA API is built upon the notion of "buffer ownership".  A buffer
 * is either exclusively owned by the CPU (and therefore may be accessed
 * by it) or exclusively owned by the DMA device.  These helper functions
 * represent the transitions between these two ownership states.
 *
 * Note, however, that on later ARMs, this notion does not work due to
 * speculative prefetches.  We model our approach on the assumption that
 * the CPU does do speculative prefetches, which means we clean caches
 * before transfers and delay cache invalidation until transfer completion.
 */
static void __dma_page_cpu_to_dev(struct page *, unsigned long,
		size_t, enum dma_data_direction);
static void __dma_page_dev_to_cpu(struct page *, unsigned long,
		size_t, enum dma_data_direction);
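/*
 * Illustrative sketch (not taken from a real driver): how the ownership
 * transitions above look through the streaming DMA API.  The buffer and
 * my_start_transfer() below are hypothetical.
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	my_start_transfer(handle, len);		// device owns the buffer
 *	// ... wait for completion ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 *	// CPU owns the buffer again; reads see what the device wrote
 */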
/**
 * arm_dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_page().
 */
static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     unsigned long attrs)
{
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__dma_page_cpu_to_dev(page, offset, size, dir);
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}
static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     unsigned long attrs)
{
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}
/**
 * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Unmap a page streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
				      handle & ~PAGE_MASK, size, dir);
}
static void arm_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned int offset = handle & (PAGE_SIZE - 1);
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
	__dma_page_dev_to_cpu(page, offset, size, dir);
}

static void arm_dma_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned int offset = handle & (PAGE_SIZE - 1);
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
	__dma_page_cpu_to_dev(page, offset, size, dir);
}

static int arm_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == ARM_MAPPING_ERROR;
}
const struct dma_map_ops arm_dma_ops = {
	.alloc			= arm_dma_alloc,
	.free			= arm_dma_free,
	.mmap			= arm_dma_mmap,
	.get_sgtable		= arm_dma_get_sgtable,
	.map_page		= arm_dma_map_page,
	.unmap_page		= arm_dma_unmap_page,
	.map_sg			= arm_dma_map_sg,
	.unmap_sg		= arm_dma_unmap_sg,
	.sync_single_for_cpu	= arm_dma_sync_single_for_cpu,
	.sync_single_for_device	= arm_dma_sync_single_for_device,
	.sync_sg_for_cpu	= arm_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= arm_dma_sync_sg_for_device,
	.mapping_error		= arm_dma_mapping_error,
	.dma_supported		= arm_dma_supported,
};
EXPORT_SYMBOL(arm_dma_ops);
static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
	dma_addr_t *handle, gfp_t gfp, unsigned long attrs);
static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
	dma_addr_t handle, unsigned long attrs);
static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 unsigned long attrs);

const struct dma_map_ops arm_coherent_dma_ops = {
	.alloc			= arm_coherent_dma_alloc,
	.free			= arm_coherent_dma_free,
	.mmap			= arm_coherent_dma_mmap,
	.get_sgtable		= arm_dma_get_sgtable,
	.map_page		= arm_coherent_dma_map_page,
	.map_sg			= arm_dma_map_sg,
	.mapping_error		= arm_dma_mapping_error,
	.dma_supported		= arm_dma_supported,
};
EXPORT_SYMBOL(arm_coherent_dma_ops);
static int __dma_supported(struct device *dev, u64 mask, bool warn)
{
	unsigned long max_dma_pfn;

	/*
	 * If the mask allows for more memory than we can address,
	 * and we actually have that much memory, then we must
	 * indicate that DMA to this device is not supported.
	 */
	if (sizeof(mask) != sizeof(dma_addr_t) &&
	    mask > (dma_addr_t)~0 &&
	    dma_to_pfn(dev, ~0) < max_pfn - 1) {
		if (warn) {
			dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n",
				 mask);
			dev_warn(dev, "Driver did not use or check the return value from dma_set_coherent_mask()?\n");
		}
		return 0;
	}

	max_dma_pfn = min(max_pfn, arm_dma_pfn_limit);

	/*
	 * Translate the device's DMA mask to a PFN limit.  This
	 * PFN number includes the page which we can DMA to.
	 */
	if (dma_to_pfn(dev, mask) < max_dma_pfn) {
		if (warn)
			dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
				 mask,
				 dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
				 max_dma_pfn + 1);
		return 0;
	}

	return 1;
}
static u64 get_coherent_dma_mask(struct device *dev)
{
	u64 mask = (u64)DMA_BIT_MASK(32);

	if (dev) {
		mask = dev->coherent_dma_mask;

		/*
		 * Sanity check the DMA mask - it must be non-zero, and
		 * must be able to be satisfied by a DMA allocation.
		 */
		if (mask == 0) {
			dev_warn(dev, "coherent DMA mask is unset\n");
			return 0;
		}

		if (!__dma_supported(dev, mask, true))
			return 0;
	}

	return mask;
}
static void __dma_clear_buffer(struct page *page, size_t size, int coherent_flag)
{
	/*
	 * Ensure that the allocated pages are zeroed, and that any data
	 * lurking in the kernel direct-mapped region is invalidated.
	 */
	if (PageHighMem(page)) {
		phys_addr_t base = __pfn_to_phys(page_to_pfn(page));
		phys_addr_t end = base + size;
		while (size > 0) {
			void *ptr = kmap_atomic(page);
			memset(ptr, 0, PAGE_SIZE);
			if (coherent_flag != COHERENT)
				dmac_flush_range(ptr, ptr + PAGE_SIZE);
			kunmap_atomic(ptr);
			page++;
			size -= PAGE_SIZE;
		}
		if (coherent_flag != COHERENT)
			outer_flush_range(base, end);
	} else {
		void *ptr = page_address(page);
		memset(ptr, 0, size);
		if (coherent_flag != COHERENT) {
			dmac_flush_range(ptr, ptr + size);
			outer_flush_range(__pa(ptr), __pa(ptr) + size);
		}
	}
}
/*
 * Allocate a DMA buffer for 'dev' of size 'size' using the
 * specified gfp mask.  Note that 'size' must be page aligned.
 */
static struct page *__dma_alloc_buffer(struct device *dev, size_t size,
				       gfp_t gfp, int coherent_flag)
{
	unsigned long order = get_order(size);
	struct page *page, *p, *e;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/*
	 * Now split the huge page and free the excess pages
	 */
	split_page(page, order);
	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
		__free_page(p);

	__dma_clear_buffer(page, size, coherent_flag);

	return page;
}
/*
 * Free a DMA buffer.  'size' must be page aligned.
 */
static void __dma_free_buffer(struct page *page, size_t size)
{
	struct page *e = page + (size >> PAGE_SHIFT);

	while (page < e) {
		__free_page(page);
		page++;
	}
}
static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page,
				     const void *caller, bool want_vaddr,
				     int coherent_flag, gfp_t gfp);

static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
				 pgprot_t prot, struct page **ret_page,
				 const void *caller, bool want_vaddr);
static void *
__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
	const void *caller)
{
	/*
	 * DMA allocation can be mapped to user space, so lets
	 * set VM_USERMAP flags too.
	 */
	return dma_common_contiguous_remap(page, size,
			VM_ARM_DMA_CONSISTENT | VM_USERMAP,
			prot, caller);
}

static void __dma_free_remap(void *cpu_addr, size_t size)
{
	dma_common_free_remap(cpu_addr, size,
			      VM_ARM_DMA_CONSISTENT | VM_USERMAP);
}
#define DEFAULT_DMA_COHERENT_POOL_SIZE	SZ_256K
static struct gen_pool *atomic_pool;

static size_t atomic_pool_size = DEFAULT_DMA_COHERENT_POOL_SIZE;

static int __init early_coherent_pool(char *p)
{
	atomic_pool_size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);
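/*
 * Usage note (illustrative): the pool size can be overridden on the
 * kernel command line, e.g. "coherent_pool=4M"; memparse() accepts the
 * usual K/M/G suffixes.
 */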
void __init init_dma_coherent_pool_size(unsigned long size)
{
	/*
	 * Catch any attempt to set the pool size too late.
	 */
	BUG_ON(atomic_pool);

	/*
	 * Set architecture specific coherent pool size only if
	 * it has not been changed by kernel command line parameter.
	 */
	if (atomic_pool_size == DEFAULT_DMA_COHERENT_POOL_SIZE)
		atomic_pool_size = size;
}
/*
 * Initialise the coherent pool for atomic allocations.
 */
static int __init atomic_pool_init(void)
{
	pgprot_t prot = pgprot_dmacoherent(PAGE_KERNEL);
	gfp_t gfp = GFP_KERNEL | GFP_DMA;
	struct page *page;
	void *ptr;

	atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
	if (!atomic_pool)
		goto out;
	/*
	 * The atomic pool is only used for non-coherent allocations
	 * so we must pass NORMAL for coherent_flag.
	 */
	if (dev_get_cma_area(NULL))
		ptr = __alloc_from_contiguous(NULL, atomic_pool_size, prot,
				      &page, atomic_pool_init, true, NORMAL,
				      GFP_KERNEL);
	else
		ptr = __alloc_remap_buffer(NULL, atomic_pool_size, gfp, prot,
					   &page, atomic_pool_init, true);
	if (ptr) {
		int ret;

		ret = gen_pool_add_virt(atomic_pool, (unsigned long)ptr,
					page_to_phys(page),
					atomic_pool_size, -1);
		if (ret)
			goto destroy_genpool;

		gen_pool_set_algo(atomic_pool,
				gen_pool_first_fit_order_align,
				(void *)PAGE_SHIFT);
		pr_info("DMA: preallocated %zu KiB pool for atomic coherent allocations\n",
		       atomic_pool_size / 1024);
		return 0;
	}

destroy_genpool:
	gen_pool_destroy(atomic_pool);
	atomic_pool = NULL;
out:
	pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
	       atomic_pool_size / 1024);
	return -ENOMEM;
}
/*
 * CMA is activated by core_initcall, so we must be called after it.
 */
postcore_initcall(atomic_pool_init);
struct dma_contig_early_reserve {
	phys_addr_t base;
	unsigned long size;
};

static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS] __initdata;

static int dma_mmu_remap_num __initdata;

void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
{
	dma_mmu_remap[dma_mmu_remap_num].base = base;
	dma_mmu_remap[dma_mmu_remap_num].size = size;
	dma_mmu_remap_num++;
}
void __init dma_contiguous_remap(void)
{
	int i;

	for (i = 0; i < dma_mmu_remap_num; i++) {
		phys_addr_t start = dma_mmu_remap[i].base;
		phys_addr_t end = start + dma_mmu_remap[i].size;
		struct map_desc map;
		unsigned long addr;

		if (end > arm_lowmem_limit)
			end = arm_lowmem_limit;
		if (start >= end)
			continue;

		map.pfn = __phys_to_pfn(start);
		map.virtual = __phys_to_virt(start);
		map.length = end - start;
		map.type = MT_MEMORY_DMA_READY;

		/*
		 * Clear previous low-memory mapping to ensure that the
		 * TLB does not see any conflicting entries, then flush
		 * the TLB of the old entries before creating new mappings.
		 *
		 * This ensures that any speculatively loaded TLB entries
		 * (even though they may be rare) can not cause any problems,
		 * and ensures that this code is architecturally compliant.
		 */
		for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
		     addr += PMD_SIZE)
			pmd_clear(pmd_off_k(addr));

		flush_tlb_kernel_range(__phys_to_virt(start),
				       __phys_to_virt(end));

		iotable_init(&map, 1);
	}
}
static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr,
			    void *data)
{
	struct page *page = virt_to_page(addr);
	pgprot_t prot = *(pgprot_t *)data;

	set_pte_ext(pte, mk_pte(page, prot), 0);
	return 0;
}

static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
{
	unsigned long start = (unsigned long) page_address(page);
	unsigned end = start + size;

	apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
	flush_tlb_kernel_range(start, end);
}
static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
				 pgprot_t prot, struct page **ret_page,
				 const void *caller, bool want_vaddr)
{
	struct page *page;
	void *ptr = NULL;
	/*
	 * __alloc_remap_buffer is only called when the device is
	 * non-coherent
	 */
	page = __dma_alloc_buffer(dev, size, gfp, NORMAL);
	if (!page)
		return NULL;
	if (!want_vaddr)
		goto out;

	ptr = __dma_alloc_remap(page, size, gfp, prot, caller);
	if (!ptr) {
		__dma_free_buffer(page, size);
		return NULL;
	}

 out:
	*ret_page = page;
	return ptr;
}
static void *__alloc_from_pool(size_t size, struct page **ret_page)
{
	unsigned long val;
	void *ptr = NULL;

	if (!atomic_pool) {
		WARN(1, "coherent pool not initialised!\n");
		return NULL;
	}

	val = gen_pool_alloc(atomic_pool, size);
	if (val) {
		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);

		*ret_page = phys_to_page(phys);
		ptr = (void *)val;
	}

	return ptr;
}

static bool __in_atomic_pool(void *start, size_t size)
{
	return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
}

static int __free_from_pool(void *start, size_t size)
{
	if (!__in_atomic_pool(start, size))
		return 0;

	gen_pool_free(atomic_pool, (unsigned long)start, size);

	return 1;
}
static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page,
				     const void *caller, bool want_vaddr,
				     int coherent_flag, gfp_t gfp)
{
	unsigned long order = get_order(size);
	size_t count = size >> PAGE_SHIFT;
	struct page *page;
	void *ptr = NULL;

	page = dma_alloc_from_contiguous(dev, count, order, gfp);
	if (!page)
		return NULL;

	__dma_clear_buffer(page, size, coherent_flag);

	if (!want_vaddr)
		goto out;

	if (PageHighMem(page)) {
		ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot, caller);
		if (!ptr) {
			dma_release_from_contiguous(dev, page, count);
			return NULL;
		}
	} else {
		__dma_remap(page, size, prot);
		ptr = page_address(page);
	}

 out:
	*ret_page = page;
	return ptr;
}
static void __free_from_contiguous(struct device *dev, struct page *page,
				   void *cpu_addr, size_t size, bool want_vaddr)
{
	if (want_vaddr) {
		if (PageHighMem(page))
			__dma_free_remap(cpu_addr, size);
		else
			__dma_remap(page, size, PAGE_KERNEL);
	}
	dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
}
static inline pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot)
{
	prot = (attrs & DMA_ATTR_WRITE_COMBINE) ?
			pgprot_writecombine(prot) :
			pgprot_dmacoherent(prot);
	return prot;
}
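/*
 * Sketch of the attrs effect (hypothetical driver call): a driver asks
 * for a write-combining rather than an uncached mapping through the
 * generic API, e.g.
 *
 *	void *cpu = dma_alloc_attrs(dev, size, &handle, GFP_KERNEL,
 *				    DMA_ATTR_WRITE_COMBINE);
 *
 * which makes __get_dma_pgprot() above pick pgprot_writecombine().
 */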
static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
				   struct page **ret_page)
{
	struct page *page;
	/* __alloc_simple_buffer is only called when the device is coherent */
	page = __dma_alloc_buffer(dev, size, gfp, COHERENT);
	if (!page)
		return NULL;

	*ret_page = page;
	return page_address(page);
}
static void *simple_allocator_alloc(struct arm_dma_alloc_args *args,
				    struct page **ret_page)
{
	return __alloc_simple_buffer(args->dev, args->size, args->gfp,
				     ret_page);
}

static void simple_allocator_free(struct arm_dma_free_args *args)
{
	__dma_free_buffer(args->page, args->size);
}

static struct arm_dma_allocator simple_allocator = {
	.alloc = simple_allocator_alloc,
	.free = simple_allocator_free,
};

static void *cma_allocator_alloc(struct arm_dma_alloc_args *args,
				 struct page **ret_page)
{
	return __alloc_from_contiguous(args->dev, args->size, args->prot,
				       ret_page, args->caller,
				       args->want_vaddr, args->coherent_flag,
				       args->gfp);
}

static void cma_allocator_free(struct arm_dma_free_args *args)
{
	__free_from_contiguous(args->dev, args->page, args->cpu_addr,
			       args->size, args->want_vaddr);
}

static struct arm_dma_allocator cma_allocator = {
	.alloc = cma_allocator_alloc,
	.free = cma_allocator_free,
};

static void *pool_allocator_alloc(struct arm_dma_alloc_args *args,
				  struct page **ret_page)
{
	return __alloc_from_pool(args->size, ret_page);
}

static void pool_allocator_free(struct arm_dma_free_args *args)
{
	__free_from_pool(args->cpu_addr, args->size);
}

static struct arm_dma_allocator pool_allocator = {
	.alloc = pool_allocator_alloc,
	.free = pool_allocator_free,
};

static void *remap_allocator_alloc(struct arm_dma_alloc_args *args,
				   struct page **ret_page)
{
	return __alloc_remap_buffer(args->dev, args->size, args->gfp,
				    args->prot, ret_page, args->caller,
				    args->want_vaddr);
}

static void remap_allocator_free(struct arm_dma_free_args *args)
{
	if (args->want_vaddr)
		__dma_free_remap(args->cpu_addr, args->size);

	__dma_free_buffer(args->page, args->size);
}

static struct arm_dma_allocator remap_allocator = {
	.alloc = remap_allocator_alloc,
	.free = remap_allocator_free,
};
static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
			 gfp_t gfp, pgprot_t prot, bool is_coherent,
			 unsigned long attrs, const void *caller)
{
	u64 mask = get_coherent_dma_mask(dev);
	struct page *page = NULL;
	void *addr;
	bool allowblock, cma;
	struct arm_dma_buffer *buf;
	struct arm_dma_alloc_args args = {
		.dev = dev,
		.size = PAGE_ALIGN(size),
		.gfp = gfp,
		.prot = prot,
		.caller = caller,
		.want_vaddr = ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0),
		.coherent_flag = is_coherent ? COHERENT : NORMAL,
	};

#ifdef CONFIG_DMA_API_DEBUG
	u64 limit = (mask + 1) & ~mask;
	if (limit && size >= limit) {
		dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
			size, mask);
		return NULL;
	}
#endif

	if (!mask)
		return NULL;

	buf = kzalloc(sizeof(*buf),
		      gfp & ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM));
	if (!buf)
		return NULL;

	if (mask < 0xffffffffULL)
		gfp |= GFP_DMA;

	/*
	 * Following is a work-around (a.k.a. hack) to prevent pages
	 * with __GFP_COMP being passed to split_page() which cannot
	 * handle them.  The real problem is that this flag probably
	 * should be 0 on ARM as it is not supported on this
	 * platform; see CONFIG_HUGETLBFS.
	 */
	gfp &= ~(__GFP_COMP);
	args.gfp = gfp;

	*handle = ARM_MAPPING_ERROR;
	allowblock = gfpflags_allow_blocking(gfp);
	cma = allowblock ? dev_get_cma_area(dev) : false;

	if (cma)
		buf->allocator = &cma_allocator;
	else if (is_coherent)
		buf->allocator = &simple_allocator;
	else if (allowblock)
		buf->allocator = &remap_allocator;
	else
		buf->allocator = &pool_allocator;

	addr = buf->allocator->alloc(&args, &page);

	if (page) {
		unsigned long flags;

		*handle = pfn_to_dma(dev, page_to_pfn(page));
		buf->virt = args.want_vaddr ? addr : page;

		spin_lock_irqsave(&arm_dma_bufs_lock, flags);
		list_add(&buf->list, &arm_dma_bufs);
		spin_unlock_irqrestore(&arm_dma_bufs_lock, flags);
	} else {
		kfree(buf);
	}

	return args.want_vaddr ? addr : page;
}
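/*
 * Summary of the allocator choice made in __dma_alloc() above:
 *
 *	blocking gfp + CMA area available	-> cma_allocator
 *	coherent device				-> simple_allocator
 *	blocking gfp, no CMA			-> remap_allocator
 *	non-blocking (atomic context)		-> pool_allocator
 */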
/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
		    gfp_t gfp, unsigned long attrs)
{
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);

	return __dma_alloc(dev, size, handle, gfp, prot, false,
			   attrs, __builtin_return_address(0));
}
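/*
 * Illustrative sketch: drivers normally reach arm_dma_alloc() through the
 * generic wrappers, e.g.
 *
 *	dma_addr_t bus;
 *	void *cpu = dma_alloc_coherent(dev, SZ_4K, &bus, GFP_KERNEL);
 *	if (!cpu)
 *		return -ENOMEM;
 *	// ... use cpu/bus ...
 *	dma_free_coherent(dev, SZ_4K, cpu, bus);
 */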
static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
	dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
	return __dma_alloc(dev, size, handle, gfp, PAGE_KERNEL, true,
			   attrs, __builtin_return_address(0));
}
static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
			  void *cpu_addr, dma_addr_t dma_addr, size_t size,
			  unsigned long attrs)
{
	int ret = -ENXIO;
	unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = dma_to_pfn(dev, dma_addr);
	unsigned long off = vma->vm_pgoff;

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      vma->vm_end - vma->vm_start,
				      vma->vm_page_prot);
	}

	return ret;
}
/*
 * Create userspace mapping for the DMA-coherent memory.
 */
static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 unsigned long attrs)
{
	return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}

int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 unsigned long attrs)
{
	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
	return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}
/*
 * Free a buffer as defined by the above mapping.
 */
static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
			   dma_addr_t handle, unsigned long attrs,
			   bool is_coherent)
{
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
	struct arm_dma_buffer *buf;
	struct arm_dma_free_args args = {
		.dev = dev,
		.size = PAGE_ALIGN(size),
		.cpu_addr = cpu_addr,
		.page = page,
		.want_vaddr = ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0),
	};

	buf = arm_dma_buffer_find(cpu_addr);
	if (WARN(!buf, "Freeing invalid buffer %p\n", cpu_addr))
		return;

	buf->allocator->free(&args);
	kfree(buf);
}

void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
		  dma_addr_t handle, unsigned long attrs)
{
	__arm_dma_free(dev, size, cpu_addr, handle, attrs, false);
}

static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
				  dma_addr_t handle, unsigned long attrs)
{
	__arm_dma_free(dev, size, cpu_addr, handle, attrs, true);
}
/*
 * The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
 * that the intention is to allow exporting memory allocated via the
 * coherent DMA APIs through the dma_buf API, which only accepts a
 * scattertable.  This presents a couple of problems:
 * 1. Not all memory allocated via the coherent DMA APIs is backed by
 *    a struct page
 * 2. Passing coherent DMA memory into the streaming APIs is not allowed
 *    as we will try to flush the memory through a different alias to that
 *    actually being used (and the flushes are redundant.)
 */
int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
		 void *cpu_addr, dma_addr_t handle, size_t size,
		 unsigned long attrs)
{
	unsigned long pfn = dma_to_pfn(dev, handle);
	struct page *page;
	int ret;

	/* If the PFN is not valid, we do not have a struct page */
	if (!pfn_valid(pfn))
		return -ENXIO;

	page = pfn_to_page(pfn);

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (unlikely(ret))
		return ret;

	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return 0;
}
static void dma_cache_maint_page(struct page *page, unsigned long offset,
	size_t size, enum dma_data_direction dir,
	void (*op)(const void *, size_t, int))
{
	unsigned long pfn;
	size_t left = size;

	pfn = page_to_pfn(page) + offset / PAGE_SIZE;
	offset %= PAGE_SIZE;

	/*
	 * A single sg entry may refer to multiple physically contiguous
	 * pages.  But we still need to process highmem pages individually.
	 * If highmem is not configured then the bulk of this loop gets
	 * optimized out.
	 */
	do {
		size_t len = left;
		void *vaddr;

		page = pfn_to_page(pfn);

		if (PageHighMem(page)) {
			if (len + offset > PAGE_SIZE)
				len = PAGE_SIZE - offset;

			if (cache_is_vipt_nonaliasing()) {
				vaddr = kmap_atomic(page);
				op(vaddr + offset, len, dir);
				kunmap_atomic(vaddr);
			} else {
				vaddr = kmap_high_get(page);
				if (vaddr) {
					op(vaddr + offset, len, dir);
					kunmap_high(page);
				}
			}
		} else {
			vaddr = page_address(page) + offset;
			op(vaddr, len, dir);
		}
		offset = 0;
		pfn++;
		left -= len;
	} while (left);
}
/*
 * Make an area consistent for devices.
 * Note: Drivers should NOT use this function directly, as it will break
 * platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr;

	dma_cache_maint_page(page, off, size, dir, dmac_map_area);

	paddr = page_to_phys(page) + off;
	if (dir == DMA_FROM_DEVICE) {
		outer_inv_range(paddr, paddr + size);
	} else {
		outer_clean_range(paddr, paddr + size);
	}
	/* FIXME: non-speculating: flush on bidirectional mappings? */
}
static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = page_to_phys(page) + off;

	/* FIXME: non-speculating: not required */
	/* in any case, don't bother invalidating if DMA to device */
	if (dir != DMA_TO_DEVICE) {
		outer_inv_range(paddr, paddr + size);

		dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
	}

	/*
	 * Mark the D-cache clean for these pages to avoid extra flushing.
	 */
	if (dir != DMA_TO_DEVICE && size >= PAGE_SIZE) {
		unsigned long pfn;
		size_t left = size;

		pfn = page_to_pfn(page) + off / PAGE_SIZE;
		off %= PAGE_SIZE;
		if (off) {
			pfn++;
			left -= PAGE_SIZE - off;
		}
		while (left >= PAGE_SIZE) {
			page = pfn_to_page(pfn++);
			set_bit(PG_dcache_clean, &page->flags);
			left -= PAGE_SIZE;
		}
	}
}
/**
 * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the dma_map_single interface.
 * Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}.
 *
 * Device ownership issues as mentioned for dma_map_single are the same
 * here.
 */
int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i, j;

	for_each_sg(sg, s, nents, i) {
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
		s->dma_address = ops->map_page(dev, sg_page(s), s->offset,
						s->length, dir, attrs);
		if (dma_mapping_error(dev, s->dma_address))
			goto bad_mapping;
	}
	return nents;

 bad_mapping:
	for_each_sg(sg, s, i, j)
		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
	return 0;
}
/**
 * arm_dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations.  Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
}
/**
 * arm_dma_sync_sg_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		ops->sync_single_for_cpu(dev, sg_dma_address(s), s->length,
					 dir);
}

/**
 * arm_dma_sync_sg_for_device
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		ops->sync_single_for_device(dev, sg_dma_address(s), s->length,
					    dir);
}
/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask
 * to this function.
 */
int arm_dma_supported(struct device *dev, u64 mask)
{
	return __dma_supported(dev, mask, false);
}
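/*
 * Sketch: a device limited to 24 address lines would express that as
 *
 *	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(24)))
 *		return -EIO;
 *
 * which ends up here through the dma_supported method of the ops above.
 */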
#define PREALLOC_DMA_DEBUG_ENTRIES	4096

static int __init dma_debug_do_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
core_initcall(dma_debug_do_init);

#ifdef CONFIG_ARM_DMA_USE_IOMMU
static int __dma_info_to_prot(enum dma_data_direction dir, unsigned long attrs)
{
	int prot = 0;

	if (attrs & DMA_ATTR_PRIVILEGED)
		prot |= IOMMU_PRIV;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return prot;
	}
}
/* IOMMU */

static int extend_iommu_mapping(struct dma_iommu_mapping *mapping);

static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
				      size_t size)
{
	unsigned int order = get_order(size);
	unsigned int align = 0;
	unsigned int count, start;
	size_t mapping_size = mapping->bits << PAGE_SHIFT;
	unsigned long flags;
	dma_addr_t iova;
	int i;

	if (order > CONFIG_ARM_DMA_IOMMU_ALIGNMENT)
		order = CONFIG_ARM_DMA_IOMMU_ALIGNMENT;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	align = (1 << order) - 1;

	spin_lock_irqsave(&mapping->lock, flags);
	for (i = 0; i < mapping->nr_bitmaps; i++) {
		start = bitmap_find_next_zero_area(mapping->bitmaps[i],
				mapping->bits, 0, count, align);

		if (start > mapping->bits)
			continue;

		bitmap_set(mapping->bitmaps[i], start, count);
		break;
	}

	/*
	 * No unused range found. Try to extend the existing mapping
	 * and perform a second attempt to reserve an IO virtual
	 * address range of size bytes.
	 */
	if (i == mapping->nr_bitmaps) {
		if (extend_iommu_mapping(mapping)) {
			spin_unlock_irqrestore(&mapping->lock, flags);
			return ARM_MAPPING_ERROR;
		}

		start = bitmap_find_next_zero_area(mapping->bitmaps[i],
				mapping->bits, 0, count, align);

		if (start > mapping->bits) {
			spin_unlock_irqrestore(&mapping->lock, flags);
			return ARM_MAPPING_ERROR;
		}

		bitmap_set(mapping->bitmaps[i], start, count);
	}
	spin_unlock_irqrestore(&mapping->lock, flags);

	iova = mapping->base + (mapping_size * i);
	iova += start << PAGE_SHIFT;

	return iova;
}
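/*
 * Worked example for __alloc_iova() (illustrative): for a 20 KiB request
 * with 4 KiB pages, count = 5 and get_order() yields 3 (a 32 KiB order),
 * so align = (1 << 3) - 1 and the returned IO virtual address is aligned
 * to eight pages while only five pages are reserved in the bitmap
 * (assuming CONFIG_ARM_DMA_IOMMU_ALIGNMENT does not clamp the order).
 */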
static inline void __free_iova(struct dma_iommu_mapping *mapping,
			       dma_addr_t addr, size_t size)
{
	unsigned int start, count;
	size_t mapping_size = mapping->bits << PAGE_SHIFT;
	unsigned long flags;
	dma_addr_t bitmap_base;
	u32 bitmap_index;

	if (!size)
		return;

	bitmap_index = (u32) (addr - mapping->base) / (u32) mapping_size;
	BUG_ON(addr < mapping->base || bitmap_index > mapping->extensions);

	bitmap_base = mapping->base + mapping_size * bitmap_index;

	start = (addr - bitmap_base) >> PAGE_SHIFT;

	if (addr + size > bitmap_base + mapping_size) {
		/*
		 * The address range to be freed reaches into the iova
		 * range of the next bitmap. This should not happen as
		 * we don't allow this in __alloc_iova (at the
		 * moment).
		 */
		BUG();
	} else
		count = size >> PAGE_SHIFT;

	spin_lock_irqsave(&mapping->lock, flags);
	bitmap_clear(mapping->bitmaps[bitmap_index], start, count);
	spin_unlock_irqrestore(&mapping->lock, flags);
}
/* We'll try 2M, 1M, 64K, and finally 4K; array must end with 0! */
static const int iommu_order_array[] = { 9, 8, 4, 0 };
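/*
 * With 4 KiB pages the orders above correspond to 2 MiB (order 9),
 * 1 MiB (order 8), 64 KiB (order 4) and 4 KiB (order 0) chunks.
 */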
static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
					  gfp_t gfp, unsigned long attrs,
					  int coherent_flag)
{
	struct page **pages;
	int count = size >> PAGE_SHIFT;
	int array_size = count * sizeof(struct page *);
	int i = 0;
	int order_idx = 0;

	if (array_size <= PAGE_SIZE)
		pages = kzalloc(array_size, GFP_KERNEL);
	else
		pages = vzalloc(array_size);
	if (!pages)
		return NULL;

	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		unsigned long order = get_order(size);
		struct page *page;

		page = dma_alloc_from_contiguous(dev, count, order, gfp);
		if (!page)
			goto error;

		__dma_clear_buffer(page, size, coherent_flag);

		for (i = 0; i < count; i++)
			pages[i] = page + i;

		return pages;
	}

	/* Go straight to 4K chunks if caller says it's OK. */
	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
		order_idx = ARRAY_SIZE(iommu_order_array) - 1;

	/*
	 * IOMMU can map any pages, so himem can also be used here
	 */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	while (count) {
		int j, order;

		order = iommu_order_array[order_idx];

		/* Drop down when we get small */
		if (__fls(count) < order) {
			order_idx++;
			continue;
		}

		if (order) {
			/* See if it's easy to allocate a high-order chunk */
			pages[i] = alloc_pages(gfp | __GFP_NORETRY, order);

			/* Go down a notch at first sign of pressure */
			if (!pages[i]) {
				order_idx++;
				continue;
			}
		} else {
			pages[i] = alloc_pages(gfp, 0);
			if (!pages[i])
				goto error;
		}

		if (order) {
			split_page(pages[i], order);
			j = 1 << order;
			while (--j)
				pages[i + j] = pages[i] + j;
		}

		__dma_clear_buffer(pages[i], PAGE_SIZE << order, coherent_flag);
		i += 1 << order;
		count -= 1 << order;
	}

	return pages;
error:
	while (i--)
		if (pages[i])
			__free_pages(pages[i], 0);
	kvfree(pages);
	return NULL;
}
static int __iommu_free_buffer(struct device *dev, struct page **pages,
			       size_t size, unsigned long attrs)
{
	int count = size >> PAGE_SHIFT;
	int i;

	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		dma_release_from_contiguous(dev, pages[0], count);
	} else {
		for (i = 0; i < count; i++)
			if (pages[i])
				__free_pages(pages[i], 0);
	}

	kvfree(pages);
	return 0;
}

/*
 * Create a CPU mapping for a specified pages
 */
static void *
__iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
		    const void *caller)
{
	return dma_common_pages_remap(pages, size,
			VM_ARM_DMA_CONSISTENT | VM_USERMAP, prot, caller);
}

/*
 * Create a mapping in device IO address space for specified pages
 */
static dma_addr_t
__iommu_create_mapping(struct device *dev, struct page **pages, size_t size,
		       unsigned long attrs)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	dma_addr_t dma_addr, iova;
	int i;

	dma_addr = __alloc_iova(mapping, size);
	if (dma_addr == ARM_MAPPING_ERROR)
		return dma_addr;

	iova = dma_addr;
	for (i = 0; i < count; ) {
		int ret;

		unsigned int next_pfn = page_to_pfn(pages[i]) + 1;
		phys_addr_t phys = page_to_phys(pages[i]);
		unsigned int len, j;

		for (j = i + 1; j < count; j++, next_pfn++)
			if (page_to_pfn(pages[j]) != next_pfn)
				break;

		len = (j - i) << PAGE_SHIFT;
		ret = iommu_map(mapping->domain, iova, phys, len,
				__dma_info_to_prot(DMA_BIDIRECTIONAL, attrs));
		if (ret < 0)
			goto fail;
		iova += len;
		i = j;
	}
	return dma_addr;
fail:
	iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
	__free_iova(mapping, dma_addr, size);
	return ARM_MAPPING_ERROR;
}
static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);

	/*
	 * add optional in-page offset from iova to size and align
	 * result to page size
	 */
	size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
	iova &= PAGE_MASK;

	iommu_unmap(mapping->domain, iova, size);
	__free_iova(mapping, iova, size);
	return 0;
}
static struct page **__atomic_get_pages(void *addr)
{
	struct page *page;
	phys_addr_t phys;

	phys = gen_pool_virt_to_phys(atomic_pool, (unsigned long)addr);
	page = phys_to_page(phys);

	return (struct page **)page;
}

static struct page **__iommu_get_pages(void *cpu_addr, unsigned long attrs)
{
	struct vm_struct *area;

	if (__in_atomic_pool(cpu_addr, PAGE_SIZE))
		return __atomic_get_pages(cpu_addr);

	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
		return cpu_addr;

	area = find_vm_area(cpu_addr);
	if (area && (area->flags & VM_ARM_DMA_CONSISTENT))
		return area->pages;
	return NULL;
}
static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp,
				  dma_addr_t *handle, int coherent_flag,
				  unsigned long attrs)
{
	struct page *page;
	void *addr;

	if (coherent_flag == COHERENT)
		addr = __alloc_simple_buffer(dev, size, gfp, &page);
	else
		addr = __alloc_from_pool(size, &page);
	if (!addr)
		return NULL;

	*handle = __iommu_create_mapping(dev, &page, size, attrs);
	if (*handle == ARM_MAPPING_ERROR)
		goto err_mapping;

	return addr;

err_mapping:
	__free_from_pool(addr, size);
	return NULL;
}
static void __iommu_free_atomic(struct device *dev, void *cpu_addr,
			dma_addr_t handle, size_t size, int coherent_flag)
{
	__iommu_remove_mapping(dev, handle, size);
	if (coherent_flag == COHERENT)
		__dma_free_buffer(virt_to_page(cpu_addr), size);
	else
		__free_from_pool(cpu_addr, size);
}
static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
	    dma_addr_t *handle, gfp_t gfp, unsigned long attrs,
	    int coherent_flag)
{
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
	struct page **pages;
	void *addr = NULL;

	*handle = ARM_MAPPING_ERROR;
	size = PAGE_ALIGN(size);

	if (coherent_flag == COHERENT || !gfpflags_allow_blocking(gfp))
		return __iommu_alloc_simple(dev, size, gfp, handle,
					    coherent_flag, attrs);

	/*
	 * Following is a work-around (a.k.a. hack) to prevent pages
	 * with __GFP_COMP being passed to split_page() which cannot
	 * handle them.  The real problem is that this flag probably
	 * should be 0 on ARM as it is not supported on this
	 * platform; see CONFIG_HUGETLBFS.
	 */
	gfp &= ~(__GFP_COMP);

	pages = __iommu_alloc_buffer(dev, size, gfp, attrs, coherent_flag);
	if (!pages)
		return NULL;

	*handle = __iommu_create_mapping(dev, pages, size, attrs);
	if (*handle == ARM_MAPPING_ERROR)
		goto err_buffer;

	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
		return pages;

	addr = __iommu_alloc_remap(pages, size, gfp, prot,
				   __builtin_return_address(0));
	if (!addr)
		goto err_mapping;

	return addr;

err_mapping:
	__iommu_remove_mapping(dev, *handle, size);
err_buffer:
	__iommu_free_buffer(dev, pages, size, attrs);
	return NULL;
}
static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
	    dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
	return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, NORMAL);
}

static void *arm_coherent_iommu_alloc_attrs(struct device *dev, size_t size,
		    dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
	return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, COHERENT);
}
static int __arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t dma_addr, size_t size,
		    unsigned long attrs)
{
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;
	struct page **pages = __iommu_get_pages(cpu_addr, attrs);
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;

	if (!pages)
		return -ENXIO;

	if (off >= nr_pages || (usize >> PAGE_SHIFT) > nr_pages - off)
		return -ENXIO;

	pages += off;

	do {
		int ret = vm_insert_page(vma, uaddr, *pages++);
		if (ret) {
			pr_err("Remapping memory failed: %d\n", ret);
			return ret;
		}
		uaddr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);

	return 0;
}
static int arm_iommu_mmap_attrs(struct device *dev,
		struct vm_area_struct *vma, void *cpu_addr,
		dma_addr_t dma_addr, size_t size, unsigned long attrs)
{
	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);

	return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs);
}

static int arm_coherent_iommu_mmap_attrs(struct device *dev,
		struct vm_area_struct *vma, void *cpu_addr,
		dma_addr_t dma_addr, size_t size, unsigned long attrs)
{
	return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs);
}
/*
 * free a page as defined by the above mapping.
 * Must not be called with IRQs disabled.
 */
void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
	dma_addr_t handle, unsigned long attrs, int coherent_flag)
{
	struct page **pages;
	size = PAGE_ALIGN(size);

	if (coherent_flag == COHERENT || __in_atomic_pool(cpu_addr, size)) {
		__iommu_free_atomic(dev, cpu_addr, handle, size, coherent_flag);
		return;
	}

	pages = __iommu_get_pages(cpu_addr, attrs);
	if (!pages) {
		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
		return;
	}

	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0) {
		dma_common_free_remap(cpu_addr, size,
			VM_ARM_DMA_CONSISTENT | VM_USERMAP);
	}

	__iommu_remove_mapping(dev, handle, size);
	__iommu_free_buffer(dev, pages, size, attrs);
}
void arm_iommu_free_attrs(struct device *dev, size_t size,
		    void *cpu_addr, dma_addr_t handle, unsigned long attrs)
{
	__arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, NORMAL);
}

void arm_coherent_iommu_free_attrs(struct device *dev, size_t size,
		    void *cpu_addr, dma_addr_t handle, unsigned long attrs)
{
	__arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, COHERENT);
}
static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
				 void *cpu_addr, dma_addr_t dma_addr,
				 size_t size, unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page **pages = __iommu_get_pages(cpu_addr, attrs);

	if (!pages)
		return -ENXIO;

	return sg_alloc_table_from_pages(sgt, pages, count, 0, size,
					 GFP_KERNEL);
}
/*
 * Map a part of the scatter-gather list into contiguous io address space
 */
static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
			  size_t size, dma_addr_t *handle,
			  enum dma_data_direction dir, unsigned long attrs,
			  bool is_coherent)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t iova, iova_base;
	int ret = 0;
	unsigned int count;
	struct scatterlist *s;
	int prot;

	size = PAGE_ALIGN(size);
	*handle = ARM_MAPPING_ERROR;

	iova_base = iova = __alloc_iova(mapping, size);
	if (iova == ARM_MAPPING_ERROR)
		return -ENOMEM;

	for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
		phys_addr_t phys = page_to_phys(sg_page(s));
		unsigned int len = PAGE_ALIGN(s->offset + s->length);

		if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
			__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);

		prot = __dma_info_to_prot(dir, attrs);

		ret = iommu_map(mapping->domain, iova, phys, len, prot);
		if (ret < 0)
			goto fail;
		count += len >> PAGE_SHIFT;
		iova += len;
	}
	*handle = iova_base;

	return 0;
fail:
	iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE);
	__free_iova(mapping, iova_base, size);
	return ret;
}
static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		     enum dma_data_direction dir, unsigned long attrs,
		     bool is_coherent)
{
	struct scatterlist *s = sg, *dma = sg, *start = sg;
	int i, count = 0;
	unsigned int offset = s->offset;
	unsigned int size = s->offset + s->length;
	unsigned int max = dma_get_max_seg_size(dev);

	for (i = 1; i < nents; i++) {
		s = sg_next(s);

		s->dma_address = ARM_MAPPING_ERROR;
		s->dma_length = 0;

		if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
			if (__map_sg_chunk(dev, start, size, &dma->dma_address,
			    dir, attrs, is_coherent) < 0)
				goto bad_mapping;

			dma->dma_address += offset;
			dma->dma_length = size - offset;

			size = offset = s->offset;
			start = s;
			dma = sg_next(dma);
			count += 1;
		}
		size += s->length;
	}
	if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs,
		is_coherent) < 0)
		goto bad_mapping;

	dma->dma_address += offset;
	dma->dma_length = size - offset;

	return count+1;

bad_mapping:
	for_each_sg(sg, s, count, i)
		__iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s));
	return 0;
}
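/*
 * Illustrative example of the merging done in __iommu_map_sg(): three
 * page-aligned 4 KiB entries become one 12 KiB chunk mapped at a single
 * IO virtual address, so the caller sees one DMA segment.  An entry with
 * a non-zero page offset, or one that would push the chunk past the
 * device's maximum segment size, starts a new chunk instead.
 */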
/**
 * arm_coherent_iommu_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of i/o coherent buffers described by scatterlist in streaming
 * mode for DMA. The scatter gather list elements are merged together (if
 * possible) and tagged with the appropriate dma address and length. They are
 * obtained via sg_dma_{address,length}.
 */
int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	return __iommu_map_sg(dev, sg, nents, dir, attrs, true);
}

/**
 * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * The scatter gather list elements are merged together (if possible) and
 * tagged with the appropriate dma address and length. They are obtained via
 * sg_dma_{address,length}.
 */
int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	return __iommu_map_sg(dev, sg, nents, dir, attrs, false);
}
static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir,
		unsigned long attrs, bool is_coherent)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_len(s))
			__iommu_remove_mapping(dev, sg_dma_address(s),
					       sg_dma_len(s));
		if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
			__dma_page_dev_to_cpu(sg_page(s), s->offset,
					      s->length, dir);
	}
}
/**
 * arm_coherent_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations.  Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
void arm_coherent_iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
	__iommu_unmap_sg(dev, sg, nents, dir, attrs, true);
}

/**
 * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations.  Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
			enum dma_data_direction dir,
			unsigned long attrs)
{
	__iommu_unmap_sg(dev, sg, nents, dir, attrs, false);
}
/**
 * arm_iommu_sync_sg_for_cpu
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		__dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
}

/**
 * arm_iommu_sync_sg_for_device
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
}
/**
 * arm_coherent_iommu_map_page
 * @dev: valid struct device pointer
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Coherent IOMMU aware version of arm_dma_map_page()
 */
static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     unsigned long attrs)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t dma_addr;
	int ret, prot, len = PAGE_ALIGN(size + offset);

	dma_addr = __alloc_iova(mapping, len);
	if (dma_addr == ARM_MAPPING_ERROR)
		return dma_addr;

	prot = __dma_info_to_prot(dir, attrs);

	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot);
	if (ret < 0)
		goto fail;

	return dma_addr + offset;
fail:
	__free_iova(mapping, dma_addr, len);
	return ARM_MAPPING_ERROR;
}
/**
 * arm_iommu_map_page
 * @dev: valid struct device pointer
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * IOMMU aware version of arm_dma_map_page()
 */
static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     unsigned long attrs)
{
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__dma_page_cpu_to_dev(page, offset, size, dir);

	return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs);
}
/**
 * arm_coherent_iommu_unmap_page
 * @dev: valid struct device pointer
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Coherent IOMMU aware version of arm_dma_unmap_page()
 */
static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t iova = handle & PAGE_MASK;
	int offset = handle & ~PAGE_MASK;
	int len = PAGE_ALIGN(size + offset);

	if (!iova)
		return;

	iommu_unmap(mapping->domain, iova, len);
	__free_iova(mapping, iova, len);
}
/**
 * arm_iommu_unmap_page
 * @dev: valid struct device pointer
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * IOMMU aware version of arm_dma_unmap_page()
 */
static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t iova = handle & PAGE_MASK;
	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
	int offset = handle & ~PAGE_MASK;
	int len = PAGE_ALIGN(size + offset);

	if (!iova)
		return;

	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__dma_page_dev_to_cpu(page, offset, size, dir);

	iommu_unmap(mapping->domain, iova, len);
	__free_iova(mapping, iova, len);
}
/**
 * arm_iommu_map_resource - map a device resource for DMA
 * @dev: valid struct device pointer
 * @phys_addr: physical address of resource
 * @size: size of resource to map
 * @dir: DMA transfer direction
 */
static dma_addr_t arm_iommu_map_resource(struct device *dev,
		phys_addr_t phys_addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t dma_addr;
	int ret, prot;
	phys_addr_t addr = phys_addr & PAGE_MASK;
	unsigned int offset = phys_addr & ~PAGE_MASK;
	size_t len = PAGE_ALIGN(size + offset);

	dma_addr = __alloc_iova(mapping, len);
	if (dma_addr == ARM_MAPPING_ERROR)
		return dma_addr;

	prot = __dma_info_to_prot(dir, attrs) | IOMMU_MMIO;

	ret = iommu_map(mapping->domain, dma_addr, addr, len, prot);
	if (ret < 0)
		goto fail;

	return dma_addr + offset;
fail:
	__free_iova(mapping, dma_addr, len);
	return ARM_MAPPING_ERROR;
}
/**
 * arm_iommu_unmap_resource - unmap a device DMA resource
 * @dev: valid struct device pointer
 * @dma_handle: DMA address to resource
 * @size: size of resource to map
 * @dir: DMA transfer direction
 */
static void arm_iommu_unmap_resource(struct device *dev, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t iova = dma_handle & PAGE_MASK;
	unsigned int offset = dma_handle & ~PAGE_MASK;
	size_t len = PAGE_ALIGN(size + offset);

	if (!iova)
		return;

	iommu_unmap(mapping->domain, iova, len);
	__free_iova(mapping, iova, len);
}
static void arm_iommu_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t iova = handle & PAGE_MASK;
	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
	unsigned int offset = handle & ~PAGE_MASK;

	if (!iova)
		return;

	__dma_page_dev_to_cpu(page, offset, size, dir);
}

static void arm_iommu_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t iova = handle & PAGE_MASK;
	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
	unsigned int offset = handle & ~PAGE_MASK;

	if (!iova)
		return;

	__dma_page_cpu_to_dev(page, offset, size, dir);
}
const struct dma_map_ops iommu_ops = {
	.alloc		= arm_iommu_alloc_attrs,
	.free		= arm_iommu_free_attrs,
	.mmap		= arm_iommu_mmap_attrs,
	.get_sgtable	= arm_iommu_get_sgtable,

	.map_page		= arm_iommu_map_page,
	.unmap_page		= arm_iommu_unmap_page,
	.sync_single_for_cpu	= arm_iommu_sync_single_for_cpu,
	.sync_single_for_device	= arm_iommu_sync_single_for_device,

	.map_sg			= arm_iommu_map_sg,
	.unmap_sg		= arm_iommu_unmap_sg,
	.sync_sg_for_cpu	= arm_iommu_sync_sg_for_cpu,
	.sync_sg_for_device	= arm_iommu_sync_sg_for_device,

	.map_resource		= arm_iommu_map_resource,
	.unmap_resource		= arm_iommu_unmap_resource,

	.mapping_error		= arm_dma_mapping_error,
	.dma_supported		= arm_dma_supported,
};
const struct dma_map_ops iommu_coherent_ops = {
	.alloc		= arm_coherent_iommu_alloc_attrs,
	.free		= arm_coherent_iommu_free_attrs,
	.mmap		= arm_coherent_iommu_mmap_attrs,
	.get_sgtable	= arm_iommu_get_sgtable,

	.map_page	= arm_coherent_iommu_map_page,
	.unmap_page	= arm_coherent_iommu_unmap_page,

	.map_sg		= arm_coherent_iommu_map_sg,
	.unmap_sg	= arm_coherent_iommu_unmap_sg,

	.map_resource	= arm_iommu_map_resource,
	.unmap_resource	= arm_iommu_unmap_resource,

	.mapping_error	= arm_dma_mapping_error,
	.dma_supported	= arm_dma_supported,
};
/**
 * arm_iommu_create_mapping
 * @bus: pointer to the bus holding the client device (for IOMMU calls)
 * @base: start address of the valid IO address space
 * @size: maximum size of the valid IO address space
 *
 * Creates a mapping structure which holds information about used/unused
 * IO address ranges, which is required to perform memory allocation and
 * mapping with IOMMU aware functions.
 *
 * The client device need to be attached to the mapping with
 * arm_iommu_attach_device function.
 */
struct dma_iommu_mapping *
arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, u64 size)
{
	unsigned int bits = size >> PAGE_SHIFT;
	unsigned int bitmap_size = BITS_TO_LONGS(bits) * sizeof(long);
	struct dma_iommu_mapping *mapping;
	int extensions = 1;
	int err = -ENOMEM;

	/* currently only 32-bit DMA address space is supported */
	if (size > DMA_BIT_MASK(32) + 1)
		return ERR_PTR(-ERANGE);

	if (!bitmap_size)
		return ERR_PTR(-EINVAL);

	if (bitmap_size > PAGE_SIZE) {
		extensions = bitmap_size / PAGE_SIZE;
		bitmap_size = PAGE_SIZE;
	}

	mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL);
	if (!mapping)
		goto err;

	mapping->bitmap_size = bitmap_size;
	mapping->bitmaps = kzalloc(extensions * sizeof(unsigned long *),
				   GFP_KERNEL);
	if (!mapping->bitmaps)
		goto err2;

	mapping->bitmaps[0] = kzalloc(bitmap_size, GFP_KERNEL);
	if (!mapping->bitmaps[0])
		goto err3;

	mapping->nr_bitmaps = 1;
	mapping->extensions = extensions;
	mapping->base = base;
	mapping->bits = BITS_PER_BYTE * bitmap_size;

	spin_lock_init(&mapping->lock);

	mapping->domain = iommu_domain_alloc(bus);
	if (!mapping->domain)
		goto err4;

	kref_init(&mapping->kref);
	return mapping;
err4:
	kfree(mapping->bitmaps[0]);
err3:
	kfree(mapping->bitmaps);
err2:
	kfree(mapping);
err:
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(arm_iommu_create_mapping);
static void release_iommu_mapping(struct kref *kref)
{
	int i;
	struct dma_iommu_mapping *mapping =
		container_of(kref, struct dma_iommu_mapping, kref);

	iommu_domain_free(mapping->domain);
	for (i = 0; i < mapping->nr_bitmaps; i++)
		kfree(mapping->bitmaps[i]);
	kfree(mapping->bitmaps);
	kfree(mapping);
}

static int extend_iommu_mapping(struct dma_iommu_mapping *mapping)
{
	int next_bitmap;

	if (mapping->nr_bitmaps >= mapping->extensions)
		return -EINVAL;

	next_bitmap = mapping->nr_bitmaps;
	mapping->bitmaps[next_bitmap] = kzalloc(mapping->bitmap_size,
						GFP_ATOMIC);
	if (!mapping->bitmaps[next_bitmap])
		return -ENOMEM;

	mapping->nr_bitmaps++;

	return 0;
}

void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
{
	if (mapping)
		kref_put(&mapping->kref, release_iommu_mapping);
}
EXPORT_SYMBOL_GPL(arm_iommu_release_mapping);
static int __arm_iommu_attach_device(struct device *dev,
				     struct dma_iommu_mapping *mapping)
{
	int err;

	err = iommu_attach_device(mapping->domain, dev);
	if (err)
		return err;

	kref_get(&mapping->kref);
	to_dma_iommu_mapping(dev) = mapping;

	pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
	return 0;
}

/**
 * arm_iommu_attach_device
 * @dev: valid struct device pointer
 * @mapping: io address space mapping structure (returned from
 *	arm_iommu_create_mapping)
 *
 * Attaches specified io address space mapping to the provided device.
 * This replaces the dma operations (dma_map_ops pointer) with the
 * IOMMU aware version.
 *
 * More than one client might be attached to the same io address space
 * mapping.
 */
int arm_iommu_attach_device(struct device *dev,
			    struct dma_iommu_mapping *mapping)
{
	int err;

	err = __arm_iommu_attach_device(dev, mapping);
	if (err)
		return err;

	set_dma_ops(dev, &iommu_ops);
	return 0;
}
EXPORT_SYMBOL_GPL(arm_iommu_attach_device);
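/*
 * Usage sketch (hypothetical driver probe; error handling abbreviated):
 *
 *	struct dma_iommu_mapping *mapping;
 *
 *	mapping = arm_iommu_create_mapping(&platform_bus_type,
 *					   0x80000000, SZ_128M);
 *	if (IS_ERR(mapping))
 *		return PTR_ERR(mapping);
 *	if (arm_iommu_attach_device(dev, mapping)) {
 *		arm_iommu_release_mapping(mapping);
 *		return -ENODEV;
 *	}
 *	// dev now allocates and maps DMA through iommu_ops
 */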
/**
 * arm_iommu_detach_device
 * @dev: valid struct device pointer
 *
 * Detaches the provided device from a previously attached map.
 * This voids the dma operations (dma_map_ops pointer)
 */
void arm_iommu_detach_device(struct device *dev)
{
	struct dma_iommu_mapping *mapping;

	mapping = to_dma_iommu_mapping(dev);
	if (!mapping) {
		dev_warn(dev, "Not attached\n");
		return;
	}

	iommu_detach_device(mapping->domain, dev);
	kref_put(&mapping->kref, release_iommu_mapping);
	to_dma_iommu_mapping(dev) = NULL;
	set_dma_ops(dev, NULL);

	pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
}
EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
static const struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent)
{
	return coherent ? &iommu_coherent_ops : &iommu_ops;
}

static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
				    const struct iommu_ops *iommu)
{
	struct dma_iommu_mapping *mapping;

	if (!iommu)
		return false;

	mapping = arm_iommu_create_mapping(dev->bus, dma_base, size);
	if (IS_ERR(mapping)) {
		pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n",
				size, dev_name(dev));
		return false;
	}

	if (__arm_iommu_attach_device(dev, mapping)) {
		pr_warn("Failed to attached device %s to IOMMU_mapping\n",
				dev_name(dev));
		arm_iommu_release_mapping(mapping);
		return false;
	}

	return true;
}

static void arm_teardown_iommu_dma_ops(struct device *dev)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);

	if (!mapping)
		return;

	arm_iommu_detach_device(dev);
	arm_iommu_release_mapping(mapping);
}

#else

static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
				    const struct iommu_ops *iommu)
{
	return false;
}

static void arm_teardown_iommu_dma_ops(struct device *dev) { }

#define arm_get_iommu_dma_map_ops arm_get_dma_map_ops

#endif	/* CONFIG_ARM_DMA_USE_IOMMU */
static const struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
{
	return coherent ? &arm_coherent_dma_ops : &arm_dma_ops;
}

void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
{
	const struct dma_map_ops *dma_ops;

	dev->archdata.dma_coherent = coherent;

	/*
	 * Don't override the dma_ops if they have already been set. Ideally
	 * this should be the only location where dma_ops are set, remove this
	 * check when all other callers of set_dma_ops will have disappeared.
	 */
	if (dev->dma_ops)
		return;

	if (arm_setup_iommu_dma_ops(dev, dma_base, size, iommu))
		dma_ops = arm_get_iommu_dma_map_ops(coherent);
	else
		dma_ops = arm_get_dma_map_ops(coherent);

	set_dma_ops(dev, dma_ops);

#ifdef CONFIG_XEN
	if (xen_initial_domain()) {
		dev->archdata.dev_dma_ops = dev->dma_ops;
		dev->dma_ops = xen_dma_ops;
	}
#endif
	dev->archdata.dma_ops_setup = true;
}

void arch_teardown_dma_ops(struct device *dev)
{
	if (!dev->archdata.dma_ops_setup)
		return;

	arm_teardown_iommu_dma_ops(dev);
}