/*
 *  linux/arch/arm/mm/dma-mapping.c
 *
 *  Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  DMA uncached mapping support.
 */
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/genalloc.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/sizes.h>
#include <linux/cma.h>

#include <asm/memory.h>
#include <asm/highmem.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/mach/arch.h>
#include <asm/dma-iommu.h>
#include <asm/mach/map.h>
#include <asm/system_info.h>
#include <asm/dma-contiguous.h>

#include "dma.h"
#include "mm.h"

struct arm_dma_alloc_args {
	struct device *dev;
	size_t size;
	gfp_t gfp;
	pgprot_t prot;
	const void *caller;
	bool want_vaddr;
	int coherent_flag;
};

struct arm_dma_free_args {
	struct device *dev;
	size_t size;
	void *cpu_addr;
	struct page *page;
	bool want_vaddr;
};

#define NORMAL		0
#define COHERENT	1

struct arm_dma_allocator {
	void *(*alloc)(struct arm_dma_alloc_args *args,
		       struct page **ret_page);
	void (*free)(struct arm_dma_free_args *args);
};

struct arm_dma_buffer {
	struct list_head list;
	void *virt;
	struct arm_dma_allocator *allocator;
};

static LIST_HEAD(arm_dma_bufs);
static DEFINE_SPINLOCK(arm_dma_bufs_lock);

static struct arm_dma_buffer *arm_dma_buffer_find(void *virt)
{
	struct arm_dma_buffer *buf, *found = NULL;
	unsigned long flags;

	spin_lock_irqsave(&arm_dma_bufs_lock, flags);
	list_for_each_entry(buf, &arm_dma_bufs, list) {
		if (buf->virt == virt) {
			list_del(&buf->list);
			found = buf;
			break;
		}
	}
	spin_unlock_irqrestore(&arm_dma_bufs_lock, flags);
	return found;
}

/*
 * The DMA API is built upon the notion of "buffer ownership".  A buffer
 * is either exclusively owned by the CPU (and therefore may be accessed
 * by it) or exclusively owned by the DMA device.  These helper functions
 * represent the transitions between these two ownership states.
 *
 * Note, however, that on later ARMs, this notion does not work due to
 * speculative prefetches.  We model our approach on the assumption that
 * the CPU does do speculative prefetches, which means we clean caches
 * before transfers and delay cache invalidation until transfer completion.
 */
static void __dma_page_cpu_to_dev(struct page *, unsigned long,
		size_t, enum dma_data_direction);
static void __dma_page_dev_to_cpu(struct page *, unsigned long,
		size_t, enum dma_data_direction);

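/*
 * Illustrative sketch of the ownership transitions above: a driver using
 * the streaming API hands a buffer to the device with a map call (which
 * does the cpu-to-dev cache maintenance) and reclaims it with the matching
 * unmap call (the dev-to-cpu side), for example:
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	... start the transfer and wait for completion ...
 *	dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);
 *	... buf is owned by the CPU again and safe to read ...
 */
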
/**
 * arm_dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_page().
 */
static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     unsigned long attrs)
{
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__dma_page_cpu_to_dev(page, offset, size, dir);
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}

static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     unsigned long attrs)
{
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}

/**
 * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Unmap a page streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
				      handle & ~PAGE_MASK, size, dir);
}

static void arm_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned int offset = handle & (PAGE_SIZE - 1);
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
	__dma_page_dev_to_cpu(page, offset, size, dir);
}

static void arm_dma_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned int offset = handle & (PAGE_SIZE - 1);
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
	__dma_page_cpu_to_dev(page, offset, size, dir);
}

struct dma_map_ops arm_dma_ops = {
	.alloc			= arm_dma_alloc,
	.free			= arm_dma_free,
	.mmap			= arm_dma_mmap,
	.get_sgtable		= arm_dma_get_sgtable,
	.map_page		= arm_dma_map_page,
	.unmap_page		= arm_dma_unmap_page,
	.map_sg			= arm_dma_map_sg,
	.unmap_sg		= arm_dma_unmap_sg,
	.sync_single_for_cpu	= arm_dma_sync_single_for_cpu,
	.sync_single_for_device	= arm_dma_sync_single_for_device,
	.sync_sg_for_cpu	= arm_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= arm_dma_sync_sg_for_device,
};
EXPORT_SYMBOL(arm_dma_ops);

static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
	dma_addr_t *handle, gfp_t gfp, unsigned long attrs);
static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
				  dma_addr_t handle, unsigned long attrs);
static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 unsigned long attrs);

struct dma_map_ops arm_coherent_dma_ops = {
	.alloc			= arm_coherent_dma_alloc,
	.free			= arm_coherent_dma_free,
	.mmap			= arm_coherent_dma_mmap,
	.get_sgtable		= arm_dma_get_sgtable,
	.map_page		= arm_coherent_dma_map_page,
	.map_sg			= arm_dma_map_sg,
};
EXPORT_SYMBOL(arm_coherent_dma_ops);

static int __dma_supported(struct device *dev, u64 mask, bool warn)
{
	unsigned long max_dma_pfn;

	/*
	 * If the mask allows for more memory than we can address,
	 * and we actually have that much memory, then we must
	 * indicate that DMA to this device is not supported.
	 */
	if (sizeof(mask) != sizeof(dma_addr_t) &&
	    mask > (dma_addr_t)~0 &&
	    dma_to_pfn(dev, ~0) < max_pfn - 1) {
		if (warn) {
			dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n",
				 mask);
			dev_warn(dev, "Driver did not use or check the return value from dma_set_coherent_mask()?\n");
		}
		return 0;
	}

	max_dma_pfn = min(max_pfn, arm_dma_pfn_limit);

	/*
	 * Translate the device's DMA mask to a PFN limit.  This
	 * PFN number includes the page which we can DMA to.
	 */
	if (dma_to_pfn(dev, mask) < max_dma_pfn) {
		if (warn)
			dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
				 mask,
				 dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
				 max_dma_pfn + 1);
		return 0;
	}

	return 1;
}
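/*
 * For example, a device whose coherent mask is DMA_BIT_MASK(24) can only
 * address the first 16MiB of DMA space; on a platform whose DMA-capable
 * memory extends past that PFN limit, the check above rejects the mask and
 * dma_set_coherent_mask() fails for that device.
 */
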

static u64 get_coherent_dma_mask(struct device *dev)
{
	u64 mask = (u64)DMA_BIT_MASK(32);

	if (dev) {
		mask = dev->coherent_dma_mask;

		/*
		 * Sanity check the DMA mask - it must be non-zero, and
		 * must be able to be satisfied by a DMA allocation.
		 */
		if (mask == 0) {
			dev_warn(dev, "coherent DMA mask is unset\n");
			return 0;
		}

		if (!__dma_supported(dev, mask, true))
			return 0;
	}

	return mask;
}

static void __dma_clear_buffer(struct page *page, size_t size, int coherent_flag)
{
	/*
	 * Ensure that the allocated pages are zeroed, and that any data
	 * lurking in the kernel direct-mapped region is invalidated.
	 */
	if (PageHighMem(page)) {
		phys_addr_t base = __pfn_to_phys(page_to_pfn(page));
		phys_addr_t end = base + size;
		while (size > 0) {
			void *ptr = kmap_atomic(page);
			memset(ptr, 0, PAGE_SIZE);
			if (coherent_flag != COHERENT)
				dmac_flush_range(ptr, ptr + PAGE_SIZE);
			kunmap_atomic(ptr);
			page++;
			size -= PAGE_SIZE;
		}
		if (coherent_flag != COHERENT)
			outer_flush_range(base, end);
	} else {
		void *ptr = page_address(page);
		memset(ptr, 0, size);
		if (coherent_flag != COHERENT) {
			dmac_flush_range(ptr, ptr + size);
			outer_flush_range(__pa(ptr), __pa(ptr) + size);
		}
	}
}

/*
 * Allocate a DMA buffer for 'dev' of size 'size' using the
 * specified gfp mask.  Note that 'size' must be page aligned.
 */
static struct page *__dma_alloc_buffer(struct device *dev, size_t size,
				       gfp_t gfp, int coherent_flag)
{
	unsigned long order = get_order(size);
	struct page *page, *p, *e;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/*
	 * Now split the huge page and free the excess pages
	 */
	split_page(page, order);
	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
		__free_page(p);

	__dma_clear_buffer(page, size, coherent_flag);

	return page;
}

/*
 * Free a DMA buffer.  'size' must be page aligned.
 */
static void __dma_free_buffer(struct page *page, size_t size)
{
	struct page *e = page + (size >> PAGE_SHIFT);

	while (page < e) {
		__free_page(page);
		page++;
	}
}

#ifdef CONFIG_MMU

static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page,
				     const void *caller, bool want_vaddr,
				     int coherent_flag);

static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
				  pgprot_t prot, struct page **ret_page,
				  const void *caller, bool want_vaddr);

static void *
__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
	const void *caller)
{
	/*
	 * DMA allocation can be mapped to user space, so lets
	 * set VM_USERMAP flags too.
	 */
	return dma_common_contiguous_remap(page, size,
			VM_ARM_DMA_CONSISTENT | VM_USERMAP,
			prot, caller);
}

static void __dma_free_remap(void *cpu_addr, size_t size)
{
	dma_common_free_remap(cpu_addr, size,
			VM_ARM_DMA_CONSISTENT | VM_USERMAP);
}

#define DEFAULT_DMA_COHERENT_POOL_SIZE	SZ_256K
static struct gen_pool *atomic_pool;

static size_t atomic_pool_size = DEFAULT_DMA_COHERENT_POOL_SIZE;

static int __init early_coherent_pool(char *p)
{
	atomic_pool_size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);
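/*
 * For example, booting with "coherent_pool=2M" on the kernel command line
 * reserves a 2MiB atomic pool instead of the default 256KiB, which can be
 * necessary when drivers make many coherent allocations from atomic
 * context.
 */
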

void __init init_dma_coherent_pool_size(unsigned long size)
{
	/*
	 * Catch any attempt to set the pool size too late.
	 */
	BUG_ON(atomic_pool);

	/*
	 * Set architecture specific coherent pool size only if
	 * it has not been changed by kernel command line parameter.
	 */
	if (atomic_pool_size == DEFAULT_DMA_COHERENT_POOL_SIZE)
		atomic_pool_size = size;
}

/*
 * Initialise the coherent pool for atomic allocations.
 */
static int __init atomic_pool_init(void)
{
	pgprot_t prot = pgprot_dmacoherent(PAGE_KERNEL);
	gfp_t gfp = GFP_KERNEL | GFP_DMA;
	struct page *page;
	void *ptr;

	atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
	if (!atomic_pool)
		goto out;
	/*
	 * The atomic pool is only used for non-coherent allocations
	 * so we must pass NORMAL for coherent_flag.
	 */
	if (dev_get_cma_area(NULL))
		ptr = __alloc_from_contiguous(NULL, atomic_pool_size, prot,
				      &page, atomic_pool_init, true, NORMAL);
	else
		ptr = __alloc_remap_buffer(NULL, atomic_pool_size, gfp, prot,
					   &page, atomic_pool_init, true);
	if (ptr) {
		int ret;

		ret = gen_pool_add_virt(atomic_pool, (unsigned long)ptr,
					page_to_phys(page),
					atomic_pool_size, -1);
		if (ret)
			goto destroy_genpool;

		gen_pool_set_algo(atomic_pool,
				gen_pool_first_fit_order_align,
				(void *)PAGE_SHIFT);
		pr_info("DMA: preallocated %zd KiB pool for atomic coherent allocations\n",
		       atomic_pool_size / 1024);
		return 0;
	}

destroy_genpool:
	gen_pool_destroy(atomic_pool);
	atomic_pool = NULL;
out:
	pr_err("DMA: failed to allocate %zx KiB pool for atomic coherent allocation\n",
	       atomic_pool_size / 1024);
	return -ENOMEM;
}
/*
 * CMA is activated by core_initcall, so we must be called after it.
 */
postcore_initcall(atomic_pool_init);

struct dma_contig_early_reserve {
	phys_addr_t base;
	unsigned long size;
};

static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS] __initdata;

static int dma_mmu_remap_num __initdata;

void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
{
	dma_mmu_remap[dma_mmu_remap_num].base = base;
	dma_mmu_remap[dma_mmu_remap_num].size = size;
	dma_mmu_remap_num++;
}

void __init dma_contiguous_remap(void)
{
	int i;
	for (i = 0; i < dma_mmu_remap_num; i++) {
		phys_addr_t start = dma_mmu_remap[i].base;
		phys_addr_t end = start + dma_mmu_remap[i].size;
		struct map_desc map;
		unsigned long addr;

		if (end > arm_lowmem_limit)
			end = arm_lowmem_limit;
		if (start >= end)
			continue;

		map.pfn = __phys_to_pfn(start);
		map.virtual = __phys_to_virt(start);
		map.length = end - start;
		map.type = MT_MEMORY_DMA_READY;

		/*
		 * Clear previous low-memory mapping to ensure that the
		 * TLB does not see any conflicting entries, then flush
		 * the TLB of the old entries before creating new mappings.
		 *
		 * This ensures that any speculatively loaded TLB entries
		 * (even though they may be rare) can not cause any problems,
		 * and ensures that this code is architecturally compliant.
		 */
		for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
		     addr += PMD_SIZE)
			pmd_clear(pmd_off_k(addr));

		flush_tlb_kernel_range(__phys_to_virt(start),
				       __phys_to_virt(end));

		iotable_init(&map, 1);
	}
}

static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr,
			    void *data)
{
	struct page *page = virt_to_page(addr);
	pgprot_t prot = *(pgprot_t *)data;

	set_pte_ext(pte, mk_pte(page, prot), 0);
	return 0;
}

static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
{
	unsigned long start = (unsigned long) page_address(page);
	unsigned end = start + size;

	apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
	flush_tlb_kernel_range(start, end);
}

static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
				  pgprot_t prot, struct page **ret_page,
				  const void *caller, bool want_vaddr)
{
	struct page *page;
	void *ptr = NULL;
	/*
	 * __alloc_remap_buffer is only called when the device is
	 * non-coherent
	 */
	page = __dma_alloc_buffer(dev, size, gfp, NORMAL);
	if (!page)
		return NULL;
	if (!want_vaddr)
		goto out;

	ptr = __dma_alloc_remap(page, size, gfp, prot, caller);
	if (!ptr) {
		__dma_free_buffer(page, size);
		return NULL;
	}

 out:
	*ret_page = page;
	return ptr;
}

static void *__alloc_from_pool(size_t size, struct page **ret_page)
{
	unsigned long val;
	void *ptr = NULL;

	if (!atomic_pool) {
		WARN(1, "coherent pool not initialised!\n");
		return NULL;
	}

	val = gen_pool_alloc(atomic_pool, size);
	if (val) {
		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);

		*ret_page = phys_to_page(phys);
		ptr = (void *)val;
	}

	return ptr;
}

static bool __in_atomic_pool(void *start, size_t size)
{
	return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
}

static int __free_from_pool(void *start, size_t size)
{
	if (!__in_atomic_pool(start, size))
		return 0;

	gen_pool_free(atomic_pool, (unsigned long)start, size);

	return 1;
}

static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page,
				     const void *caller, bool want_vaddr,
				     int coherent_flag)
{
	unsigned long order = get_order(size);
	size_t count = size >> PAGE_SHIFT;
	struct page *page;
	void *ptr = NULL;

	page = dma_alloc_from_contiguous(dev, count, order);
	if (!page)
		return NULL;

	__dma_clear_buffer(page, size, coherent_flag);

	if (!want_vaddr)
		goto out;

	if (PageHighMem(page)) {
		ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot, caller);
		if (!ptr) {
			dma_release_from_contiguous(dev, page, count);
			return NULL;
		}
	} else {
		__dma_remap(page, size, prot);
		ptr = page_address(page);
	}

 out:
	*ret_page = page;
	return ptr;
}

static void __free_from_contiguous(struct device *dev, struct page *page,
				   void *cpu_addr, size_t size, bool want_vaddr)
{
	if (want_vaddr) {
		if (PageHighMem(page))
			__dma_free_remap(cpu_addr, size);
		else
			__dma_remap(page, size, PAGE_KERNEL);
	}
	dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
}

static inline pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot)
{
	prot = (attrs & DMA_ATTR_WRITE_COMBINE) ?
			pgprot_writecombine(prot) :
			pgprot_dmacoherent(prot);
	return prot;
}
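/*
 * For example, a driver that wants a write-combined rather than an uncached
 * mapping can pass DMA_ATTR_WRITE_COMBINE, roughly:
 *
 *	buf = dma_alloc_attrs(dev, size, &dma_handle, GFP_KERNEL,
 *			      DMA_ATTR_WRITE_COMBINE);
 *
 * which ends up here and selects pgprot_writecombine() for the mapping.
 */
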

#define nommu() 0

#else	/* !CONFIG_MMU */

#define nommu() 1

#define __get_dma_pgprot(attrs, prot)				__pgprot(0)
#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c, wv)	NULL
#define __alloc_from_pool(size, ret_page)			NULL
#define __alloc_from_contiguous(dev, size, prot, ret, c, wv, coherent_flag)	NULL
#define __free_from_pool(cpu_addr, size)			do { } while (0)
#define __free_from_contiguous(dev, page, cpu_addr, size, wv)	do { } while (0)
#define __dma_free_remap(cpu_addr, size)			do { } while (0)

#endif	/* CONFIG_MMU */

static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
				   struct page **ret_page)
{
	struct page *page;
	/* __alloc_simple_buffer is only called when the device is coherent */
	page = __dma_alloc_buffer(dev, size, gfp, COHERENT);
	if (!page)
		return NULL;

	*ret_page = page;
	return page_address(page);
}

static void *simple_allocator_alloc(struct arm_dma_alloc_args *args,
				    struct page **ret_page)
{
	return __alloc_simple_buffer(args->dev, args->size, args->gfp,
				     ret_page);
}

static void simple_allocator_free(struct arm_dma_free_args *args)
{
	__dma_free_buffer(args->page, args->size);
}

static struct arm_dma_allocator simple_allocator = {
	.alloc = simple_allocator_alloc,
	.free = simple_allocator_free,
};

static void *cma_allocator_alloc(struct arm_dma_alloc_args *args,
				 struct page **ret_page)
{
	return __alloc_from_contiguous(args->dev, args->size, args->prot,
				       ret_page, args->caller,
				       args->want_vaddr, args->coherent_flag);
}

static void cma_allocator_free(struct arm_dma_free_args *args)
{
	__free_from_contiguous(args->dev, args->page, args->cpu_addr,
			       args->size, args->want_vaddr);
}

static struct arm_dma_allocator cma_allocator = {
	.alloc = cma_allocator_alloc,
	.free = cma_allocator_free,
};

static void *pool_allocator_alloc(struct arm_dma_alloc_args *args,
				  struct page **ret_page)
{
	return __alloc_from_pool(args->size, ret_page);
}

static void pool_allocator_free(struct arm_dma_free_args *args)
{
	__free_from_pool(args->cpu_addr, args->size);
}

static struct arm_dma_allocator pool_allocator = {
	.alloc = pool_allocator_alloc,
	.free = pool_allocator_free,
};

static void *remap_allocator_alloc(struct arm_dma_alloc_args *args,
				   struct page **ret_page)
{
	return __alloc_remap_buffer(args->dev, args->size, args->gfp,
				    args->prot, ret_page, args->caller,
				    args->want_vaddr);
}

static void remap_allocator_free(struct arm_dma_free_args *args)
{
	if (args->want_vaddr)
		__dma_free_remap(args->cpu_addr, args->size);

	__dma_free_buffer(args->page, args->size);
}

static struct arm_dma_allocator remap_allocator = {
	.alloc = remap_allocator_alloc,
	.free = remap_allocator_free,
};

static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
			 gfp_t gfp, pgprot_t prot, bool is_coherent,
			 unsigned long attrs, const void *caller)
{
	u64 mask = get_coherent_dma_mask(dev);
	struct page *page = NULL;
	void *addr;
	bool allowblock, cma;
	struct arm_dma_buffer *buf;
	struct arm_dma_alloc_args args = {
		.dev = dev,
		.size = PAGE_ALIGN(size),
		.gfp = gfp,
		.prot = prot,
		.caller = caller,
		.want_vaddr = ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0),
		.coherent_flag = is_coherent ? COHERENT : NORMAL,
	};

#ifdef CONFIG_DMA_API_DEBUG
	u64 limit = (mask + 1) & ~mask;
	if (limit && size >= limit) {
		dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
			size, mask);
		return NULL;
	}
#endif

	if (!mask)
		return NULL;

	buf = kzalloc(sizeof(*buf),
		      gfp & ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM));
	if (!buf)
		return NULL;

	if (mask < 0xffffffffULL)
		gfp |= GFP_DMA;

	/*
	 * Following is a work-around (a.k.a. hack) to prevent pages
	 * with __GFP_COMP being passed to split_page() which cannot
	 * handle them.  The real problem is that this flag probably
	 * should be 0 on ARM as it is not supported on this
	 * platform; see CONFIG_HUGETLBFS.
	 */
	gfp &= ~(__GFP_COMP);
	args.gfp = gfp;

	*handle = DMA_ERROR_CODE;
	allowblock = gfpflags_allow_blocking(gfp);
	cma = allowblock ? dev_get_cma_area(dev) : false;

	if (cma)
		buf->allocator = &cma_allocator;
	else if (nommu() || is_coherent)
		buf->allocator = &simple_allocator;
	else if (allowblock)
		buf->allocator = &remap_allocator;
	else
		buf->allocator = &pool_allocator;

	addr = buf->allocator->alloc(&args, &page);

	if (page) {
		unsigned long flags;

		*handle = pfn_to_dma(dev, page_to_pfn(page));
		buf->virt = args.want_vaddr ? addr : page;

		spin_lock_irqsave(&arm_dma_bufs_lock, flags);
		list_add(&buf->list, &arm_dma_bufs);
		spin_unlock_irqrestore(&arm_dma_bufs_lock, flags);
	} else {
		kfree(buf);
	}

	return args.want_vaddr ? addr : page;
}

/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
		    gfp_t gfp, unsigned long attrs)
{
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);

	return __dma_alloc(dev, size, handle, gfp, prot, false,
			   attrs, __builtin_return_address(0));
}

static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
	dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
	return __dma_alloc(dev, size, handle, gfp, PAGE_KERNEL, true,
			   attrs, __builtin_return_address(0));
}
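/*
 * For example, a driver typically reaches arm_dma_alloc() above through the
 * generic helpers, roughly:
 *
 *	void *vaddr = dma_alloc_coherent(dev, size, &dma_handle, GFP_KERNEL);
 *	...
 *	dma_free_coherent(dev, size, vaddr, dma_handle);
 *
 * With GFP_KERNEL the allocation may block, so CMA or the remapping
 * allocator can be used; from atomic context the atomic pool is used
 * instead, as selected in __dma_alloc() above.
 */
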

static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 unsigned long attrs)
{
	int ret = -ENXIO;
#ifdef CONFIG_MMU
	unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = dma_to_pfn(dev, dma_addr);
	unsigned long off = vma->vm_pgoff;

	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      vma->vm_end - vma->vm_start,
				      vma->vm_page_prot);
	}
#endif	/* CONFIG_MMU */

	return ret;
}

/*
 * Create userspace mapping for the DMA-coherent memory.
 */
static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 unsigned long attrs)
{
	return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}

int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 unsigned long attrs)
{
#ifdef CONFIG_MMU
	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
#endif	/* CONFIG_MMU */
	return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}

/*
 * Free a buffer as defined by the above mapping.
 */
static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
			   dma_addr_t handle, unsigned long attrs,
			   bool is_coherent)
{
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
	struct arm_dma_buffer *buf;
	struct arm_dma_free_args args = {
		.dev = dev,
		.size = PAGE_ALIGN(size),
		.cpu_addr = cpu_addr,
		.page = page,
		.want_vaddr = ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0),
	};

	buf = arm_dma_buffer_find(cpu_addr);
	if (WARN(!buf, "Freeing invalid buffer %p\n", cpu_addr))
		return;

	buf->allocator->free(&args);
	kfree(buf);
}

void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
		  dma_addr_t handle, unsigned long attrs)
{
	__arm_dma_free(dev, size, cpu_addr, handle, attrs, false);
}

static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
				  dma_addr_t handle, unsigned long attrs)
{
	__arm_dma_free(dev, size, cpu_addr, handle, attrs, true);
}

int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
		 void *cpu_addr, dma_addr_t handle, size_t size,
		 unsigned long attrs)
{
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (unlikely(ret))
		return ret;

	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return 0;
}

static void dma_cache_maint_page(struct page *page, unsigned long offset,
	size_t size, enum dma_data_direction dir,
	void (*op)(const void *, size_t, int))
{
	unsigned long pfn;
	size_t left = size;

	pfn = page_to_pfn(page) + offset / PAGE_SIZE;
	offset %= PAGE_SIZE;

	/*
	 * A single sg entry may refer to multiple physically contiguous
	 * pages.  But we still need to process highmem pages individually.
	 * If highmem is not configured then the bulk of this loop gets
	 * optimized out.
	 */
	do {
		size_t len = left;
		void *vaddr;

		page = pfn_to_page(pfn);

		if (PageHighMem(page)) {
			if (len + offset > PAGE_SIZE)
				len = PAGE_SIZE - offset;

			if (cache_is_vipt_nonaliasing()) {
				vaddr = kmap_atomic(page);
				op(vaddr + offset, len, dir);
				kunmap_atomic(vaddr);
			} else {
				vaddr = kmap_high_get(page);
				if (vaddr) {
					op(vaddr + offset, len, dir);
					kunmap_high(page);
				}
			}
		} else {
			vaddr = page_address(page) + offset;
			op(vaddr, len, dir);
		}
		offset = 0;
		pfn++;
		left -= len;
	} while (left);
}

/*
 * Make an area consistent for devices.
 * Note: Drivers should NOT use this function directly, as it will break
 * platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr;

	dma_cache_maint_page(page, off, size, dir, dmac_map_area);

	paddr = page_to_phys(page) + off;
	if (dir == DMA_FROM_DEVICE) {
		outer_inv_range(paddr, paddr + size);
	} else {
		outer_clean_range(paddr, paddr + size);
	}
	/* FIXME: non-speculating: flush on bidirectional mappings? */
}

static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = page_to_phys(page) + off;

	/* FIXME: non-speculating: not required */
	/* in any case, don't bother invalidating if DMA to device */
	if (dir != DMA_TO_DEVICE) {
		outer_inv_range(paddr, paddr + size);

		dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
	}

	/*
	 * Mark the D-cache clean for these pages to avoid extra flushing.
	 */
	if (dir != DMA_TO_DEVICE && size >= PAGE_SIZE) {
		unsigned long pfn;
		size_t left = size;

		pfn = page_to_pfn(page) + off / PAGE_SIZE;
		off %= PAGE_SIZE;
		if (off) {
			pfn++;
			left -= PAGE_SIZE - off;
		}
		while (left >= PAGE_SIZE) {
			page = pfn_to_page(pfn++);
			set_bit(PG_dcache_clean, &page->flags);
			left -= PAGE_SIZE;
		}
	}
}

/**
 * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the dma_map_single interface.
 * Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}.
 *
 * Device ownership issues as mentioned for dma_map_single are the same
 * here.
 */
int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i, j;

	for_each_sg(sg, s, nents, i) {
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
		s->dma_address = ops->map_page(dev, sg_page(s), s->offset,
						s->length, dir, attrs);
		if (dma_mapping_error(dev, s->dma_address))
			goto bad_mapping;
	}
	return nents;

 bad_mapping:
	for_each_sg(sg, s, i, j)
		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
	return 0;
}
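/*
 * For example, a driver mapping a scatterlist and programming its
 * controller with the resulting addresses might do (sketch):
 *
 *	int count = dma_map_sg(dev, sglist, nents, DMA_TO_DEVICE);
 *	if (!count)
 *		return -ENOMEM;
 *	for_each_sg(sglist, sg, count, i)
 *		queue_descriptor(sg_dma_address(sg), sg_dma_len(sg));
 *	...
 *	dma_unmap_sg(dev, sglist, nents, DMA_TO_DEVICE);
 *
 * queue_descriptor() above is a hypothetical driver helper.
 */
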

/**
 * arm_dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations.  Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;

	int i;

	for_each_sg(sg, s, nents, i)
		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
}

/**
 * arm_dma_sync_sg_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		ops->sync_single_for_cpu(dev, sg_dma_address(s), s->length,
					 dir);
}

/**
 * arm_dma_sync_sg_for_device
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		ops->sync_single_for_device(dev, sg_dma_address(s), s->length,
					    dir);
}

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask
 * to this function.
 */
int dma_supported(struct device *dev, u64 mask)
{
	return __dma_supported(dev, mask, false);
}
EXPORT_SYMBOL(dma_supported);
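/*
 * For example, a driver for a device limited to 24-bit addressing would
 * typically call
 *
 *	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(24)))
 *		return -EIO;
 *
 * which ends up consulting dma_supported()/__dma_supported() above.
 */
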

#define PREALLOC_DMA_DEBUG_ENTRIES	4096

static int __init dma_debug_do_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_debug_do_init);

#ifdef CONFIG_ARM_DMA_USE_IOMMU

/* IOMMU */

static int extend_iommu_mapping(struct dma_iommu_mapping *mapping);

static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
				      size_t size)
{
	unsigned int order = get_order(size);
	unsigned int align = 0;
	unsigned int count, start;
	size_t mapping_size = mapping->bits << PAGE_SHIFT;
	unsigned long flags;
	dma_addr_t iova;
	int i;

	if (order > CONFIG_ARM_DMA_IOMMU_ALIGNMENT)
		order = CONFIG_ARM_DMA_IOMMU_ALIGNMENT;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	align = (1 << order) - 1;

	spin_lock_irqsave(&mapping->lock, flags);
	for (i = 0; i < mapping->nr_bitmaps; i++) {
		start = bitmap_find_next_zero_area(mapping->bitmaps[i],
				mapping->bits, 0, count, align);

		if (start > mapping->bits)
			continue;

		bitmap_set(mapping->bitmaps[i], start, count);
		break;
	}

	/*
	 * No unused range found. Try to extend the existing mapping
	 * and perform a second attempt to reserve an IO virtual
	 * address range of size bytes.
	 */
	if (i == mapping->nr_bitmaps) {
		if (extend_iommu_mapping(mapping)) {
			spin_unlock_irqrestore(&mapping->lock, flags);
			return DMA_ERROR_CODE;
		}

		start = bitmap_find_next_zero_area(mapping->bitmaps[i],
				mapping->bits, 0, count, align);

		if (start > mapping->bits) {
			spin_unlock_irqrestore(&mapping->lock, flags);
			return DMA_ERROR_CODE;
		}

		bitmap_set(mapping->bitmaps[i], start, count);
	}
	spin_unlock_irqrestore(&mapping->lock, flags);

	iova = mapping->base + (mapping_size * i);
	iova += start << PAGE_SHIFT;

	return iova;
}

static inline void __free_iova(struct dma_iommu_mapping *mapping,
			       dma_addr_t addr, size_t size)
{
	unsigned int start, count;
	size_t mapping_size = mapping->bits << PAGE_SHIFT;
	unsigned long flags;
	dma_addr_t bitmap_base;
	u32 bitmap_index;

	if (!size)
		return;

	bitmap_index = (u32) (addr - mapping->base) / (u32) mapping_size;
	BUG_ON(addr < mapping->base || bitmap_index > mapping->extensions);

	bitmap_base = mapping->base + mapping_size * bitmap_index;

	start = (addr - bitmap_base) >> PAGE_SHIFT;

	if (addr + size > bitmap_base + mapping_size) {
		/*
		 * The address range to be freed reaches into the iova
		 * range of the next bitmap. This should not happen as
		 * we don't allow this in __alloc_iova (at the
		 * moment).
		 */
		BUG();
	} else
		count = size >> PAGE_SHIFT;

	spin_lock_irqsave(&mapping->lock, flags);
	bitmap_clear(mapping->bitmaps[bitmap_index], start, count);
	spin_unlock_irqrestore(&mapping->lock, flags);
}

/* We'll try 2M, 1M, 64K, and finally 4K; array must end with 0! */
static const int iommu_order_array[] = { 9, 8, 4, 0 };
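/*
 * With 4KiB pages, order 9 corresponds to 2MiB chunks, order 8 to 1MiB,
 * order 4 to 64KiB and order 0 to a single 4KiB page, matching the sizes
 * named in the comment above.
 */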
1273
549a17e4 1274static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
00085f1e 1275 gfp_t gfp, unsigned long attrs,
f1270896 1276 int coherent_flag)
4ce63fcd
MS
1277{
1278 struct page **pages;
1279 int count = size >> PAGE_SHIFT;
1280 int array_size = count * sizeof(struct page *);
1281 int i = 0;
33298ef6 1282 int order_idx = 0;
4ce63fcd
MS
1283
1284 if (array_size <= PAGE_SIZE)
23be7fda 1285 pages = kzalloc(array_size, GFP_KERNEL);
4ce63fcd
MS
1286 else
1287 pages = vzalloc(array_size);
1288 if (!pages)
1289 return NULL;
1290
00085f1e 1291 if (attrs & DMA_ATTR_FORCE_CONTIGUOUS)
549a17e4
MS
1292 {
1293 unsigned long order = get_order(size);
1294 struct page *page;
1295
1296 page = dma_alloc_from_contiguous(dev, count, order);
1297 if (!page)
1298 goto error;
1299
f1270896 1300 __dma_clear_buffer(page, size, coherent_flag);
549a17e4
MS
1301
1302 for (i = 0; i < count; i++)
1303 pages[i] = page + i;
1304
1305 return pages;
1306 }
1307
14d3ae2e 1308 /* Go straight to 4K chunks if caller says it's OK. */
00085f1e 1309 if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
14d3ae2e
DA
1310 order_idx = ARRAY_SIZE(iommu_order_array) - 1;
1311
f8669bef
MS
1312 /*
1313 * IOMMU can map any pages, so himem can also be used here
1314 */
1315 gfp |= __GFP_NOWARN | __GFP_HIGHMEM;
1316
4ce63fcd 1317 while (count) {
49f28aa6
TF
1318 int j, order;
1319
33298ef6
DA
1320 order = iommu_order_array[order_idx];
1321
1322 /* Drop down when we get small */
1323 if (__fls(count) < order) {
1324 order_idx++;
1325 continue;
49f28aa6 1326 }
4ce63fcd 1327
33298ef6
DA
1328 if (order) {
1329 /* See if it's easy to allocate a high-order chunk */
1330 pages[i] = alloc_pages(gfp | __GFP_NORETRY, order);
1331
1332 /* Go down a notch at first sign of pressure */
1333 if (!pages[i]) {
1334 order_idx++;
1335 continue;
1336 }
1337 } else {
49f28aa6
TF
1338 pages[i] = alloc_pages(gfp, 0);
1339 if (!pages[i])
1340 goto error;
1341 }
4ce63fcd 1342
5a796eeb 1343 if (order) {
4ce63fcd 1344 split_page(pages[i], order);
5a796eeb
HD
1345 j = 1 << order;
1346 while (--j)
1347 pages[i + j] = pages[i] + j;
1348 }
4ce63fcd 1349
f1270896 1350 __dma_clear_buffer(pages[i], PAGE_SIZE << order, coherent_flag);
4ce63fcd
MS
1351 i += 1 << order;
1352 count -= 1 << order;
1353 }
1354
1355 return pages;
1356error:
9fa8af91 1357 while (i--)
4ce63fcd
MS
1358 if (pages[i])
1359 __free_pages(pages[i], 0);
1d5cfdb0 1360 kvfree(pages);
4ce63fcd
MS
1361 return NULL;
1362}
1363
549a17e4 1364static int __iommu_free_buffer(struct device *dev, struct page **pages,
00085f1e 1365 size_t size, unsigned long attrs)
4ce63fcd
MS
1366{
1367 int count = size >> PAGE_SHIFT;
4ce63fcd 1368 int i;
549a17e4 1369
00085f1e 1370 if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
549a17e4
MS
1371 dma_release_from_contiguous(dev, pages[0], count);
1372 } else {
1373 for (i = 0; i < count; i++)
1374 if (pages[i])
1375 __free_pages(pages[i], 0);
1376 }
1377
1d5cfdb0 1378 kvfree(pages);
4ce63fcd
MS
1379 return 0;
1380}
1381
1382/*
1383 * Create a CPU mapping for a specified pages
1384 */
1385static void *
e9da6e99
MS
1386__iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
1387 const void *caller)
4ce63fcd 1388{
513510dd
LA
1389 return dma_common_pages_remap(pages, size,
1390 VM_ARM_DMA_CONSISTENT | VM_USERMAP, prot, caller);
4ce63fcd
MS
1391}
1392
1393/*
1394 * Create a mapping in device IO address space for specified pages
1395 */
1396static dma_addr_t
1397__iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
1398{
89cfdb19 1399 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
4ce63fcd
MS
1400 unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
1401 dma_addr_t dma_addr, iova;
90cde558 1402 int i;
4ce63fcd
MS
1403
1404 dma_addr = __alloc_iova(mapping, size);
1405 if (dma_addr == DMA_ERROR_CODE)
1406 return dma_addr;
1407
1408 iova = dma_addr;
1409 for (i = 0; i < count; ) {
90cde558
AP
1410 int ret;
1411
4ce63fcd
MS
1412 unsigned int next_pfn = page_to_pfn(pages[i]) + 1;
1413 phys_addr_t phys = page_to_phys(pages[i]);
1414 unsigned int len, j;
1415
1416 for (j = i + 1; j < count; j++, next_pfn++)
1417 if (page_to_pfn(pages[j]) != next_pfn)
1418 break;
1419
1420 len = (j - i) << PAGE_SHIFT;
c9b24996
AH
1421 ret = iommu_map(mapping->domain, iova, phys, len,
1422 IOMMU_READ|IOMMU_WRITE);
4ce63fcd
MS
1423 if (ret < 0)
1424 goto fail;
1425 iova += len;
1426 i = j;
1427 }
1428 return dma_addr;
1429fail:
1430 iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
1431 __free_iova(mapping, dma_addr, size);
1432 return DMA_ERROR_CODE;
1433}
1434
1435static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
1436{
89cfdb19 1437 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
4ce63fcd
MS
1438
1439 /*
1440 * add optional in-page offset from iova to size and align
1441 * result to page size
1442 */
1443 size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
1444 iova &= PAGE_MASK;
1445
1446 iommu_unmap(mapping->domain, iova, size);
1447 __free_iova(mapping, iova, size);
1448 return 0;
1449}
1450
665bad7b
HD
1451static struct page **__atomic_get_pages(void *addr)
1452{
36d0fd21
LA
1453 struct page *page;
1454 phys_addr_t phys;
1455
1456 phys = gen_pool_virt_to_phys(atomic_pool, (unsigned long)addr);
1457 page = phys_to_page(phys);
665bad7b 1458
36d0fd21 1459 return (struct page **)page;
665bad7b
HD
1460}
1461
00085f1e 1462static struct page **__iommu_get_pages(void *cpu_addr, unsigned long attrs)
e9da6e99
MS
1463{
1464 struct vm_struct *area;
1465
665bad7b
HD
1466 if (__in_atomic_pool(cpu_addr, PAGE_SIZE))
1467 return __atomic_get_pages(cpu_addr);
1468
00085f1e 1469 if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
955c757e
MS
1470 return cpu_addr;
1471
e9da6e99
MS
1472 area = find_vm_area(cpu_addr);
1473 if (area && (area->flags & VM_ARM_DMA_CONSISTENT))
1474 return area->pages;
1475 return NULL;
1476}
1477
56506822
GC
1478static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp,
1479 dma_addr_t *handle, int coherent_flag)
479ed93a
HD
1480{
1481 struct page *page;
1482 void *addr;
1483
56506822
GC
1484 if (coherent_flag == COHERENT)
1485 addr = __alloc_simple_buffer(dev, size, gfp, &page);
1486 else
1487 addr = __alloc_from_pool(size, &page);
479ed93a
HD
1488 if (!addr)
1489 return NULL;
1490
1491 *handle = __iommu_create_mapping(dev, &page, size);
1492 if (*handle == DMA_ERROR_CODE)
1493 goto err_mapping;
1494
1495 return addr;
1496
1497err_mapping:
1498 __free_from_pool(addr, size);
1499 return NULL;
1500}
1501
d5898291 1502static void __iommu_free_atomic(struct device *dev, void *cpu_addr,
56506822 1503 dma_addr_t handle, size_t size, int coherent_flag)
479ed93a
HD
1504{
1505 __iommu_remove_mapping(dev, handle, size);
56506822
GC
1506 if (coherent_flag == COHERENT)
1507 __dma_free_buffer(virt_to_page(cpu_addr), size);
1508 else
1509 __free_from_pool(cpu_addr, size);
479ed93a
HD
1510}
1511
56506822 1512static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
00085f1e 1513 dma_addr_t *handle, gfp_t gfp, unsigned long attrs,
56506822 1514 int coherent_flag)
4ce63fcd 1515{
71b55663 1516 pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
4ce63fcd
MS
1517 struct page **pages;
1518 void *addr = NULL;
1519
1520 *handle = DMA_ERROR_CODE;
1521 size = PAGE_ALIGN(size);
1522
56506822
GC
1523 if (coherent_flag == COHERENT || !gfpflags_allow_blocking(gfp))
1524 return __iommu_alloc_simple(dev, size, gfp, handle,
1525 coherent_flag);
479ed93a 1526
5b91a98c
RZ
1527 /*
1528 * Following is a work-around (a.k.a. hack) to prevent pages
1529 * with __GFP_COMP being passed to split_page() which cannot
1530 * handle them. The real problem is that this flag probably
1531 * should be 0 on ARM as it is not supported on this
1532 * platform; see CONFIG_HUGETLBFS.
1533 */
1534 gfp &= ~(__GFP_COMP);
1535
56506822 1536 pages = __iommu_alloc_buffer(dev, size, gfp, attrs, coherent_flag);
4ce63fcd
MS
1537 if (!pages)
1538 return NULL;
1539
1540 *handle = __iommu_create_mapping(dev, pages, size);
1541 if (*handle == DMA_ERROR_CODE)
1542 goto err_buffer;
1543
00085f1e 1544 if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
955c757e
MS
1545 return pages;
1546
e9da6e99
MS
1547 addr = __iommu_alloc_remap(pages, size, gfp, prot,
1548 __builtin_return_address(0));
4ce63fcd
MS
1549 if (!addr)
1550 goto err_mapping;
1551
1552 return addr;
1553
1554err_mapping:
1555 __iommu_remove_mapping(dev, *handle, size);
1556err_buffer:
549a17e4 1557 __iommu_free_buffer(dev, pages, size, attrs);
4ce63fcd
MS
1558 return NULL;
1559}
1560
56506822 1561static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
00085f1e 1562 dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
56506822
GC
1563{
1564 return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, NORMAL);
1565}
1566
1567static void *arm_coherent_iommu_alloc_attrs(struct device *dev, size_t size,
00085f1e 1568 dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
56506822
GC
1569{
1570 return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, COHERENT);
1571}
1572
1573static int __arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
4ce63fcd 1574 void *cpu_addr, dma_addr_t dma_addr, size_t size,
00085f1e 1575 unsigned long attrs)
4ce63fcd 1576{
e9da6e99
MS
1577 unsigned long uaddr = vma->vm_start;
1578 unsigned long usize = vma->vm_end - vma->vm_start;
955c757e 1579 struct page **pages = __iommu_get_pages(cpu_addr, attrs);
371f0f08
MS
1580 unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
1581 unsigned long off = vma->vm_pgoff;
4ce63fcd 1582
e9da6e99
MS
1583 if (!pages)
1584 return -ENXIO;
4ce63fcd 1585
371f0f08
MS
1586 if (off >= nr_pages || (usize >> PAGE_SHIFT) > nr_pages - off)
1587 return -ENXIO;
1588
7e312103
MS
1589 pages += off;
1590
e9da6e99
MS
1591 do {
1592 int ret = vm_insert_page(vma, uaddr, *pages++);
1593 if (ret) {
1594 pr_err("Remapping memory failed: %d\n", ret);
1595 return ret;
1596 }
1597 uaddr += PAGE_SIZE;
1598 usize -= PAGE_SIZE;
1599 } while (usize > 0);
4ce63fcd 1600
4ce63fcd
MS
1601 return 0;
1602}
56506822
GC
1603static int arm_iommu_mmap_attrs(struct device *dev,
1604 struct vm_area_struct *vma, void *cpu_addr,
00085f1e 1605 dma_addr_t dma_addr, size_t size, unsigned long attrs)
56506822
GC
1606{
1607 vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
1608
1609 return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs);
1610}
1611
1612static int arm_coherent_iommu_mmap_attrs(struct device *dev,
1613 struct vm_area_struct *vma, void *cpu_addr,
00085f1e 1614 dma_addr_t dma_addr, size_t size, unsigned long attrs)
56506822
GC
1615{
1616 return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs);
1617}
4ce63fcd
MS
1618
1619/*
1620 * free a page as defined by the above mapping.
1621 * Must not be called with IRQs disabled.
1622 */
56506822 1623void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
00085f1e 1624 dma_addr_t handle, unsigned long attrs, int coherent_flag)
4ce63fcd 1625{
836bfa0d 1626 struct page **pages;
4ce63fcd
MS
1627 size = PAGE_ALIGN(size);
1628
56506822
GC
1629 if (coherent_flag == COHERENT || __in_atomic_pool(cpu_addr, size)) {
1630 __iommu_free_atomic(dev, cpu_addr, handle, size, coherent_flag);
e9da6e99 1631 return;
4ce63fcd 1632 }
e9da6e99 1633
836bfa0d
YC
1634 pages = __iommu_get_pages(cpu_addr, attrs);
1635 if (!pages) {
1636 WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
479ed93a
HD
1637 return;
1638 }
1639
00085f1e 1640 if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0) {
513510dd
LA
1641 dma_common_free_remap(cpu_addr, size,
1642 VM_ARM_DMA_CONSISTENT | VM_USERMAP);
955c757e 1643 }
e9da6e99
MS
1644
1645 __iommu_remove_mapping(dev, handle, size);
549a17e4 1646 __iommu_free_buffer(dev, pages, size, attrs);
4ce63fcd
MS
1647}
1648
56506822 1649void arm_iommu_free_attrs(struct device *dev, size_t size,
00085f1e 1650 void *cpu_addr, dma_addr_t handle, unsigned long attrs)
56506822
GC
1651{
1652 __arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, NORMAL);
1653}
1654
1655void arm_coherent_iommu_free_attrs(struct device *dev, size_t size,
00085f1e 1656 void *cpu_addr, dma_addr_t handle, unsigned long attrs)
56506822
GC
1657{
1658 __arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, COHERENT);
1659}
1660
dc2832e1
MS
1661static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
1662 void *cpu_addr, dma_addr_t dma_addr,
00085f1e 1663 size_t size, unsigned long attrs)
dc2832e1
MS
1664{
1665 unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
1666 struct page **pages = __iommu_get_pages(cpu_addr, attrs);
1667
1668 if (!pages)
1669 return -ENXIO;
1670
1671 return sg_alloc_table_from_pages(sgt, pages, count, 0, size,
1672 GFP_KERNEL);
4ce63fcd
MS
1673}
1674
c9b24996
AH
1675static int __dma_direction_to_prot(enum dma_data_direction dir)
1676{
1677 int prot;
1678
1679 switch (dir) {
1680 case DMA_BIDIRECTIONAL:
1681 prot = IOMMU_READ | IOMMU_WRITE;
1682 break;
1683 case DMA_TO_DEVICE:
1684 prot = IOMMU_READ;
1685 break;
1686 case DMA_FROM_DEVICE:
1687 prot = IOMMU_WRITE;
1688 break;
1689 default:
1690 prot = 0;
1691 }
1692
1693 return prot;
1694}
1695
4ce63fcd
MS
1696/*
1697 * Map a part of the scatter-gather list into contiguous io address space
1698 */
1699static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
1700 size_t size, dma_addr_t *handle,
00085f1e 1701 enum dma_data_direction dir, unsigned long attrs,
0fa478df 1702 bool is_coherent)
4ce63fcd 1703{
89cfdb19 1704 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
4ce63fcd
MS
1705 dma_addr_t iova, iova_base;
1706 int ret = 0;
1707 unsigned int count;
1708 struct scatterlist *s;
c9b24996 1709 int prot;
4ce63fcd
MS
1710
1711 size = PAGE_ALIGN(size);
1712 *handle = DMA_ERROR_CODE;
1713
1714 iova_base = iova = __alloc_iova(mapping, size);
1715 if (iova == DMA_ERROR_CODE)
1716 return -ENOMEM;
1717
1718 for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
3e6110fd 1719 phys_addr_t phys = page_to_phys(sg_page(s));
4ce63fcd
MS
1720 unsigned int len = PAGE_ALIGN(s->offset + s->length);
1721
00085f1e 1722 if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
4ce63fcd
MS
1723 __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
1724
c9b24996
AH
1725 prot = __dma_direction_to_prot(dir);
1726
1727 ret = iommu_map(mapping->domain, iova, phys, len, prot);
4ce63fcd
MS
1728 if (ret < 0)
1729 goto fail;
1730 count += len >> PAGE_SHIFT;
1731 iova += len;
1732 }
1733 *handle = iova_base;
1734
1735 return 0;
1736fail:
1737 iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE);
1738 __free_iova(mapping, iova_base, size);
1739 return ret;
1740}
1741
0fa478df 1742static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
00085f1e 1743 enum dma_data_direction dir, unsigned long attrs,
0fa478df 1744 bool is_coherent)
4ce63fcd
MS
1745{
1746 struct scatterlist *s = sg, *dma = sg, *start = sg;
1747 int i, count = 0;
1748 unsigned int offset = s->offset;
1749 unsigned int size = s->offset + s->length;
1750 unsigned int max = dma_get_max_seg_size(dev);
1751
1752 for (i = 1; i < nents; i++) {
1753 s = sg_next(s);
1754
1755 s->dma_address = DMA_ERROR_CODE;
1756 s->dma_length = 0;
1757
1758 if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
1759 if (__map_sg_chunk(dev, start, size, &dma->dma_address,
0fa478df 1760 dir, attrs, is_coherent) < 0)
4ce63fcd
MS
1761 goto bad_mapping;
1762
1763 dma->dma_address += offset;
1764 dma->dma_length = size - offset;
1765
1766 size = offset = s->offset;
1767 start = s;
1768 dma = sg_next(dma);
1769 count += 1;
1770 }
1771 size += s->length;
1772 }
0fa478df
RH
1773 if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs,
1774 is_coherent) < 0)
4ce63fcd
MS
1775 goto bad_mapping;
1776
1777 dma->dma_address += offset;
1778 dma->dma_length = size - offset;
1779
1780 return count+1;
1781
1782bad_mapping:
1783 for_each_sg(sg, s, count, i)
1784 __iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s));
1785 return 0;
1786}
1787
1788/**
0fa478df 1789 * arm_coherent_iommu_map_sg - map a set of SG buffers for streaming mode DMA
4ce63fcd
MS
1790 * @dev: valid struct device pointer
1791 * @sg: list of buffers
0fa478df
RH
1792 * @nents: number of buffers to map
1793 * @dir: DMA transfer direction
4ce63fcd 1794 *
0fa478df
RH
1795 * Map a set of i/o coherent buffers described by scatterlist in streaming
1796 * mode for DMA. The scatter gather list elements are merged together (if
1797 * possible) and tagged with the appropriate dma address and length. They are
1798 * obtained via sg_dma_{address,length}.
4ce63fcd 1799 */
0fa478df 1800int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg,
00085f1e 1801 int nents, enum dma_data_direction dir, unsigned long attrs)
0fa478df
RH
1802{
1803 return __iommu_map_sg(dev, sg, nents, dir, attrs, true);
1804}
1805
1806/**
1807 * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
1808 * @dev: valid struct device pointer
1809 * @sg: list of buffers
1810 * @nents: number of buffers to map
1811 * @dir: DMA transfer direction
1812 *
1813 * Map a set of buffers described by scatterlist in streaming mode for DMA.
1814 * The scatter gather list elements are merged together (if possible) and
1815 * tagged with the appropriate dma address and length. They are obtained via
1816 * sg_dma_{address,length}.
1817 */
1818int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
00085f1e 1819 int nents, enum dma_data_direction dir, unsigned long attrs)
0fa478df
RH
1820{
1821 return __iommu_map_sg(dev, sg, nents, dir, attrs, false);
1822}
1823
1824static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
00085f1e
KK
1825 int nents, enum dma_data_direction dir,
1826 unsigned long attrs, bool is_coherent)
4ce63fcd
MS
1827{
1828 struct scatterlist *s;
1829 int i;
1830
1831 for_each_sg(sg, s, nents, i) {
1832 if (sg_dma_len(s))
1833 __iommu_remove_mapping(dev, sg_dma_address(s),
1834 sg_dma_len(s));
00085f1e 1835 if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
4ce63fcd
MS
1836 __dma_page_dev_to_cpu(sg_page(s), s->offset,
1837 s->length, dir);
1838 }
1839}
1840
0fa478df
RH
1841/**
1842 * arm_coherent_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
1843 * @dev: valid struct device pointer
1844 * @sg: list of buffers
1845 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
1846 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
1847 *
1848 * Unmap a set of streaming mode DMA translations. Again, CPU access
1849 * rules concerning calls here are the same as for dma_unmap_single().
1850 */
1851void arm_coherent_iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
00085f1e
KK
1852 int nents, enum dma_data_direction dir,
1853 unsigned long attrs)
0fa478df
RH
1854{
1855 __iommu_unmap_sg(dev, sg, nents, dir, attrs, true);
1856}
1857
1858/**
1859 * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
1860 * @dev: valid struct device pointer
1861 * @sg: list of buffers
1862 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
1863 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
1864 *
1865 * Unmap a set of streaming mode DMA translations. Again, CPU access
1866 * rules concerning calls here are the same as for dma_unmap_single().
1867 */
1868void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
00085f1e
KK
1869 enum dma_data_direction dir,
1870 unsigned long attrs)
0fa478df
RH
1871{
1872 __iommu_unmap_sg(dev, sg, nents, dir, attrs, false);
1873}
1874
4ce63fcd
MS
1875/**
1876 * arm_iommu_sync_sg_for_cpu
1877 * @dev: valid struct device pointer
1878 * @sg: list of buffers
1879 * @nents: number of buffers to map (returned from dma_map_sg)
1880 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
1881 */
1882void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
1883 int nents, enum dma_data_direction dir)
1884{
1885 struct scatterlist *s;
1886 int i;
1887
1888 for_each_sg(sg, s, nents, i)
0fa478df 1889 __dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
4ce63fcd
MS
1890
1891}
1892
1893/**
1894 * arm_iommu_sync_sg_for_device
1895 * @dev: valid struct device pointer
1896 * @sg: list of buffers
1897 * @nents: number of buffers to map (returned from dma_map_sg)
1898 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
1899 */
1900void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
1901 int nents, enum dma_data_direction dir)
1902{
1903 struct scatterlist *s;
1904 int i;
1905
1906 for_each_sg(sg, s, nents, i)
0fa478df 1907 __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
4ce63fcd
MS
1908}
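A related illustration, again hedged and not taken from this file: when the CPU needs to look at a buffer that stays mapped between transfers, the driver brackets its accesses with the sync calls that land in arm_iommu_sync_sg_for_cpu()/arm_iommu_sync_sg_for_device() above. The function name below is hypothetical.

/* Editorial sketch: CPU access to a long-lived streaming SG mapping. */
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static void example_inspect_rx_buffer(struct device *dev, struct sg_table *sgt)
{
	/* Give ownership back to the CPU so the cached view is valid. */
	dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);

	/* ... CPU may now read what the device wrote ... */

	/* Hand the buffer back to the device before the next transfer. */
	dma_sync_sg_for_device(dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
}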
1909
1910
1911/**
0fa478df 1912 * arm_coherent_iommu_map_page
4ce63fcd
MS
1913 * @dev: valid struct device pointer
1914 * @page: page that buffer resides in
1915 * @offset: offset into page for start of buffer
1916 * @size: size of buffer to map
1917 * @dir: DMA transfer direction
1918 *
0fa478df 1919 * Coherent IOMMU aware version of arm_dma_map_page()
4ce63fcd 1920 */
0fa478df 1921static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *page,
4ce63fcd 1922 unsigned long offset, size_t size, enum dma_data_direction dir,
00085f1e 1923 unsigned long attrs)
4ce63fcd 1924{
89cfdb19 1925 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
4ce63fcd 1926 dma_addr_t dma_addr;
13987d68 1927 int ret, prot, len = PAGE_ALIGN(size + offset);
4ce63fcd 1928
4ce63fcd
MS
1929 dma_addr = __alloc_iova(mapping, len);
1930 if (dma_addr == DMA_ERROR_CODE)
1931 return dma_addr;
1932
c9b24996 1933 prot = __dma_direction_to_prot(dir);
13987d68
WD
1934
1935 ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot);
4ce63fcd
MS
1936 if (ret < 0)
1937 goto fail;
1938
1939 return dma_addr + offset;
1940fail:
1941 __free_iova(mapping, dma_addr, len);
1942 return DMA_ERROR_CODE;
1943}
1944
0fa478df
RH
1945/**
1946 * arm_iommu_map_page
1947 * @dev: valid struct device pointer
1948 * @page: page that buffer resides in
1949 * @offset: offset into page for start of buffer
1950 * @size: size of buffer to map
1951 * @dir: DMA transfer direction
1952 *
1953 * IOMMU aware version of arm_dma_map_page()
1954 */
1955static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
1956 unsigned long offset, size_t size, enum dma_data_direction dir,
00085f1e 1957 unsigned long attrs)
0fa478df 1958{
00085f1e 1959 if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
0fa478df
RH
1960 __dma_page_cpu_to_dev(page, offset, size, dir);
1961
1962 return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs);
1963}
1964
1965/**
1966 * arm_coherent_iommu_unmap_page
1967 * @dev: valid struct device pointer
1968 * @handle: DMA address of buffer
1969 * @size: size of buffer (same as passed to dma_map_page)
1970 * @dir: DMA transfer direction (same as passed to dma_map_page)
1971 *
1972 * Coherent IOMMU aware version of arm_dma_unmap_page()
1973 */
1974static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle,
00085f1e 1975 size_t size, enum dma_data_direction dir, unsigned long attrs)
0fa478df 1976{
89cfdb19 1977 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
0fa478df 1978 dma_addr_t iova = handle & PAGE_MASK;
0fa478df
RH
1979 int offset = handle & ~PAGE_MASK;
1980 int len = PAGE_ALIGN(size + offset);
1981
1982 if (!iova)
1983 return;
1984
1985 iommu_unmap(mapping->domain, iova, len);
1986 __free_iova(mapping, iova, len);
1987}
1988
4ce63fcd
MS
1989/**
1990 * arm_iommu_unmap_page
1991 * @dev: valid struct device pointer
1992 * @handle: DMA address of buffer
1993 * @size: size of buffer (same as passed to dma_map_page)
1994 * @dir: DMA transfer direction (same as passed to dma_map_page)
1995 *
1996 * IOMMU aware version of arm_dma_unmap_page()
1997 */
1998static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
00085f1e 1999 size_t size, enum dma_data_direction dir, unsigned long attrs)
4ce63fcd 2000{
89cfdb19 2001 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
4ce63fcd
MS
2002 dma_addr_t iova = handle & PAGE_MASK;
2003 struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
2004 int offset = handle & ~PAGE_MASK;
2005 int len = PAGE_ALIGN(size + offset);
2006
2007 if (!iova)
2008 return;
2009
00085f1e 2010 if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
4ce63fcd
MS
2011 __dma_page_dev_to_cpu(page, offset, size, dir);
2012
2013 iommu_unmap(mapping->domain, iova, len);
2014 __free_iova(mapping, iova, len);
2015}
2016
24ed5d2c
NS
2017/**
2018 * arm_iommu_map_resource - map a device resource for DMA
2019 * @dev: valid struct device pointer
2020 * @phys_addr: physical address of resource
2021 * @size: size of resource to map
2022 * @dir: DMA transfer direction
2023 */
2024static dma_addr_t arm_iommu_map_resource(struct device *dev,
2025 phys_addr_t phys_addr, size_t size,
2026 enum dma_data_direction dir, unsigned long attrs)
2027{
2028 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
2029 dma_addr_t dma_addr;
2030 int ret, prot;
2031 phys_addr_t addr = phys_addr & PAGE_MASK;
2032 unsigned int offset = phys_addr & ~PAGE_MASK;
2033 size_t len = PAGE_ALIGN(size + offset);
2034
2035 dma_addr = __alloc_iova(mapping, len);
2036 if (dma_addr == DMA_ERROR_CODE)
2037 return dma_addr;
2038
2039 prot = __dma_direction_to_prot(dir) | IOMMU_MMIO;
2040
2041 ret = iommu_map(mapping->domain, dma_addr, addr, len, prot);
2042 if (ret < 0)
2043 goto fail;
2044
2045 return dma_addr + offset;
2046fail:
2047 __free_iova(mapping, dma_addr, len);
2048 return DMA_ERROR_CODE;
2049}
2050
2051/**
2052 * arm_iommu_unmap_resource - unmap a device DMA resource
2053 * @dev: valid struct device pointer
 2054 * @dma_handle: DMA address of resource
 2055 * @size: size of resource to unmap
2056 * @dir: DMA transfer direction
2057 */
2058static void arm_iommu_unmap_resource(struct device *dev, dma_addr_t dma_handle,
2059 size_t size, enum dma_data_direction dir,
2060 unsigned long attrs)
2061{
2062 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
2063 dma_addr_t iova = dma_handle & PAGE_MASK;
2064 unsigned int offset = dma_handle & ~PAGE_MASK;
2065 size_t len = PAGE_ALIGN(size + offset);
2066
2067 if (!iova)
2068 return;
2069
2070 iommu_unmap(mapping->domain, iova, len);
2071 __free_iova(mapping, iova, len);
2072}
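Editor's note with a hedged sketch: these two handlers back dma_map_resource()/dma_unmap_resource() for attached devices, which exist so a DMA engine can target another device's MMIO region (a FIFO, for example) rather than RAM; arm_iommu_map_resource() adds IOMMU_MMIO to the protection bits for exactly that case. The function and parameter names below are illustrative only.

/* Editorial sketch: mapping a peripheral FIFO (MMIO, not RAM) for slave DMA. */
#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int example_dma_to_fifo(struct device *dma_dev, phys_addr_t fifo_phys,
			       size_t len)
{
	dma_addr_t dma;

	dma = dma_map_resource(dma_dev, fifo_phys, len, DMA_BIDIRECTIONAL, 0);
	if (dma_mapping_error(dma_dev, dma))
		return -EIO;

	/* ... point the DMA engine at 'dma' and run the transfer ... */

	dma_unmap_resource(dma_dev, dma, len, DMA_BIDIRECTIONAL, 0);
	return 0;
}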
2073
4ce63fcd
MS
2074static void arm_iommu_sync_single_for_cpu(struct device *dev,
2075 dma_addr_t handle, size_t size, enum dma_data_direction dir)
2076{
89cfdb19 2077 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
4ce63fcd
MS
2078 dma_addr_t iova = handle & PAGE_MASK;
2079 struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
2080 unsigned int offset = handle & ~PAGE_MASK;
2081
2082 if (!iova)
2083 return;
2084
0fa478df 2085 __dma_page_dev_to_cpu(page, offset, size, dir);
4ce63fcd
MS
2086}
2087
2088static void arm_iommu_sync_single_for_device(struct device *dev,
2089 dma_addr_t handle, size_t size, enum dma_data_direction dir)
2090{
89cfdb19 2091 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
4ce63fcd
MS
2092 dma_addr_t iova = handle & PAGE_MASK;
2093 struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
2094 unsigned int offset = handle & ~PAGE_MASK;
2095
2096 if (!iova)
2097 return;
2098
2099 __dma_page_cpu_to_dev(page, offset, size, dir);
2100}
2101
2102struct dma_map_ops iommu_ops = {
2103 .alloc = arm_iommu_alloc_attrs,
2104 .free = arm_iommu_free_attrs,
2105 .mmap = arm_iommu_mmap_attrs,
dc2832e1 2106 .get_sgtable = arm_iommu_get_sgtable,
4ce63fcd
MS
2107
2108 .map_page = arm_iommu_map_page,
2109 .unmap_page = arm_iommu_unmap_page,
2110 .sync_single_for_cpu = arm_iommu_sync_single_for_cpu,
2111 .sync_single_for_device = arm_iommu_sync_single_for_device,
2112
2113 .map_sg = arm_iommu_map_sg,
2114 .unmap_sg = arm_iommu_unmap_sg,
2115 .sync_sg_for_cpu = arm_iommu_sync_sg_for_cpu,
2116 .sync_sg_for_device = arm_iommu_sync_sg_for_device,
24ed5d2c
NS
2117
2118 .map_resource = arm_iommu_map_resource,
2119 .unmap_resource = arm_iommu_unmap_resource,
4ce63fcd
MS
2120};
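Editor's illustration, not part of the file: once set_dma_ops() installs this table for a device (see arm_iommu_attach_device() further down), the ordinary streaming DMA calls dispatch through it. A minimal hedged sketch of single-page streaming DMA that would end up in arm_iommu_map_page()/arm_iommu_unmap_page(); the function name is hypothetical.

/* Editorial sketch: one-page streaming DMA through the generic API. */
#include <linux/dma-mapping.h>

static int example_dma_one_page(struct device *dev, struct page *page,
				size_t len)
{
	dma_addr_t dma;

	/* Dispatches to arm_iommu_map_page() for an attached device. */
	dma = dma_map_page(dev, page, 0, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	/* ... start the transfer and wait for it to complete ... */

	dma_unmap_page(dev, dma, len, DMA_TO_DEVICE);
	return 0;
}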
2121
0fa478df 2122struct dma_map_ops iommu_coherent_ops = {
56506822
GC
2123 .alloc = arm_coherent_iommu_alloc_attrs,
2124 .free = arm_coherent_iommu_free_attrs,
2125 .mmap = arm_coherent_iommu_mmap_attrs,
0fa478df
RH
2126 .get_sgtable = arm_iommu_get_sgtable,
2127
2128 .map_page = arm_coherent_iommu_map_page,
2129 .unmap_page = arm_coherent_iommu_unmap_page,
2130
2131 .map_sg = arm_coherent_iommu_map_sg,
2132 .unmap_sg = arm_coherent_iommu_unmap_sg,
24ed5d2c
NS
2133
2134 .map_resource = arm_iommu_map_resource,
2135 .unmap_resource = arm_iommu_unmap_resource,
0fa478df
RH
2136};
2137
4ce63fcd
MS
2138/**
2139 * arm_iommu_create_mapping
2140 * @bus: pointer to the bus holding the client device (for IOMMU calls)
2141 * @base: start address of the valid IO address space
68efd7d2 2142 * @size: maximum size of the valid IO address space
4ce63fcd
MS
2143 *
2144 * Creates a mapping structure which holds information about used/unused
2145 * IO address ranges, which is required to perform memory allocation and
2146 * mapping with IOMMU aware functions.
2147 *
 2148 * The client device needs to be attached to the mapping with
 2149 * the arm_iommu_attach_device() function.
2150 */
2151struct dma_iommu_mapping *
1424532b 2152arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, u64 size)
4ce63fcd 2153{
68efd7d2
MS
2154 unsigned int bits = size >> PAGE_SHIFT;
2155 unsigned int bitmap_size = BITS_TO_LONGS(bits) * sizeof(long);
4ce63fcd 2156 struct dma_iommu_mapping *mapping;
68efd7d2 2157 int extensions = 1;
4ce63fcd
MS
2158 int err = -ENOMEM;
2159
1424532b
MS
2160 /* currently only 32-bit DMA address space is supported */
2161 if (size > DMA_BIT_MASK(32) + 1)
2162 return ERR_PTR(-ERANGE);
2163
68efd7d2 2164 if (!bitmap_size)
4ce63fcd
MS
2165 return ERR_PTR(-EINVAL);
2166
68efd7d2
MS
2167 if (bitmap_size > PAGE_SIZE) {
2168 extensions = bitmap_size / PAGE_SIZE;
2169 bitmap_size = PAGE_SIZE;
2170 }
2171
4ce63fcd
MS
2172 mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL);
2173 if (!mapping)
2174 goto err;
2175
68efd7d2
MS
2176 mapping->bitmap_size = bitmap_size;
2177 mapping->bitmaps = kzalloc(extensions * sizeof(unsigned long *),
4d852ef8
AH
2178 GFP_KERNEL);
2179 if (!mapping->bitmaps)
4ce63fcd
MS
2180 goto err2;
2181
68efd7d2 2182 mapping->bitmaps[0] = kzalloc(bitmap_size, GFP_KERNEL);
4d852ef8
AH
2183 if (!mapping->bitmaps[0])
2184 goto err3;
2185
2186 mapping->nr_bitmaps = 1;
2187 mapping->extensions = extensions;
4ce63fcd 2188 mapping->base = base;
68efd7d2 2189 mapping->bits = BITS_PER_BYTE * bitmap_size;
4d852ef8 2190
4ce63fcd
MS
2191 spin_lock_init(&mapping->lock);
2192
2193 mapping->domain = iommu_domain_alloc(bus);
2194 if (!mapping->domain)
4d852ef8 2195 goto err4;
4ce63fcd
MS
2196
2197 kref_init(&mapping->kref);
2198 return mapping;
4d852ef8
AH
2199err4:
2200 kfree(mapping->bitmaps[0]);
4ce63fcd 2201err3:
4d852ef8 2202 kfree(mapping->bitmaps);
4ce63fcd
MS
2203err2:
2204 kfree(mapping);
2205err:
2206 return ERR_PTR(err);
2207}
18177d12 2208EXPORT_SYMBOL_GPL(arm_iommu_create_mapping);
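Editor's illustration of the life cycle described above, hedged and not taken from this file: a bus or driver creates the mapping, attaches the client device, and later detaches it and drops its reference. The IOVA base of 0x10000000, the SZ_1G window, and the example_* names are arbitrary illustrative choices.

/* Editorial sketch: create/attach and the matching teardown. */
#include <linux/err.h>
#include <linux/sizes.h>
#include <asm/dma-iommu.h>

static struct dma_iommu_mapping *example_mapping;

static int example_setup_iommu_dma(struct device *dev)
{
	int err;

	example_mapping = arm_iommu_create_mapping(dev->bus, 0x10000000, SZ_1G);
	if (IS_ERR(example_mapping))
		return PTR_ERR(example_mapping);

	err = arm_iommu_attach_device(dev, example_mapping);
	if (err) {
		arm_iommu_release_mapping(example_mapping);
		return err;
	}

	/* dev now uses iommu_ops for all dma_map_*() calls. */
	return 0;
}

static void example_teardown_iommu_dma(struct device *dev)
{
	arm_iommu_detach_device(dev);
	arm_iommu_release_mapping(example_mapping);
}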
4ce63fcd
MS
2209
2210static void release_iommu_mapping(struct kref *kref)
2211{
4d852ef8 2212 int i;
4ce63fcd
MS
2213 struct dma_iommu_mapping *mapping =
2214 container_of(kref, struct dma_iommu_mapping, kref);
2215
2216 iommu_domain_free(mapping->domain);
4d852ef8
AH
2217 for (i = 0; i < mapping->nr_bitmaps; i++)
2218 kfree(mapping->bitmaps[i]);
2219 kfree(mapping->bitmaps);
4ce63fcd
MS
2220 kfree(mapping);
2221}
2222
4d852ef8
AH
2223static int extend_iommu_mapping(struct dma_iommu_mapping *mapping)
2224{
2225 int next_bitmap;
2226
462859aa 2227 if (mapping->nr_bitmaps >= mapping->extensions)
4d852ef8
AH
2228 return -EINVAL;
2229
2230 next_bitmap = mapping->nr_bitmaps;
2231 mapping->bitmaps[next_bitmap] = kzalloc(mapping->bitmap_size,
2232 GFP_ATOMIC);
2233 if (!mapping->bitmaps[next_bitmap])
2234 return -ENOMEM;
2235
2236 mapping->nr_bitmaps++;
2237
2238 return 0;
2239}
2240
4ce63fcd
MS
2241void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
2242{
2243 if (mapping)
2244 kref_put(&mapping->kref, release_iommu_mapping);
2245}
18177d12 2246EXPORT_SYMBOL_GPL(arm_iommu_release_mapping);
4ce63fcd 2247
eab8d653
LP
2248static int __arm_iommu_attach_device(struct device *dev,
2249 struct dma_iommu_mapping *mapping)
2250{
2251 int err;
2252
2253 err = iommu_attach_device(mapping->domain, dev);
2254 if (err)
2255 return err;
2256
2257 kref_get(&mapping->kref);
89cfdb19 2258 to_dma_iommu_mapping(dev) = mapping;
eab8d653
LP
2259
2260 pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
2261 return 0;
2262}
2263
4ce63fcd
MS
2264/**
2265 * arm_iommu_attach_device
2266 * @dev: valid struct device pointer
2267 * @mapping: io address space mapping structure (returned from
2268 * arm_iommu_create_mapping)
2269 *
eab8d653
LP
2270 * Attaches specified io address space mapping to the provided device.
2271 * This replaces the dma operations (dma_map_ops pointer) with the
2272 * IOMMU aware version.
2273 *
4bb25789
WD
2274 * More than one client might be attached to the same io address space
2275 * mapping.
4ce63fcd
MS
2276 */
2277int arm_iommu_attach_device(struct device *dev,
2278 struct dma_iommu_mapping *mapping)
2279{
2280 int err;
2281
eab8d653 2282 err = __arm_iommu_attach_device(dev, mapping);
4ce63fcd
MS
2283 if (err)
2284 return err;
2285
eab8d653 2286 set_dma_ops(dev, &iommu_ops);
4ce63fcd
MS
2287 return 0;
2288}
18177d12 2289EXPORT_SYMBOL_GPL(arm_iommu_attach_device);
4ce63fcd 2290
eab8d653 2291static void __arm_iommu_detach_device(struct device *dev)
6fe36758
HD
2292{
2293 struct dma_iommu_mapping *mapping;
2294
2295 mapping = to_dma_iommu_mapping(dev);
2296 if (!mapping) {
2297 dev_warn(dev, "Not attached\n");
2298 return;
2299 }
2300
2301 iommu_detach_device(mapping->domain, dev);
2302 kref_put(&mapping->kref, release_iommu_mapping);
89cfdb19 2303 to_dma_iommu_mapping(dev) = NULL;
6fe36758
HD
2304
2305 pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
2306}
eab8d653
LP
2307
2308/**
2309 * arm_iommu_detach_device
2310 * @dev: valid struct device pointer
2311 *
2312 * Detaches the provided device from a previously attached map.
2313 * This voids the dma operations (dma_map_ops pointer)
2314 */
2315void arm_iommu_detach_device(struct device *dev)
2316{
2317 __arm_iommu_detach_device(dev);
2318 set_dma_ops(dev, NULL);
2319}
18177d12 2320EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
6fe36758 2321
4bb25789
WD
2322static struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent)
2323{
2324 return coherent ? &iommu_coherent_ops : &iommu_ops;
2325}
2326
2327static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
53c92d79 2328 const struct iommu_ops *iommu)
4bb25789
WD
2329{
2330 struct dma_iommu_mapping *mapping;
2331
2332 if (!iommu)
2333 return false;
2334
2335 mapping = arm_iommu_create_mapping(dev->bus, dma_base, size);
2336 if (IS_ERR(mapping)) {
2337 pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n",
2338 size, dev_name(dev));
2339 return false;
2340 }
2341
eab8d653 2342 if (__arm_iommu_attach_device(dev, mapping)) {
4bb25789
WD
2343 pr_warn("Failed to attached device %s to IOMMU_mapping\n",
2344 dev_name(dev));
2345 arm_iommu_release_mapping(mapping);
2346 return false;
2347 }
2348
2349 return true;
2350}
2351
2352static void arm_teardown_iommu_dma_ops(struct device *dev)
2353{
89cfdb19 2354 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
4bb25789 2355
c2273a18
WD
2356 if (!mapping)
2357 return;
2358
eab8d653 2359 __arm_iommu_detach_device(dev);
4bb25789
WD
2360 arm_iommu_release_mapping(mapping);
2361}
2362
2363#else
2364
2365static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
53c92d79 2366 const struct iommu_ops *iommu)
4bb25789
WD
2367{
2368 return false;
2369}
2370
2371static void arm_teardown_iommu_dma_ops(struct device *dev) { }
2372
2373#define arm_get_iommu_dma_map_ops arm_get_dma_map_ops
2374
2375#endif /* CONFIG_ARM_DMA_USE_IOMMU */
2376
2377static struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
2378{
2379 return coherent ? &arm_coherent_dma_ops : &arm_dma_ops;
2380}
2381
2382void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
53c92d79 2383 const struct iommu_ops *iommu, bool coherent)
4bb25789
WD
2384{
2385 struct dma_map_ops *dma_ops;
2386
6f51ee70 2387 dev->archdata.dma_coherent = coherent;
4bb25789
WD
2388 if (arm_setup_iommu_dma_ops(dev, dma_base, size, iommu))
2389 dma_ops = arm_get_iommu_dma_map_ops(coherent);
2390 else
2391 dma_ops = arm_get_dma_map_ops(coherent);
2392
2393 set_dma_ops(dev, dma_ops);
2394}
2395
2396void arch_teardown_dma_ops(struct device *dev)
2397{
2398 arm_teardown_iommu_dma_ops(dev);
2399}