arch/arm/mm/dma-mapping.c (git blame)
1da177e4 1/*
0ddbccd1 2 * linux/arch/arm/mm/dma-mapping.c
1da177e4
LT
3 *
4 * Copyright (C) 2000-2004 Russell King
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * DMA uncached mapping support.
11 */
12#include <linux/module.h>
13#include <linux/mm.h>
5a0e3ad6 14#include <linux/gfp.h>
1da177e4
LT
15#include <linux/errno.h>
16#include <linux/list.h>
17#include <linux/init.h>
18#include <linux/device.h>
19#include <linux/dma-mapping.h>
c7909509 20#include <linux/dma-contiguous.h>
39af22a7 21#include <linux/highmem.h>
c7909509 22#include <linux/memblock.h>
99d1717d 23#include <linux/slab.h>
4ce63fcd 24#include <linux/iommu.h>
e9da6e99 25#include <linux/io.h>
4ce63fcd 26#include <linux/vmalloc.h>
158e8bfe 27#include <linux/sizes.h>
1da177e4 28
23759dc6 29#include <asm/memory.h>
43377453 30#include <asm/highmem.h>
1da177e4 31#include <asm/cacheflush.h>
1da177e4 32#include <asm/tlbflush.h>
99d1717d 33#include <asm/mach/arch.h>
4ce63fcd 34#include <asm/dma-iommu.h>
c7909509
MS
35#include <asm/mach/map.h>
36#include <asm/system_info.h>
37#include <asm/dma-contiguous.h>
37134cd5 38
022ae537
RK
39#include "mm.h"
40
15237e1f
MS
41/*
42 * The DMA API is built upon the notion of "buffer ownership". A buffer
43 * is either exclusively owned by the CPU (and therefore may be accessed
44 * by it) or exclusively owned by the DMA device. These helper functions
45 * represent the transitions between these two ownership states.
46 *
47 * Note, however, that on later ARMs, this notion does not work due to
48 * speculative prefetches. We model our approach on the assumption that
49 * the CPU does do speculative prefetches, which means we clean caches
50 * before transfers and delay cache invalidation until transfer completion.
51 *
15237e1f 52 */
51fde349 53static void __dma_page_cpu_to_dev(struct page *, unsigned long,
15237e1f 54 size_t, enum dma_data_direction);
51fde349 55static void __dma_page_dev_to_cpu(struct page *, unsigned long,
15237e1f
MS
56 size_t, enum dma_data_direction);
57
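/*
 * Illustrative driver-side sketch (not part of this file; buf, len and
 * start_device_transfer() are assumed names): the ownership hand-over
 * described above typically looks like
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	start_device_transfer(dev, dma, len);	(device owns the buffer)
 *	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 *	(the CPU owns the buffer again and may safely access it)
 */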
2dc6a016
MS
58/**
59 * arm_dma_map_page - map a portion of a page for streaming DMA
60 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
61 * @page: page that buffer resides in
62 * @offset: offset into page for start of buffer
63 * @size: size of buffer to map
64 * @dir: DMA transfer direction
65 *
66 * Ensure that any data held in the cache is appropriately discarded
67 * or written back.
68 *
69 * The device owns this memory once this call has completed. The CPU
70 * can regain ownership by calling dma_unmap_page().
71 */
51fde349 72static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
2dc6a016
MS
73 unsigned long offset, size_t size, enum dma_data_direction dir,
74 struct dma_attrs *attrs)
75{
dd37e940 76 if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
51fde349
MS
77 __dma_page_cpu_to_dev(page, offset, size, dir);
78 return pfn_to_dma(dev, page_to_pfn(page)) + offset;
2dc6a016
MS
79}
80
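/*
 * Illustrative sketch (assumed driver code, not part of this file): this
 * is the ops hook behind dma_map_page(); a driver would call
 *
 *	dma_addr_t dma = dma_map_page(dev, page, offset, size, DMA_FROM_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		goto err;
 *
 * and later hand ownership back with dma_unmap_page(dev, dma, size,
 * DMA_FROM_DEVICE) once the device has finished writing.
 */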
dd37e940
RH
81static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *page,
82 unsigned long offset, size_t size, enum dma_data_direction dir,
83 struct dma_attrs *attrs)
84{
85 return pfn_to_dma(dev, page_to_pfn(page)) + offset;
86}
87
2dc6a016
MS
88/**
89 * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
90 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
91 * @handle: DMA address of buffer
92 * @size: size of buffer (same as passed to dma_map_page)
93 * @dir: DMA transfer direction (same as passed to dma_map_page)
94 *
95 * Unmap a page streaming mode DMA translation. The handle and size
96 * must match what was provided in the previous dma_map_page() call.
97 * All other usages are undefined.
98 *
99 * After this call, reads by the CPU to the buffer are guaranteed to see
100 * whatever the device wrote there.
101 */
51fde349 102static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
2dc6a016
MS
103 size_t size, enum dma_data_direction dir,
104 struct dma_attrs *attrs)
105{
dd37e940 106 if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
51fde349
MS
107 __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
108 handle & ~PAGE_MASK, size, dir);
2dc6a016
MS
109}
110
51fde349 111static void arm_dma_sync_single_for_cpu(struct device *dev,
2dc6a016
MS
112 dma_addr_t handle, size_t size, enum dma_data_direction dir)
113{
114 unsigned int offset = handle & (PAGE_SIZE - 1);
115 struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
dd37e940 116 __dma_page_dev_to_cpu(page, offset, size, dir);
2dc6a016
MS
117}
118
51fde349 119static void arm_dma_sync_single_for_device(struct device *dev,
2dc6a016
MS
120 dma_addr_t handle, size_t size, enum dma_data_direction dir)
121{
122 unsigned int offset = handle & (PAGE_SIZE - 1);
123 struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
dd37e940 124 __dma_page_cpu_to_dev(page, offset, size, dir);
2dc6a016
MS
125}
126
2dc6a016 127struct dma_map_ops arm_dma_ops = {
f99d6034
MS
128 .alloc = arm_dma_alloc,
129 .free = arm_dma_free,
130 .mmap = arm_dma_mmap,
dc2832e1 131 .get_sgtable = arm_dma_get_sgtable,
2dc6a016
MS
132 .map_page = arm_dma_map_page,
133 .unmap_page = arm_dma_unmap_page,
134 .map_sg = arm_dma_map_sg,
135 .unmap_sg = arm_dma_unmap_sg,
136 .sync_single_for_cpu = arm_dma_sync_single_for_cpu,
137 .sync_single_for_device = arm_dma_sync_single_for_device,
138 .sync_sg_for_cpu = arm_dma_sync_sg_for_cpu,
139 .sync_sg_for_device = arm_dma_sync_sg_for_device,
140 .set_dma_mask = arm_dma_set_mask,
141};
142EXPORT_SYMBOL(arm_dma_ops);
143
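/*
 * Illustrative sketch (assumptions noted, not part of this file): the
 * core DMA API dispatches through this ops table, roughly
 *
 *	struct dma_map_ops *ops = get_dma_ops(dev);
 *	dma_addr_t dma = ops->map_page(dev, page, off, size, dir, attrs);
 *
 * and platform code can point a fully coherent device at the lighter
 * table instead with set_dma_ops(dev, &arm_coherent_dma_ops).
 */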
dd37e940
RH
144static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
145 dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs);
146static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
147 dma_addr_t handle, struct dma_attrs *attrs);
148
149struct dma_map_ops arm_coherent_dma_ops = {
150 .alloc = arm_coherent_dma_alloc,
151 .free = arm_coherent_dma_free,
152 .mmap = arm_dma_mmap,
153 .get_sgtable = arm_dma_get_sgtable,
154 .map_page = arm_coherent_dma_map_page,
155 .map_sg = arm_dma_map_sg,
156 .set_dma_mask = arm_dma_set_mask,
157};
158EXPORT_SYMBOL(arm_coherent_dma_ops);
159
ab6494f0
CM
160static u64 get_coherent_dma_mask(struct device *dev)
161{
022ae537 162 u64 mask = (u64)arm_dma_limit;
ab6494f0
CM
163
164 if (dev) {
165 mask = dev->coherent_dma_mask;
166
167 /*
168 * Sanity check the DMA mask - it must be non-zero, and
169 * must be able to be satisfied by a DMA allocation.
170 */
171 if (mask == 0) {
172 dev_warn(dev, "coherent DMA mask is unset\n");
173 return 0;
174 }
175
022ae537 176 if ((~mask) & (u64)arm_dma_limit) {
ab6494f0
CM
177 dev_warn(dev, "coherent DMA mask %#llx is smaller "
178 "than system GFP_DMA mask %#llx\n",
022ae537 179 mask, (u64)arm_dma_limit);
ab6494f0
CM
180 return 0;
181 }
182 }
1da177e4 183
ab6494f0
CM
184 return mask;
185}
186
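/*
 * Illustrative sketch (assumed driver code): a driver declares how much
 * of the address space its device can reach before allocating, e.g.
 *
 *	if (dma_set_coherent_mask(dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 *	buf = dma_alloc_coherent(dev, size, &dma_handle, GFP_KERNEL);
 *
 * get_coherent_dma_mask() above then checks that mask against
 * arm_dma_limit when the allocation is made.
 */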
c7909509
MS
187static void __dma_clear_buffer(struct page *page, size_t size)
188{
189 void *ptr;
190 /*
191 * Ensure that the allocated pages are zeroed, and that any data
192 * lurking in the kernel direct-mapped region is invalidated.
193 */
194 ptr = page_address(page);
4ce63fcd
MS
195 if (ptr) {
196 memset(ptr, 0, size);
197 dmac_flush_range(ptr, ptr + size);
198 outer_flush_range(__pa(ptr), __pa(ptr) + size);
199 }
c7909509
MS
200}
201
7a9a32a9
RK
202/*
203 * Allocate a DMA buffer for 'dev' of size 'size' using the
204 * specified gfp mask. Note that 'size' must be page aligned.
205 */
206static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
207{
208 unsigned long order = get_order(size);
209 struct page *page, *p, *e;
7a9a32a9
RK
210
211 page = alloc_pages(gfp, order);
212 if (!page)
213 return NULL;
214
215 /*
216 * Now split the huge page and free the excess pages
217 */
218 split_page(page, order);
219 for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
220 __free_page(p);
221
c7909509 222 __dma_clear_buffer(page, size);
7a9a32a9
RK
223
224 return page;
225}
226
227/*
228 * Free a DMA buffer. 'size' must be page aligned.
229 */
230static void __dma_free_buffer(struct page *page, size_t size)
231{
232 struct page *e = page + (size >> PAGE_SHIFT);
233
234 while (page < e) {
235 __free_page(page);
236 page++;
237 }
238}
239
ab6494f0 240#ifdef CONFIG_MMU
e9da6e99
MS
241#ifdef CONFIG_HUGETLB_PAGE
242#error ARM Coherent DMA allocator does not (yet) support huge TLB
243#endif
a5e9d38b 244
e9da6e99
MS
245static void *__alloc_from_contiguous(struct device *dev, size_t size,
246 pgprot_t prot, struct page **ret_page);
99d1717d 247
e9da6e99
MS
248static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
249 pgprot_t prot, struct page **ret_page,
250 const void *caller);
99d1717d 251
e9da6e99
MS
252static void *
253__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
254 const void *caller)
99d1717d 255{
e9da6e99
MS
256 struct vm_struct *area;
257 unsigned long addr;
99d1717d 258
e9da6e99
MS
259 /*
 260 * DMA allocation can be mapped to user space, so let's
 261 * set the VM_USERMAP flag too.
262 */
263 area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP,
264 caller);
265 if (!area)
266 return NULL;
267 addr = (unsigned long)area->addr;
268 area->phys_addr = __pfn_to_phys(page_to_pfn(page));
99d1717d 269
e9da6e99
MS
270 if (ioremap_page_range(addr, addr + size, area->phys_addr, prot)) {
271 vunmap((void *)addr);
272 return NULL;
273 }
274 return (void *)addr;
99d1717d 275}
1da177e4 276
e9da6e99 277static void __dma_free_remap(void *cpu_addr, size_t size)
88c58f3b 278{
e9da6e99
MS
279 unsigned int flags = VM_ARM_DMA_CONSISTENT | VM_USERMAP;
280 struct vm_struct *area = find_vm_area(cpu_addr);
281 if (!area || (area->flags & flags) != flags) {
282 WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
283 return;
99d1717d 284 }
e9da6e99
MS
285 unmap_kernel_range((unsigned long)cpu_addr, size);
286 vunmap(cpu_addr);
88c58f3b 287}
88c58f3b 288
6e5267aa
MS
289#define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K
290
e9da6e99
MS
291struct dma_pool {
292 size_t size;
293 spinlock_t lock;
294 unsigned long *bitmap;
295 unsigned long nr_pages;
296 void *vaddr;
6b3fe472 297 struct page **pages;
c7909509
MS
298};
299
e9da6e99 300static struct dma_pool atomic_pool = {
6e5267aa 301 .size = DEFAULT_DMA_COHERENT_POOL_SIZE,
e9da6e99 302};
c7909509
MS
303
304static int __init early_coherent_pool(char *p)
305{
e9da6e99 306 atomic_pool.size = memparse(p, &p);
c7909509
MS
307 return 0;
308}
309early_param("coherent_pool", early_coherent_pool);
310
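/*
 * Example (kernel command line): the atomic pool defaults to 256 KiB and
 * can be resized at boot, e.g.
 *
 *	coherent_pool=2M
 *
 * which memparse() above turns into a 2 MiB pool size before the pool is
 * populated by atomic_pool_init().
 */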
6e5267aa
MS
311void __init init_dma_coherent_pool_size(unsigned long size)
312{
313 /*
314 * Catch any attempt to set the pool size too late.
315 */
316 BUG_ON(atomic_pool.vaddr);
317
318 /*
319 * Set architecture specific coherent pool size only if
320 * it has not been changed by kernel command line parameter.
321 */
322 if (atomic_pool.size == DEFAULT_DMA_COHERENT_POOL_SIZE)
323 atomic_pool.size = size;
324}
325
c7909509
MS
326/*
327 * Initialise the coherent pool for atomic allocations.
328 */
e9da6e99 329static int __init atomic_pool_init(void)
c7909509 330{
e9da6e99 331 struct dma_pool *pool = &atomic_pool;
c7909509 332 pgprot_t prot = pgprot_dmacoherent(pgprot_kernel);
e9da6e99
MS
333 unsigned long nr_pages = pool->size >> PAGE_SHIFT;
334 unsigned long *bitmap;
c7909509 335 struct page *page;
6b3fe472 336 struct page **pages;
c7909509 337 void *ptr;
e9da6e99 338 int bitmap_size = BITS_TO_LONGS(nr_pages) * sizeof(long);
c7909509 339
e9da6e99
MS
340 bitmap = kzalloc(bitmap_size, GFP_KERNEL);
341 if (!bitmap)
342 goto no_bitmap;
c7909509 343
6b3fe472
HD
344 pages = kzalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
345 if (!pages)
346 goto no_pages;
347
e9da6e99
MS
348 if (IS_ENABLED(CONFIG_CMA))
349 ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page);
350 else
351 ptr = __alloc_remap_buffer(NULL, pool->size, GFP_KERNEL, prot,
352 &page, NULL);
c7909509 353 if (ptr) {
6b3fe472
HD
354 int i;
355
356 for (i = 0; i < nr_pages; i++)
357 pages[i] = page + i;
358
e9da6e99
MS
359 spin_lock_init(&pool->lock);
360 pool->vaddr = ptr;
6b3fe472 361 pool->pages = pages;
e9da6e99
MS
362 pool->bitmap = bitmap;
363 pool->nr_pages = nr_pages;
364 pr_info("DMA: preallocated %u KiB pool for atomic coherent allocations\n",
365 (unsigned)pool->size / 1024);
c7909509
MS
366 return 0;
367 }
ec10665c
SK
368
369 kfree(pages);
6b3fe472 370no_pages:
e9da6e99
MS
371 kfree(bitmap);
372no_bitmap:
373 pr_err("DMA: failed to allocate %u KiB pool for atomic coherent allocation\n",
374 (unsigned)pool->size / 1024);
c7909509
MS
375 return -ENOMEM;
376}
377/*
378 * CMA is activated by core_initcall, so we must be called after it.
379 */
e9da6e99 380postcore_initcall(atomic_pool_init);
c7909509
MS
381
382struct dma_contig_early_reserve {
383 phys_addr_t base;
384 unsigned long size;
385};
386
387static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS] __initdata;
388
389static int dma_mmu_remap_num __initdata;
390
391void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
392{
393 dma_mmu_remap[dma_mmu_remap_num].base = base;
394 dma_mmu_remap[dma_mmu_remap_num].size = size;
395 dma_mmu_remap_num++;
396}
397
398void __init dma_contiguous_remap(void)
399{
400 int i;
401 for (i = 0; i < dma_mmu_remap_num; i++) {
402 phys_addr_t start = dma_mmu_remap[i].base;
403 phys_addr_t end = start + dma_mmu_remap[i].size;
404 struct map_desc map;
405 unsigned long addr;
406
407 if (end > arm_lowmem_limit)
408 end = arm_lowmem_limit;
409 if (start >= end)
39f78e70 410 continue;
c7909509
MS
411
412 map.pfn = __phys_to_pfn(start);
413 map.virtual = __phys_to_virt(start);
414 map.length = end - start;
415 map.type = MT_MEMORY_DMA_READY;
416
417 /*
418 * Clear previous low-memory mapping
419 */
420 for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
61f6c7a4 421 addr += PMD_SIZE)
c7909509
MS
422 pmd_clear(pmd_off_k(addr));
423
424 iotable_init(&map, 1);
425 }
426}
427
c7909509
MS
428static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr,
429 void *data)
430{
431 struct page *page = virt_to_page(addr);
432 pgprot_t prot = *(pgprot_t *)data;
433
434 set_pte_ext(pte, mk_pte(page, prot), 0);
435 return 0;
436}
437
438static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
439{
440 unsigned long start = (unsigned long) page_address(page);
 441 unsigned long end = start + size;
442
443 apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
444 dsb();
445 flush_tlb_kernel_range(start, end);
446}
447
448static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
449 pgprot_t prot, struct page **ret_page,
450 const void *caller)
451{
452 struct page *page;
453 void *ptr;
454 page = __dma_alloc_buffer(dev, size, gfp);
455 if (!page)
456 return NULL;
457
458 ptr = __dma_alloc_remap(page, size, gfp, prot, caller);
459 if (!ptr) {
460 __dma_free_buffer(page, size);
461 return NULL;
462 }
463
464 *ret_page = page;
465 return ptr;
466}
467
e9da6e99 468static void *__alloc_from_pool(size_t size, struct page **ret_page)
c7909509 469{
e9da6e99
MS
470 struct dma_pool *pool = &atomic_pool;
471 unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
472 unsigned int pageno;
473 unsigned long flags;
474 void *ptr = NULL;
e4ea6918 475 unsigned long align_mask;
c7909509 476
e9da6e99
MS
477 if (!pool->vaddr) {
478 WARN(1, "coherent pool not initialised!\n");
c7909509
MS
479 return NULL;
480 }
481
482 /*
483 * Align the region allocation - allocations from pool are rather
484 * small, so align them to their order in pages, minimum is a page
485 * size. This helps reduce fragmentation of the DMA space.
486 */
e4ea6918 487 align_mask = (1 << get_order(size)) - 1;
e9da6e99
MS
488
489 spin_lock_irqsave(&pool->lock, flags);
490 pageno = bitmap_find_next_zero_area(pool->bitmap, pool->nr_pages,
e4ea6918 491 0, count, align_mask);
e9da6e99
MS
492 if (pageno < pool->nr_pages) {
493 bitmap_set(pool->bitmap, pageno, count);
494 ptr = pool->vaddr + PAGE_SIZE * pageno;
6b3fe472 495 *ret_page = pool->pages[pageno];
fb71285f
MS
496 } else {
497 pr_err_once("ERROR: %u KiB atomic DMA coherent pool is too small!\n"
498 "Please increase it with coherent_pool= kernel parameter!\n",
499 (unsigned)pool->size / 1024);
c7909509 500 }
e9da6e99
MS
501 spin_unlock_irqrestore(&pool->lock, flags);
502
503 return ptr;
c7909509
MS
504}
505
21d0a759
HD
506static bool __in_atomic_pool(void *start, size_t size)
507{
508 struct dma_pool *pool = &atomic_pool;
509 void *end = start + size;
510 void *pool_start = pool->vaddr;
511 void *pool_end = pool->vaddr + pool->size;
512
f3d87524 513 if (start < pool_start || start >= pool_end)
21d0a759
HD
514 return false;
515
516 if (end <= pool_end)
517 return true;
518
519 WARN(1, "Wrong coherent size(%p-%p) from atomic pool(%p-%p)\n",
520 start, end - 1, pool_start, pool_end - 1);
521
522 return false;
523}
524
e9da6e99 525static int __free_from_pool(void *start, size_t size)
c7909509 526{
e9da6e99
MS
527 struct dma_pool *pool = &atomic_pool;
528 unsigned long pageno, count;
529 unsigned long flags;
c7909509 530
21d0a759 531 if (!__in_atomic_pool(start, size))
c7909509
MS
532 return 0;
533
e9da6e99
MS
534 pageno = (start - pool->vaddr) >> PAGE_SHIFT;
535 count = size >> PAGE_SHIFT;
536
537 spin_lock_irqsave(&pool->lock, flags);
538 bitmap_clear(pool->bitmap, pageno, count);
539 spin_unlock_irqrestore(&pool->lock, flags);
540
c7909509
MS
541 return 1;
542}
543
544static void *__alloc_from_contiguous(struct device *dev, size_t size,
545 pgprot_t prot, struct page **ret_page)
546{
547 unsigned long order = get_order(size);
548 size_t count = size >> PAGE_SHIFT;
549 struct page *page;
550
551 page = dma_alloc_from_contiguous(dev, count, order);
552 if (!page)
553 return NULL;
554
555 __dma_clear_buffer(page, size);
556 __dma_remap(page, size, prot);
557
558 *ret_page = page;
559 return page_address(page);
560}
561
562static void __free_from_contiguous(struct device *dev, struct page *page,
563 size_t size)
564{
565 __dma_remap(page, size, pgprot_kernel);
566 dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
567}
568
f99d6034
MS
569static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
570{
571 prot = dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs) ?
572 pgprot_writecombine(prot) :
573 pgprot_dmacoherent(prot);
574 return prot;
575}
576
c7909509
MS
577#define nommu() 0
578
ab6494f0 579#else /* !CONFIG_MMU */
695ae0af 580
c7909509
MS
581#define nommu() 1
582
f99d6034 583#define __get_dma_pgprot(attrs, prot) __pgprot(0)
c7909509 584#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c) NULL
e9da6e99 585#define __alloc_from_pool(size, ret_page) NULL
c7909509
MS
586#define __alloc_from_contiguous(dev, size, prot, ret) NULL
587#define __free_from_pool(cpu_addr, size) 0
588#define __free_from_contiguous(dev, page, size) do { } while (0)
589#define __dma_free_remap(cpu_addr, size) do { } while (0)
31ebf944
RK
590
591#endif /* CONFIG_MMU */
592
c7909509
MS
593static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
594 struct page **ret_page)
ab6494f0 595{
c7909509
MS
596 struct page *page;
597 page = __dma_alloc_buffer(dev, size, gfp);
598 if (!page)
599 return NULL;
600
601 *ret_page = page;
602 return page_address(page);
603}
604
605
606
607static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
dd37e940 608 gfp_t gfp, pgprot_t prot, bool is_coherent, const void *caller)
c7909509
MS
609{
610 u64 mask = get_coherent_dma_mask(dev);
3dd7ea92 611 struct page *page = NULL;
31ebf944 612 void *addr;
ab6494f0 613
c7909509
MS
614#ifdef CONFIG_DMA_API_DEBUG
615 u64 limit = (mask + 1) & ~mask;
616 if (limit && size >= limit) {
617 dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
618 size, mask);
619 return NULL;
620 }
621#endif
622
623 if (!mask)
624 return NULL;
625
626 if (mask < 0xffffffffULL)
627 gfp |= GFP_DMA;
628
ea2e7057
SB
629 /*
630 * Following is a work-around (a.k.a. hack) to prevent pages
631 * with __GFP_COMP being passed to split_page() which cannot
632 * handle them. The real problem is that this flag probably
633 * should be 0 on ARM as it is not supported on this
634 * platform; see CONFIG_HUGETLBFS.
635 */
636 gfp &= ~(__GFP_COMP);
637
553ac788 638 *handle = DMA_ERROR_CODE;
04da5694 639 size = PAGE_ALIGN(size);
ab6494f0 640
dd37e940 641 if (is_coherent || nommu())
c7909509 642 addr = __alloc_simple_buffer(dev, size, gfp, &page);
e9da6e99
MS
643 else if (gfp & GFP_ATOMIC)
644 addr = __alloc_from_pool(size, &page);
f1ae98da 645 else if (!IS_ENABLED(CONFIG_CMA))
c7909509 646 addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
31ebf944 647 else
c7909509 648 addr = __alloc_from_contiguous(dev, size, prot, &page);
695ae0af 649
31ebf944 650 if (addr)
9eedd963 651 *handle = pfn_to_dma(dev, page_to_pfn(page));
695ae0af 652
31ebf944
RK
653 return addr;
654}
1da177e4
LT
655
656/*
657 * Allocate DMA-coherent memory space and return both the kernel remapped
658 * virtual and bus address for that space.
659 */
f99d6034
MS
660void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
661 gfp_t gfp, struct dma_attrs *attrs)
1da177e4 662{
f99d6034 663 pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
1fe53268
DES
664 void *memory;
665
666 if (dma_alloc_from_coherent(dev, size, handle, &memory))
667 return memory;
668
dd37e940
RH
669 return __dma_alloc(dev, size, handle, gfp, prot, false,
670 __builtin_return_address(0));
671}
672
673static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
674 dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
675{
676 pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
677 void *memory;
678
679 if (dma_alloc_from_coherent(dev, size, handle, &memory))
680 return memory;
681
682 return __dma_alloc(dev, size, handle, gfp, prot, true,
45cd5290 683 __builtin_return_address(0));
1da177e4 684}
1da177e4
LT
685
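/*
 * Illustrative sketch (assumed driver code): the usual way into
 * arm_dma_alloc()/arm_dma_free() is the generic coherent API, e.g.
 *
 *	void *cpu = dma_alloc_coherent(dev, SZ_4K, &dma, GFP_KERNEL);
 *	if (!cpu)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, SZ_4K, cpu, dma);
 *
 * GFP_ATOMIC callers are served from the atomic pool set up above.
 */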
686/*
f99d6034 687 * Create userspace mapping for the DMA-coherent memory.
1da177e4 688 */
f99d6034
MS
689int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
690 void *cpu_addr, dma_addr_t dma_addr, size_t size,
691 struct dma_attrs *attrs)
1da177e4 692{
ab6494f0
CM
693 int ret = -ENXIO;
694#ifdef CONFIG_MMU
50262a4b
MS
695 unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
696 unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
c7909509 697 unsigned long pfn = dma_to_pfn(dev, dma_addr);
50262a4b
MS
698 unsigned long off = vma->vm_pgoff;
699
f99d6034
MS
700 vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
701
47142f07
MS
702 if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
703 return ret;
704
50262a4b
MS
705 if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
706 ret = remap_pfn_range(vma, vma->vm_start,
707 pfn + off,
708 vma->vm_end - vma->vm_start,
709 vma->vm_page_prot);
710 }
ab6494f0 711#endif /* CONFIG_MMU */
1da177e4
LT
712
713 return ret;
714}
715
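/*
 * Illustrative sketch (assumed driver code): a driver exposes a coherent
 * buffer to user space from its ->mmap() handler with something like
 *
 *	return dma_mmap_coherent(dev, vma, cpu_addr, dma_handle, size);
 *
 * which ends up here and uses remap_pfn_range() with the uncached or
 * write-combined protection chosen by __get_dma_pgprot().
 */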
1da177e4 716/*
c7909509 717 * Free a buffer as defined by the above mapping.
1da177e4 718 */
dd37e940
RH
719static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
720 dma_addr_t handle, struct dma_attrs *attrs,
721 bool is_coherent)
1da177e4 722{
c7909509 723 struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
5edf71ae 724
1fe53268
DES
725 if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
726 return;
727
3e82d012
RK
728 size = PAGE_ALIGN(size);
729
dd37e940 730 if (is_coherent || nommu()) {
c7909509 731 __dma_free_buffer(page, size);
d9e0d149
AK
732 } else if (__free_from_pool(cpu_addr, size)) {
733 return;
f1ae98da 734 } else if (!IS_ENABLED(CONFIG_CMA)) {
695ae0af 735 __dma_free_remap(cpu_addr, size);
c7909509
MS
736 __dma_free_buffer(page, size);
737 } else {
c7909509
MS
738 /*
739 * Non-atomic allocations cannot be freed with IRQs disabled
740 */
741 WARN_ON(irqs_disabled());
742 __free_from_contiguous(dev, page, size);
743 }
1da177e4 744}
afd1a321 745
dd37e940
RH
746void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
747 dma_addr_t handle, struct dma_attrs *attrs)
748{
749 __arm_dma_free(dev, size, cpu_addr, handle, attrs, false);
750}
751
752static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
753 dma_addr_t handle, struct dma_attrs *attrs)
754{
755 __arm_dma_free(dev, size, cpu_addr, handle, attrs, true);
756}
757
dc2832e1
MS
758int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
759 void *cpu_addr, dma_addr_t handle, size_t size,
760 struct dma_attrs *attrs)
761{
762 struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
763 int ret;
764
765 ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
766 if (unlikely(ret))
767 return ret;
768
769 sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
770 return 0;
771}
772
4ea0d737 773static void dma_cache_maint_page(struct page *page, unsigned long offset,
a9c9147e
RK
774 size_t size, enum dma_data_direction dir,
775 void (*op)(const void *, size_t, int))
43377453
NP
776{
777 /*
778 * A single sg entry may refer to multiple physically contiguous
779 * pages. But we still need to process highmem pages individually.
780 * If highmem is not configured then the bulk of this loop gets
781 * optimized out.
782 */
783 size_t left = size;
784 do {
785 size_t len = left;
93f1d629
RK
786 void *vaddr;
787
788 if (PageHighMem(page)) {
789 if (len + offset > PAGE_SIZE) {
790 if (offset >= PAGE_SIZE) {
791 page += offset / PAGE_SIZE;
792 offset %= PAGE_SIZE;
793 }
794 len = PAGE_SIZE - offset;
795 }
796 vaddr = kmap_high_get(page);
797 if (vaddr) {
798 vaddr += offset;
a9c9147e 799 op(vaddr, len, dir);
93f1d629 800 kunmap_high(page);
7e5a69e8 801 } else if (cache_is_vipt()) {
39af22a7
NP
802 /* unmapped pages might still be cached */
803 vaddr = kmap_atomic(page);
7e5a69e8 804 op(vaddr + offset, len, dir);
39af22a7 805 kunmap_atomic(vaddr);
43377453 806 }
93f1d629
RK
807 } else {
808 vaddr = page_address(page) + offset;
a9c9147e 809 op(vaddr, len, dir);
43377453 810 }
43377453
NP
811 offset = 0;
812 page++;
813 left -= len;
814 } while (left);
815}
4ea0d737 816
51fde349
MS
817/*
818 * Make an area consistent for devices.
819 * Note: Drivers should NOT use this function directly, as it will break
820 * platforms with CONFIG_DMABOUNCE.
821 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
822 */
823static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
4ea0d737
RK
824 size_t size, enum dma_data_direction dir)
825{
65af191a 826 unsigned long paddr;
65af191a 827
a9c9147e 828 dma_cache_maint_page(page, off, size, dir, dmac_map_area);
65af191a
RK
829
830 paddr = page_to_phys(page) + off;
2ffe2da3
RK
831 if (dir == DMA_FROM_DEVICE) {
832 outer_inv_range(paddr, paddr + size);
833 } else {
834 outer_clean_range(paddr, paddr + size);
835 }
836 /* FIXME: non-speculating: flush on bidirectional mappings? */
4ea0d737 837}
4ea0d737 838
51fde349 839static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
4ea0d737
RK
840 size_t size, enum dma_data_direction dir)
841{
2ffe2da3
RK
842 unsigned long paddr = page_to_phys(page) + off;
843
844 /* FIXME: non-speculating: not required */
845 /* don't bother invalidating if DMA to device */
846 if (dir != DMA_TO_DEVICE)
847 outer_inv_range(paddr, paddr + size);
848
a9c9147e 849 dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
c0177800
CM
850
851 /*
852 * Mark the D-cache clean for this page to avoid extra flushing.
853 */
854 if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE)
855 set_bit(PG_dcache_clean, &page->flags);
4ea0d737 856}
43377453 857
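/*
 * Illustrative sketch (assumed driver code): drivers reach the two
 * helpers above only through the streaming sync API, e.g. when the CPU
 * wants to look at a still-mapped DMA_FROM_DEVICE buffer:
 *
 *	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *	inspect(buf);
 *	dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 *
 * inspect() is a placeholder for whatever the CPU needs to do.
 */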
afd1a321 858/**
2a550e73 859 * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA
afd1a321
RK
860 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
861 * @sg: list of buffers
862 * @nents: number of buffers to map
863 * @dir: DMA transfer direction
864 *
865 * Map a set of buffers described by scatterlist in streaming mode for DMA.
866 * This is the scatter-gather version of the dma_map_single interface.
867 * Here the scatter gather list elements are each tagged with the
868 * appropriate dma address and length. They are obtained via
869 * sg_dma_{address,length}.
870 *
871 * Device ownership issues as mentioned for dma_map_single are the same
872 * here.
873 */
2dc6a016
MS
874int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
875 enum dma_data_direction dir, struct dma_attrs *attrs)
afd1a321 876{
2a550e73 877 struct dma_map_ops *ops = get_dma_ops(dev);
afd1a321 878 struct scatterlist *s;
01135d92 879 int i, j;
afd1a321
RK
880
881 for_each_sg(sg, s, nents, i) {
4ce63fcd
MS
882#ifdef CONFIG_NEED_SG_DMA_LENGTH
883 s->dma_length = s->length;
884#endif
2a550e73
MS
885 s->dma_address = ops->map_page(dev, sg_page(s), s->offset,
886 s->length, dir, attrs);
01135d92
RK
887 if (dma_mapping_error(dev, s->dma_address))
888 goto bad_mapping;
afd1a321 889 }
afd1a321 890 return nents;
01135d92
RK
891
892 bad_mapping:
893 for_each_sg(sg, s, i, j)
2a550e73 894 ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
01135d92 895 return 0;
afd1a321 896}
afd1a321
RK
897
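/*
 * Illustrative sketch (assumed driver code): scatter-gather mapping and
 * the walk over the resulting DMA addresses, e.g.
 *
 *	int n = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *	if (!n)
 *		return -ENOMEM;
 *	for_each_sg(sgl, sg, n, i)
 *		program_descriptor(sg_dma_address(sg), sg_dma_len(sg));
 *	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *
 * program_descriptor() is a placeholder for the device-specific part.
 */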
898/**
2a550e73 899 * arm_dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
afd1a321
RK
900 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
901 * @sg: list of buffers
0adfca6f 902 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
afd1a321
RK
903 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
904 *
905 * Unmap a set of streaming mode DMA translations. Again, CPU access
906 * rules concerning calls here are the same as for dma_unmap_single().
907 */
2dc6a016
MS
908void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
909 enum dma_data_direction dir, struct dma_attrs *attrs)
afd1a321 910{
2a550e73 911 struct dma_map_ops *ops = get_dma_ops(dev);
01135d92 912 struct scatterlist *s;
01135d92 913
01135d92 914 int i;
24056f52 915
01135d92 916 for_each_sg(sg, s, nents, i)
2a550e73 917 ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
afd1a321 918}
afd1a321
RK
919
920/**
2a550e73 921 * arm_dma_sync_sg_for_cpu
afd1a321
RK
922 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
923 * @sg: list of buffers
924 * @nents: number of buffers to map (returned from dma_map_sg)
925 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
926 */
2dc6a016 927void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
afd1a321
RK
928 int nents, enum dma_data_direction dir)
929{
2a550e73 930 struct dma_map_ops *ops = get_dma_ops(dev);
afd1a321
RK
931 struct scatterlist *s;
932 int i;
933
2a550e73
MS
934 for_each_sg(sg, s, nents, i)
935 ops->sync_single_for_cpu(dev, sg_dma_address(s), s->length,
936 dir);
afd1a321 937}
afd1a321
RK
938
939/**
2a550e73 940 * arm_dma_sync_sg_for_device
afd1a321
RK
941 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
942 * @sg: list of buffers
943 * @nents: number of buffers to map (returned from dma_map_sg)
944 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
945 */
2dc6a016 946void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
afd1a321
RK
947 int nents, enum dma_data_direction dir)
948{
2a550e73 949 struct dma_map_ops *ops = get_dma_ops(dev);
afd1a321
RK
950 struct scatterlist *s;
951 int i;
952
2a550e73
MS
953 for_each_sg(sg, s, nents, i)
954 ops->sync_single_for_device(dev, sg_dma_address(s), s->length,
955 dir);
afd1a321 956}
24056f52 957
022ae537
RK
958/*
959 * Return whether the given device DMA address mask can be supported
960 * properly. For example, if your device can only drive the low 24-bits
961 * during bus mastering, then you would pass 0x00ffffff as the mask
962 * to this function.
963 */
964int dma_supported(struct device *dev, u64 mask)
965{
966 if (mask < (u64)arm_dma_limit)
967 return 0;
968 return 1;
969}
970EXPORT_SYMBOL(dma_supported);
971
87b54e78 972int arm_dma_set_mask(struct device *dev, u64 dma_mask)
022ae537
RK
973{
974 if (!dev->dma_mask || !dma_supported(dev, dma_mask))
975 return -EIO;
976
022ae537 977 *dev->dma_mask = dma_mask;
022ae537
RK
978
979 return 0;
980}
022ae537 981
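/*
 * Illustrative sketch (assumed driver code): a device that can only
 * master 24 address bits would announce that with
 *
 *	if (dma_set_mask(dev, DMA_BIT_MASK(24)))
 *		dev_err(dev, "no suitable DMA available\n");
 *
 * which lands in arm_dma_set_mask() via the .set_dma_mask hook above.
 */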
24056f52
RK
982#define PREALLOC_DMA_DEBUG_ENTRIES 4096
983
984static int __init dma_debug_do_init(void)
985{
986 dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
987 return 0;
988}
989fs_initcall(dma_debug_do_init);
4ce63fcd
MS
990
991#ifdef CONFIG_ARM_DMA_USE_IOMMU
992
993/* IOMMU */
994
995static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
996 size_t size)
997{
998 unsigned int order = get_order(size);
999 unsigned int align = 0;
1000 unsigned int count, start;
1001 unsigned long flags;
1002
1003 count = ((PAGE_ALIGN(size) >> PAGE_SHIFT) +
1004 (1 << mapping->order) - 1) >> mapping->order;
1005
1006 if (order > mapping->order)
1007 align = (1 << (order - mapping->order)) - 1;
1008
1009 spin_lock_irqsave(&mapping->lock, flags);
1010 start = bitmap_find_next_zero_area(mapping->bitmap, mapping->bits, 0,
1011 count, align);
1012 if (start > mapping->bits) {
1013 spin_unlock_irqrestore(&mapping->lock, flags);
1014 return DMA_ERROR_CODE;
1015 }
1016
1017 bitmap_set(mapping->bitmap, start, count);
1018 spin_unlock_irqrestore(&mapping->lock, flags);
1019
1020 return mapping->base + (start << (mapping->order + PAGE_SHIFT));
1021}
1022
1023static inline void __free_iova(struct dma_iommu_mapping *mapping,
1024 dma_addr_t addr, size_t size)
1025{
1026 unsigned int start = (addr - mapping->base) >>
1027 (mapping->order + PAGE_SHIFT);
1028 unsigned int count = ((size >> PAGE_SHIFT) +
1029 (1 << mapping->order) - 1) >> mapping->order;
1030 unsigned long flags;
1031
1032 spin_lock_irqsave(&mapping->lock, flags);
1033 bitmap_clear(mapping->bitmap, start, count);
1034 spin_unlock_irqrestore(&mapping->lock, flags);
1035}
1036
549a17e4
MS
1037static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
1038 gfp_t gfp, struct dma_attrs *attrs)
4ce63fcd
MS
1039{
1040 struct page **pages;
1041 int count = size >> PAGE_SHIFT;
1042 int array_size = count * sizeof(struct page *);
1043 int i = 0;
1044
1045 if (array_size <= PAGE_SIZE)
1046 pages = kzalloc(array_size, gfp);
1047 else
1048 pages = vzalloc(array_size);
1049 if (!pages)
1050 return NULL;
1051
549a17e4
MS
1052 if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs))
1053 {
1054 unsigned long order = get_order(size);
1055 struct page *page;
1056
1057 page = dma_alloc_from_contiguous(dev, count, order);
1058 if (!page)
1059 goto error;
1060
1061 __dma_clear_buffer(page, size);
1062
1063 for (i = 0; i < count; i++)
1064 pages[i] = page + i;
1065
1066 return pages;
1067 }
1068
4ce63fcd 1069 while (count) {
593f4735 1070 int j, order = __fls(count);
4ce63fcd
MS
1071
1072 pages[i] = alloc_pages(gfp | __GFP_NOWARN, order);
1073 while (!pages[i] && order)
1074 pages[i] = alloc_pages(gfp | __GFP_NOWARN, --order);
1075 if (!pages[i])
1076 goto error;
1077
5a796eeb 1078 if (order) {
4ce63fcd 1079 split_page(pages[i], order);
5a796eeb
HD
1080 j = 1 << order;
1081 while (--j)
1082 pages[i + j] = pages[i] + j;
1083 }
4ce63fcd
MS
1084
1085 __dma_clear_buffer(pages[i], PAGE_SIZE << order);
1086 i += 1 << order;
1087 count -= 1 << order;
1088 }
1089
1090 return pages;
1091error:
9fa8af91 1092 while (i--)
4ce63fcd
MS
1093 if (pages[i])
1094 __free_pages(pages[i], 0);
46c87852 1095 if (array_size <= PAGE_SIZE)
4ce63fcd
MS
1096 kfree(pages);
1097 else
1098 vfree(pages);
1099 return NULL;
1100}
1101
549a17e4
MS
1102static int __iommu_free_buffer(struct device *dev, struct page **pages,
1103 size_t size, struct dma_attrs *attrs)
4ce63fcd
MS
1104{
1105 int count = size >> PAGE_SHIFT;
1106 int array_size = count * sizeof(struct page *);
1107 int i;
549a17e4
MS
1108
1109 if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) {
1110 dma_release_from_contiguous(dev, pages[0], count);
1111 } else {
1112 for (i = 0; i < count; i++)
1113 if (pages[i])
1114 __free_pages(pages[i], 0);
1115 }
1116
46c87852 1117 if (array_size <= PAGE_SIZE)
4ce63fcd
MS
1118 kfree(pages);
1119 else
1120 vfree(pages);
1121 return 0;
1122}
1123
1124/*
 1125 * Create a CPU mapping for the specified pages
1126 */
1127static void *
e9da6e99
MS
1128__iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
1129 const void *caller)
4ce63fcd 1130{
e9da6e99
MS
1131 unsigned int i, nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
1132 struct vm_struct *area;
1133 unsigned long p;
4ce63fcd 1134
e9da6e99
MS
1135 area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP,
1136 caller);
1137 if (!area)
4ce63fcd 1138 return NULL;
4ce63fcd 1139
e9da6e99
MS
1140 area->pages = pages;
1141 area->nr_pages = nr_pages;
1142 p = (unsigned long)area->addr;
4ce63fcd 1143
e9da6e99
MS
1144 for (i = 0; i < nr_pages; i++) {
1145 phys_addr_t phys = __pfn_to_phys(page_to_pfn(pages[i]));
1146 if (ioremap_page_range(p, p + PAGE_SIZE, phys, prot))
1147 goto err;
1148 p += PAGE_SIZE;
4ce63fcd 1149 }
e9da6e99
MS
1150 return area->addr;
1151err:
1152 unmap_kernel_range((unsigned long)area->addr, size);
1153 vunmap(area->addr);
4ce63fcd
MS
1154 return NULL;
1155}
1156
1157/*
1158 * Create a mapping in device IO address space for specified pages
1159 */
1160static dma_addr_t
1161__iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
1162{
1163 struct dma_iommu_mapping *mapping = dev->archdata.mapping;
1164 unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
1165 dma_addr_t dma_addr, iova;
1166 int i, ret = DMA_ERROR_CODE;
1167
1168 dma_addr = __alloc_iova(mapping, size);
1169 if (dma_addr == DMA_ERROR_CODE)
1170 return dma_addr;
1171
1172 iova = dma_addr;
1173 for (i = 0; i < count; ) {
1174 unsigned int next_pfn = page_to_pfn(pages[i]) + 1;
1175 phys_addr_t phys = page_to_phys(pages[i]);
1176 unsigned int len, j;
1177
1178 for (j = i + 1; j < count; j++, next_pfn++)
1179 if (page_to_pfn(pages[j]) != next_pfn)
1180 break;
1181
1182 len = (j - i) << PAGE_SHIFT;
1183 ret = iommu_map(mapping->domain, iova, phys, len, 0);
1184 if (ret < 0)
1185 goto fail;
1186 iova += len;
1187 i = j;
1188 }
1189 return dma_addr;
1190fail:
1191 iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
1192 __free_iova(mapping, dma_addr, size);
1193 return DMA_ERROR_CODE;
1194}
1195
1196static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
1197{
1198 struct dma_iommu_mapping *mapping = dev->archdata.mapping;
1199
1200 /*
1201 * add optional in-page offset from iova to size and align
1202 * result to page size
1203 */
1204 size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
1205 iova &= PAGE_MASK;
1206
1207 iommu_unmap(mapping->domain, iova, size);
1208 __free_iova(mapping, iova, size);
1209 return 0;
1210}
1211
665bad7b
HD
1212static struct page **__atomic_get_pages(void *addr)
1213{
1214 struct dma_pool *pool = &atomic_pool;
1215 struct page **pages = pool->pages;
1216 int offs = (addr - pool->vaddr) >> PAGE_SHIFT;
1217
1218 return pages + offs;
1219}
1220
955c757e 1221static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
e9da6e99
MS
1222{
1223 struct vm_struct *area;
1224
665bad7b
HD
1225 if (__in_atomic_pool(cpu_addr, PAGE_SIZE))
1226 return __atomic_get_pages(cpu_addr);
1227
955c757e
MS
1228 if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
1229 return cpu_addr;
1230
e9da6e99
MS
1231 area = find_vm_area(cpu_addr);
1232 if (area && (area->flags & VM_ARM_DMA_CONSISTENT))
1233 return area->pages;
1234 return NULL;
1235}
1236
479ed93a
HD
1237static void *__iommu_alloc_atomic(struct device *dev, size_t size,
1238 dma_addr_t *handle)
1239{
1240 struct page *page;
1241 void *addr;
1242
1243 addr = __alloc_from_pool(size, &page);
1244 if (!addr)
1245 return NULL;
1246
1247 *handle = __iommu_create_mapping(dev, &page, size);
1248 if (*handle == DMA_ERROR_CODE)
1249 goto err_mapping;
1250
1251 return addr;
1252
1253err_mapping:
1254 __free_from_pool(addr, size);
1255 return NULL;
1256}
1257
1258static void __iommu_free_atomic(struct device *dev, struct page **pages,
1259 dma_addr_t handle, size_t size)
1260{
1261 __iommu_remove_mapping(dev, handle, size);
1262 __free_from_pool(page_address(pages[0]), size);
1263}
1264
4ce63fcd
MS
1265static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
1266 dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
1267{
1268 pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
1269 struct page **pages;
1270 void *addr = NULL;
1271
1272 *handle = DMA_ERROR_CODE;
1273 size = PAGE_ALIGN(size);
1274
479ed93a
HD
1275 if (gfp & GFP_ATOMIC)
1276 return __iommu_alloc_atomic(dev, size, handle);
1277
549a17e4 1278 pages = __iommu_alloc_buffer(dev, size, gfp, attrs);
4ce63fcd
MS
1279 if (!pages)
1280 return NULL;
1281
1282 *handle = __iommu_create_mapping(dev, pages, size);
1283 if (*handle == DMA_ERROR_CODE)
1284 goto err_buffer;
1285
955c757e
MS
1286 if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
1287 return pages;
1288
e9da6e99
MS
1289 addr = __iommu_alloc_remap(pages, size, gfp, prot,
1290 __builtin_return_address(0));
4ce63fcd
MS
1291 if (!addr)
1292 goto err_mapping;
1293
1294 return addr;
1295
1296err_mapping:
1297 __iommu_remove_mapping(dev, *handle, size);
1298err_buffer:
549a17e4 1299 __iommu_free_buffer(dev, pages, size, attrs);
4ce63fcd
MS
1300 return NULL;
1301}
1302
1303static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
1304 void *cpu_addr, dma_addr_t dma_addr, size_t size,
1305 struct dma_attrs *attrs)
1306{
e9da6e99
MS
1307 unsigned long uaddr = vma->vm_start;
1308 unsigned long usize = vma->vm_end - vma->vm_start;
955c757e 1309 struct page **pages = __iommu_get_pages(cpu_addr, attrs);
4ce63fcd
MS
1310
1311 vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
4ce63fcd 1312
e9da6e99
MS
1313 if (!pages)
1314 return -ENXIO;
4ce63fcd 1315
e9da6e99
MS
1316 do {
1317 int ret = vm_insert_page(vma, uaddr, *pages++);
1318 if (ret) {
1319 pr_err("Remapping memory failed: %d\n", ret);
1320 return ret;
1321 }
1322 uaddr += PAGE_SIZE;
1323 usize -= PAGE_SIZE;
1324 } while (usize > 0);
4ce63fcd 1325
4ce63fcd
MS
1326 return 0;
1327}
1328
1329/*
 1330 * Free a buffer as defined by the above mapping.
1331 * Must not be called with IRQs disabled.
1332 */
1333void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
1334 dma_addr_t handle, struct dma_attrs *attrs)
1335{
955c757e 1336 struct page **pages = __iommu_get_pages(cpu_addr, attrs);
4ce63fcd
MS
1337 size = PAGE_ALIGN(size);
1338
e9da6e99
MS
1339 if (!pages) {
1340 WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
1341 return;
4ce63fcd 1342 }
e9da6e99 1343
479ed93a
HD
1344 if (__in_atomic_pool(cpu_addr, size)) {
1345 __iommu_free_atomic(dev, pages, handle, size);
1346 return;
1347 }
1348
955c757e
MS
1349 if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) {
1350 unmap_kernel_range((unsigned long)cpu_addr, size);
1351 vunmap(cpu_addr);
1352 }
e9da6e99
MS
1353
1354 __iommu_remove_mapping(dev, handle, size);
549a17e4 1355 __iommu_free_buffer(dev, pages, size, attrs);
4ce63fcd
MS
1356}
1357
dc2832e1
MS
1358static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
1359 void *cpu_addr, dma_addr_t dma_addr,
1360 size_t size, struct dma_attrs *attrs)
1361{
1362 unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
1363 struct page **pages = __iommu_get_pages(cpu_addr, attrs);
1364
1365 if (!pages)
1366 return -ENXIO;
1367
1368 return sg_alloc_table_from_pages(sgt, pages, count, 0, size,
1369 GFP_KERNEL);
4ce63fcd
MS
1370}
1371
1372/*
1373 * Map a part of the scatter-gather list into contiguous io address space
1374 */
1375static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
1376 size_t size, dma_addr_t *handle,
0fa478df
RH
1377 enum dma_data_direction dir, struct dma_attrs *attrs,
1378 bool is_coherent)
4ce63fcd
MS
1379{
1380 struct dma_iommu_mapping *mapping = dev->archdata.mapping;
1381 dma_addr_t iova, iova_base;
1382 int ret = 0;
1383 unsigned int count;
1384 struct scatterlist *s;
1385
1386 size = PAGE_ALIGN(size);
1387 *handle = DMA_ERROR_CODE;
1388
1389 iova_base = iova = __alloc_iova(mapping, size);
1390 if (iova == DMA_ERROR_CODE)
1391 return -ENOMEM;
1392
1393 for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
1394 phys_addr_t phys = page_to_phys(sg_page(s));
1395 unsigned int len = PAGE_ALIGN(s->offset + s->length);
1396
0fa478df
RH
1397 if (!is_coherent &&
1398 !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
4ce63fcd
MS
1399 __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
1400
1401 ret = iommu_map(mapping->domain, iova, phys, len, 0);
1402 if (ret < 0)
1403 goto fail;
1404 count += len >> PAGE_SHIFT;
1405 iova += len;
1406 }
1407 *handle = iova_base;
1408
1409 return 0;
1410fail:
1411 iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE);
1412 __free_iova(mapping, iova_base, size);
1413 return ret;
1414}
1415
0fa478df
RH
1416static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
1417 enum dma_data_direction dir, struct dma_attrs *attrs,
1418 bool is_coherent)
4ce63fcd
MS
1419{
1420 struct scatterlist *s = sg, *dma = sg, *start = sg;
1421 int i, count = 0;
1422 unsigned int offset = s->offset;
1423 unsigned int size = s->offset + s->length;
1424 unsigned int max = dma_get_max_seg_size(dev);
1425
1426 for (i = 1; i < nents; i++) {
1427 s = sg_next(s);
1428
1429 s->dma_address = DMA_ERROR_CODE;
1430 s->dma_length = 0;
1431
1432 if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
1433 if (__map_sg_chunk(dev, start, size, &dma->dma_address,
0fa478df 1434 dir, attrs, is_coherent) < 0)
4ce63fcd
MS
1435 goto bad_mapping;
1436
1437 dma->dma_address += offset;
1438 dma->dma_length = size - offset;
1439
1440 size = offset = s->offset;
1441 start = s;
1442 dma = sg_next(dma);
1443 count += 1;
1444 }
1445 size += s->length;
1446 }
0fa478df
RH
1447 if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs,
1448 is_coherent) < 0)
4ce63fcd
MS
1449 goto bad_mapping;
1450
1451 dma->dma_address += offset;
1452 dma->dma_length = size - offset;
1453
1454 return count+1;
1455
1456bad_mapping:
1457 for_each_sg(sg, s, count, i)
1458 __iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s));
1459 return 0;
1460}
1461
1462/**
0fa478df 1463 * arm_coherent_iommu_map_sg - map a set of SG buffers for streaming mode DMA
4ce63fcd
MS
1464 * @dev: valid struct device pointer
1465 * @sg: list of buffers
0fa478df
RH
1466 * @nents: number of buffers to map
1467 * @dir: DMA transfer direction
4ce63fcd 1468 *
0fa478df
RH
1469 * Map a set of i/o coherent buffers described by scatterlist in streaming
1470 * mode for DMA. The scatter gather list elements are merged together (if
1471 * possible) and tagged with the appropriate dma address and length. They are
1472 * obtained via sg_dma_{address,length}.
4ce63fcd 1473 */
0fa478df
RH
1474int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg,
1475 int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
1476{
1477 return __iommu_map_sg(dev, sg, nents, dir, attrs, true);
1478}
1479
1480/**
1481 * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
1482 * @dev: valid struct device pointer
1483 * @sg: list of buffers
1484 * @nents: number of buffers to map
1485 * @dir: DMA transfer direction
1486 *
1487 * Map a set of buffers described by scatterlist in streaming mode for DMA.
1488 * The scatter gather list elements are merged together (if possible) and
1489 * tagged with the appropriate dma address and length. They are obtained via
1490 * sg_dma_{address,length}.
1491 */
1492int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
1493 int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
1494{
1495 return __iommu_map_sg(dev, sg, nents, dir, attrs, false);
1496}
1497
1498static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
1499 int nents, enum dma_data_direction dir, struct dma_attrs *attrs,
1500 bool is_coherent)
4ce63fcd
MS
1501{
1502 struct scatterlist *s;
1503 int i;
1504
1505 for_each_sg(sg, s, nents, i) {
1506 if (sg_dma_len(s))
1507 __iommu_remove_mapping(dev, sg_dma_address(s),
1508 sg_dma_len(s));
0fa478df 1509 if (!is_coherent &&
97ef952a 1510 !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
4ce63fcd
MS
1511 __dma_page_dev_to_cpu(sg_page(s), s->offset,
1512 s->length, dir);
1513 }
1514}
1515
0fa478df
RH
1516/**
1517 * arm_coherent_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
1518 * @dev: valid struct device pointer
1519 * @sg: list of buffers
1520 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
1521 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
1522 *
1523 * Unmap a set of streaming mode DMA translations. Again, CPU access
1524 * rules concerning calls here are the same as for dma_unmap_single().
1525 */
1526void arm_coherent_iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
1527 int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
1528{
1529 __iommu_unmap_sg(dev, sg, nents, dir, attrs, true);
1530}
1531
1532/**
1533 * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
1534 * @dev: valid struct device pointer
1535 * @sg: list of buffers
1536 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
1537 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
1538 *
1539 * Unmap a set of streaming mode DMA translations. Again, CPU access
1540 * rules concerning calls here are the same as for dma_unmap_single().
1541 */
1542void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
1543 enum dma_data_direction dir, struct dma_attrs *attrs)
1544{
1545 __iommu_unmap_sg(dev, sg, nents, dir, attrs, false);
1546}
1547
4ce63fcd
MS
1548/**
1549 * arm_iommu_sync_sg_for_cpu
1550 * @dev: valid struct device pointer
1551 * @sg: list of buffers
1552 * @nents: number of buffers to map (returned from dma_map_sg)
1553 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
1554 */
1555void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
1556 int nents, enum dma_data_direction dir)
1557{
1558 struct scatterlist *s;
1559 int i;
1560
1561 for_each_sg(sg, s, nents, i)
0fa478df 1562 __dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
4ce63fcd
MS
1563
1564}
1565
1566/**
1567 * arm_iommu_sync_sg_for_device
1568 * @dev: valid struct device pointer
1569 * @sg: list of buffers
1570 * @nents: number of buffers to map (returned from dma_map_sg)
1571 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
1572 */
1573void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
1574 int nents, enum dma_data_direction dir)
1575{
1576 struct scatterlist *s;
1577 int i;
1578
1579 for_each_sg(sg, s, nents, i)
0fa478df 1580 __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
4ce63fcd
MS
1581}
1582
1583
1584/**
0fa478df 1585 * arm_coherent_iommu_map_page
4ce63fcd
MS
1586 * @dev: valid struct device pointer
1587 * @page: page that buffer resides in
1588 * @offset: offset into page for start of buffer
1589 * @size: size of buffer to map
1590 * @dir: DMA transfer direction
1591 *
0fa478df 1592 * Coherent IOMMU aware version of arm_dma_map_page()
4ce63fcd 1593 */
0fa478df 1594static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *page,
4ce63fcd
MS
1595 unsigned long offset, size_t size, enum dma_data_direction dir,
1596 struct dma_attrs *attrs)
1597{
1598 struct dma_iommu_mapping *mapping = dev->archdata.mapping;
1599 dma_addr_t dma_addr;
1600 int ret, len = PAGE_ALIGN(size + offset);
1601
4ce63fcd
MS
1602 dma_addr = __alloc_iova(mapping, len);
1603 if (dma_addr == DMA_ERROR_CODE)
1604 return dma_addr;
1605
1606 ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, 0);
1607 if (ret < 0)
1608 goto fail;
1609
1610 return dma_addr + offset;
1611fail:
1612 __free_iova(mapping, dma_addr, len);
1613 return DMA_ERROR_CODE;
1614}
1615
0fa478df
RH
1616/**
1617 * arm_iommu_map_page
1618 * @dev: valid struct device pointer
1619 * @page: page that buffer resides in
1620 * @offset: offset into page for start of buffer
1621 * @size: size of buffer to map
1622 * @dir: DMA transfer direction
1623 *
1624 * IOMMU aware version of arm_dma_map_page()
1625 */
1626static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
1627 unsigned long offset, size_t size, enum dma_data_direction dir,
1628 struct dma_attrs *attrs)
1629{
1630 if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
1631 __dma_page_cpu_to_dev(page, offset, size, dir);
1632
1633 return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs);
1634}
1635
1636/**
1637 * arm_coherent_iommu_unmap_page
1638 * @dev: valid struct device pointer
1639 * @handle: DMA address of buffer
1640 * @size: size of buffer (same as passed to dma_map_page)
1641 * @dir: DMA transfer direction (same as passed to dma_map_page)
1642 *
1643 * Coherent IOMMU aware version of arm_dma_unmap_page()
1644 */
1645static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle,
1646 size_t size, enum dma_data_direction dir,
1647 struct dma_attrs *attrs)
1648{
1649 struct dma_iommu_mapping *mapping = dev->archdata.mapping;
1650 dma_addr_t iova = handle & PAGE_MASK;
0fa478df
RH
1651 int offset = handle & ~PAGE_MASK;
1652 int len = PAGE_ALIGN(size + offset);
1653
1654 if (!iova)
1655 return;
1656
1657 iommu_unmap(mapping->domain, iova, len);
1658 __free_iova(mapping, iova, len);
1659}
1660
4ce63fcd
MS
1661/**
1662 * arm_iommu_unmap_page
1663 * @dev: valid struct device pointer
1664 * @handle: DMA address of buffer
1665 * @size: size of buffer (same as passed to dma_map_page)
1666 * @dir: DMA transfer direction (same as passed to dma_map_page)
1667 *
1668 * IOMMU aware version of arm_dma_unmap_page()
1669 */
1670static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
1671 size_t size, enum dma_data_direction dir,
1672 struct dma_attrs *attrs)
1673{
1674 struct dma_iommu_mapping *mapping = dev->archdata.mapping;
1675 dma_addr_t iova = handle & PAGE_MASK;
1676 struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
1677 int offset = handle & ~PAGE_MASK;
1678 int len = PAGE_ALIGN(size + offset);
1679
1680 if (!iova)
1681 return;
1682
0fa478df 1683 if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
4ce63fcd
MS
1684 __dma_page_dev_to_cpu(page, offset, size, dir);
1685
1686 iommu_unmap(mapping->domain, iova, len);
1687 __free_iova(mapping, iova, len);
1688}
1689
1690static void arm_iommu_sync_single_for_cpu(struct device *dev,
1691 dma_addr_t handle, size_t size, enum dma_data_direction dir)
1692{
1693 struct dma_iommu_mapping *mapping = dev->archdata.mapping;
1694 dma_addr_t iova = handle & PAGE_MASK;
1695 struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
1696 unsigned int offset = handle & ~PAGE_MASK;
1697
1698 if (!iova)
1699 return;
1700
0fa478df 1701 __dma_page_dev_to_cpu(page, offset, size, dir);
4ce63fcd
MS
1702}
1703
1704static void arm_iommu_sync_single_for_device(struct device *dev,
1705 dma_addr_t handle, size_t size, enum dma_data_direction dir)
1706{
1707 struct dma_iommu_mapping *mapping = dev->archdata.mapping;
1708 dma_addr_t iova = handle & PAGE_MASK;
1709 struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
1710 unsigned int offset = handle & ~PAGE_MASK;
1711
1712 if (!iova)
1713 return;
1714
1715 __dma_page_cpu_to_dev(page, offset, size, dir);
1716}
1717
1718struct dma_map_ops iommu_ops = {
1719 .alloc = arm_iommu_alloc_attrs,
1720 .free = arm_iommu_free_attrs,
1721 .mmap = arm_iommu_mmap_attrs,
dc2832e1 1722 .get_sgtable = arm_iommu_get_sgtable,
4ce63fcd
MS
1723
1724 .map_page = arm_iommu_map_page,
1725 .unmap_page = arm_iommu_unmap_page,
1726 .sync_single_for_cpu = arm_iommu_sync_single_for_cpu,
1727 .sync_single_for_device = arm_iommu_sync_single_for_device,
1728
1729 .map_sg = arm_iommu_map_sg,
1730 .unmap_sg = arm_iommu_unmap_sg,
1731 .sync_sg_for_cpu = arm_iommu_sync_sg_for_cpu,
1732 .sync_sg_for_device = arm_iommu_sync_sg_for_device,
1733};
1734
0fa478df
RH
1735struct dma_map_ops iommu_coherent_ops = {
1736 .alloc = arm_iommu_alloc_attrs,
1737 .free = arm_iommu_free_attrs,
1738 .mmap = arm_iommu_mmap_attrs,
1739 .get_sgtable = arm_iommu_get_sgtable,
1740
1741 .map_page = arm_coherent_iommu_map_page,
1742 .unmap_page = arm_coherent_iommu_unmap_page,
1743
1744 .map_sg = arm_coherent_iommu_map_sg,
1745 .unmap_sg = arm_coherent_iommu_unmap_sg,
1746};
1747
4ce63fcd
MS
1748/**
1749 * arm_iommu_create_mapping
1750 * @bus: pointer to the bus holding the client device (for IOMMU calls)
1751 * @base: start address of the valid IO address space
1752 * @size: size of the valid IO address space
 1753 * @order: accuracy of the IO address allocations
1754 *
1755 * Creates a mapping structure which holds information about used/unused
1756 * IO address ranges, which is required to perform memory allocation and
1757 * mapping with IOMMU aware functions.
1758 *
 1759 * The client device needs to be attached to the mapping with the
 1760 * arm_iommu_attach_device() function.
1761 */
1762struct dma_iommu_mapping *
1763arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size,
1764 int order)
1765{
1766 unsigned int count = size >> (PAGE_SHIFT + order);
1767 unsigned int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
1768 struct dma_iommu_mapping *mapping;
1769 int err = -ENOMEM;
1770
1771 if (!count)
1772 return ERR_PTR(-EINVAL);
1773
1774 mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL);
1775 if (!mapping)
1776 goto err;
1777
1778 mapping->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
1779 if (!mapping->bitmap)
1780 goto err2;
1781
1782 mapping->base = base;
1783 mapping->bits = BITS_PER_BYTE * bitmap_size;
1784 mapping->order = order;
1785 spin_lock_init(&mapping->lock);
1786
1787 mapping->domain = iommu_domain_alloc(bus);
1788 if (!mapping->domain)
1789 goto err3;
1790
1791 kref_init(&mapping->kref);
1792 return mapping;
1793err3:
1794 kfree(mapping->bitmap);
1795err2:
1796 kfree(mapping);
1797err:
1798 return ERR_PTR(err);
1799}
1800
1801static void release_iommu_mapping(struct kref *kref)
1802{
1803 struct dma_iommu_mapping *mapping =
1804 container_of(kref, struct dma_iommu_mapping, kref);
1805
1806 iommu_domain_free(mapping->domain);
1807 kfree(mapping->bitmap);
1808 kfree(mapping);
1809}
1810
1811void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
1812{
1813 if (mapping)
1814 kref_put(&mapping->kref, release_iommu_mapping);
1815}
1816
1817/**
1818 * arm_iommu_attach_device
1819 * @dev: valid struct device pointer
1820 * @mapping: io address space mapping structure (returned from
1821 * arm_iommu_create_mapping)
1822 *
1823 * Attaches specified io address space mapping to the provided device,
1824 * this replaces the dma operations (dma_map_ops pointer) with the
1825 * IOMMU aware version. More than one client might be attached to
1826 * the same io address space mapping.
1827 */
1828int arm_iommu_attach_device(struct device *dev,
1829 struct dma_iommu_mapping *mapping)
1830{
1831 int err;
1832
1833 err = iommu_attach_device(mapping->domain, dev);
1834 if (err)
1835 return err;
1836
1837 kref_get(&mapping->kref);
1838 dev->archdata.mapping = mapping;
1839 set_dma_ops(dev, &iommu_ops);
1840
75c59716 1841 pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
4ce63fcd
MS
1842 return 0;
1843}
1844
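/*
 * Illustrative sketch (assumed platform code): setting up a 128 MiB IOMMU
 * address space at 0x10000000 with page granularity and attaching a
 * device to it, e.g.
 *
 *	struct dma_iommu_mapping *m;
 *	m = arm_iommu_create_mapping(&platform_bus_type, 0x10000000,
 *				     SZ_128M, 0);
 *	if (!IS_ERR(m) && arm_iommu_attach_device(dev, m))
 *		arm_iommu_release_mapping(m);
 *
 * After a successful attach the device's dma_map_ops point at iommu_ops.
 */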
1845#endif