/*
 * SWIOTLB-based DMA API implementation
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/genalloc.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>

#include <asm/cacheflush.h>

struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

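/*
 * Pick the pgprot for a DMA mapping: non-coherent devices (and
 * DMA_ATTR_WRITE_COMBINE requests) get a write-combining mapping,
 * coherent devices keep the protection bits they were given.
 */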
static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot,
				 bool coherent)
{
	if (!coherent || dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
		return pgprot_writecombine(prot);
	return prot;
}

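/*
 * Small pre-allocated pool backing non-blocking (atomic) allocations for
 * non-coherent devices; its size can be overridden on the command line
 * with "coherent_pool=<size>".
 */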
static struct gen_pool *atomic_pool;

#define DEFAULT_DMA_COHERENT_POOL_SIZE	SZ_256K
static size_t atomic_pool_size = DEFAULT_DMA_COHERENT_POOL_SIZE;

static int __init early_coherent_pool(char *p)
{
	atomic_pool_size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);

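/*
 * Allocation, membership test and free helpers for the atomic pool.
 * __alloc_from_pool() hands back both the virtual address and the backing
 * page so the caller can derive the DMA address.
 */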
static void *__alloc_from_pool(size_t size, struct page **ret_page)
{
	unsigned long val;
	void *ptr = NULL;

	if (!atomic_pool) {
		WARN(1, "coherent pool not initialised!\n");
		return NULL;
	}

	val = gen_pool_alloc(atomic_pool, size);
	if (val) {
		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);

		*ret_page = phys_to_page(phys);
		ptr = (void *)val;
	}

	return ptr;
}

static bool __in_atomic_pool(void *start, size_t size)
{
	return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
}

static int __free_from_pool(void *start, size_t size)
{
	if (!__in_atomic_pool(start, size))
		return 0;

	gen_pool_free(atomic_pool, (unsigned long)start, size);

	return 1;
}

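/*
 * Backend coherent allocator: use CMA when it is available and the caller
 * may sleep, otherwise fall back to swiotlb_alloc_coherent(). Devices with
 * a coherent DMA mask of 32 bits or less are steered to ZONE_DMA.
 */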
static void *__dma_alloc_coherent(struct device *dev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flags,
				  struct dma_attrs *attrs)
{
	if (dev == NULL) {
		WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
		return NULL;
	}

	if (IS_ENABLED(CONFIG_ZONE_DMA) &&
	    dev->coherent_dma_mask <= DMA_BIT_MASK(32))
		flags |= GFP_DMA;
	if (IS_ENABLED(CONFIG_DMA_CMA) && (flags & __GFP_WAIT)) {
		struct page *page;

		size = PAGE_ALIGN(size);
		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
						 get_order(size));
		if (!page)
			return NULL;

		*dma_handle = phys_to_dma(dev, page_to_phys(page));
		return page_address(page);
	}

	return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
}

static void __dma_free_coherent(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle,
				struct dma_attrs *attrs)
{
	bool freed;
	phys_addr_t paddr = dma_to_phys(dev, dma_handle);

	if (dev == NULL) {
		WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
		return;
	}

	freed = dma_release_from_contiguous(dev,
					    phys_to_page(paddr),
					    size >> PAGE_SHIFT);
	if (!freed)
		swiotlb_free_coherent(dev, size, vaddr, dma_handle);
}

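/*
 * Top-level .alloc hook. Non-blocking allocations for non-coherent devices
 * come from the atomic pool; everything else is allocated coherently and,
 * for non-coherent devices, remapped with non-cacheable attributes after
 * flushing the cacheable kernel alias.
 */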
static void *__dma_alloc(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t flags,
			 struct dma_attrs *attrs)
{
	struct page *page;
	void *ptr, *coherent_ptr;
	bool coherent = is_device_dma_coherent(dev);

	size = PAGE_ALIGN(size);

	if (!coherent && !(flags & __GFP_WAIT)) {
		struct page *page = NULL;
		void *addr = __alloc_from_pool(size, &page);

		if (addr)
			*dma_handle = phys_to_dma(dev, page_to_phys(page));

		return addr;
	}

	ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs);
	if (!ptr)
		goto no_mem;

	/* no need for non-cacheable mapping if coherent */
	if (coherent)
		return ptr;

	/* remove any dirty cache lines on the kernel alias */
	__dma_flush_range(ptr, ptr + size);

	/* create a coherent mapping */
	page = virt_to_page(ptr);
	coherent_ptr = dma_common_contiguous_remap(page, size, VM_USERMAP,
				__get_dma_pgprot(attrs,
					__pgprot(PROT_NORMAL_NC), false),
				NULL);
	if (!coherent_ptr)
		goto no_map;

	return coherent_ptr;

no_map:
	__dma_free_coherent(dev, size, ptr, *dma_handle, attrs);
no_mem:
	*dma_handle = DMA_ERROR_CODE;
	return NULL;
}

static void __dma_free(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle,
		       struct dma_attrs *attrs)
{
	void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));

	if (!is_device_dma_coherent(dev)) {
		if (__free_from_pool(vaddr, size))
			return;
		vunmap(vaddr);
	}
	__dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs);
}

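/*
 * Streaming DMA wrappers: defer to the generic swiotlb implementation and
 * add the cache maintenance (__dma_map_area/__dma_unmap_area) required for
 * non-coherent devices around each map, unmap and sync operation.
 */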
static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction dir,
				     struct dma_attrs *attrs)
{
	dma_addr_t dev_addr;

	dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs);
	if (!is_device_dma_coherent(dev))
		__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);

	return dev_addr;
}

static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
				 size_t size, enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	if (!is_device_dma_coherent(dev))
		__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
	swiotlb_unmap_page(dev, dev_addr, size, dir, attrs);
}

static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
				  int nelems, enum dma_data_direction dir,
				  struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i, ret;

	ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs);
	if (!is_device_dma_coherent(dev))
		for_each_sg(sgl, sg, ret, i)
			__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
				       sg->length, dir);

	return ret;
}

static void __swiotlb_unmap_sg_attrs(struct device *dev,
				     struct scatterlist *sgl, int nelems,
				     enum dma_data_direction dir,
				     struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	if (!is_device_dma_coherent(dev))
		for_each_sg(sgl, sg, nelems, i)
			__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
					 sg->length, dir);
	swiotlb_unmap_sg_attrs(dev, sgl, nelems, dir, attrs);
}

static void __swiotlb_sync_single_for_cpu(struct device *dev,
					  dma_addr_t dev_addr, size_t size,
					  enum dma_data_direction dir)
{
	if (!is_device_dma_coherent(dev))
		__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
	swiotlb_sync_single_for_cpu(dev, dev_addr, size, dir);
}

static void __swiotlb_sync_single_for_device(struct device *dev,
					     dma_addr_t dev_addr, size_t size,
					     enum dma_data_direction dir)
{
	swiotlb_sync_single_for_device(dev, dev_addr, size, dir);
	if (!is_device_dma_coherent(dev))
		__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
}

static void __swiotlb_sync_sg_for_cpu(struct device *dev,
				      struct scatterlist *sgl, int nelems,
				      enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (!is_device_dma_coherent(dev))
		for_each_sg(sgl, sg, nelems, i)
			__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
					 sg->length, dir);
	swiotlb_sync_sg_for_cpu(dev, sgl, nelems, dir);
}

static void __swiotlb_sync_sg_for_device(struct device *dev,
					 struct scatterlist *sgl, int nelems,
					 enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	swiotlb_sync_sg_for_device(dev, sgl, nelems, dir);
	if (!is_device_dma_coherent(dev))
		for_each_sg(sgl, sg, nelems, i)
			__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
				       sg->length, dir);
}

/* vma->vm_page_prot must be set appropriately before calling this function */
static int __dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
			     void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	int ret = -ENXIO;
	unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >>
					PAGE_SHIFT;
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;

	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      vma->vm_end - vma->vm_start,
				      vma->vm_page_prot);
	}

	return ret;
}

static int __swiotlb_mmap(struct device *dev,
			  struct vm_area_struct *vma,
			  void *cpu_addr, dma_addr_t dma_addr, size_t size,
			  struct dma_attrs *attrs)
{
	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
					     is_device_dma_coherent(dev));
	return __dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}

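/*
 * The dma_map_ops table installed as the default dma_ops for arm64 by
 * arm64_dma_init() below.
 */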
static struct dma_map_ops swiotlb_dma_ops = {
	.alloc = __dma_alloc,
	.free = __dma_free,
	.mmap = __swiotlb_mmap,
	.map_page = __swiotlb_map_page,
	.unmap_page = __swiotlb_unmap_page,
	.map_sg = __swiotlb_map_sg_attrs,
	.unmap_sg = __swiotlb_unmap_sg_attrs,
	.sync_single_for_cpu = __swiotlb_sync_single_for_cpu,
	.sync_single_for_device = __swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = __swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = __swiotlb_sync_sg_for_device,
	.dma_supported = swiotlb_dma_supported,
	.mapping_error = swiotlb_dma_mapping_error,
};

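/*
 * Set up the atomic pool at boot: grab pages from CMA (or the page
 * allocator), zero and flush them via the cacheable linear mapping,
 * remap them non-cacheable and hand the region to a gen_pool allocator.
 */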
static int __init atomic_pool_init(void)
{
	pgprot_t prot = __pgprot(PROT_NORMAL_NC);
	unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT;
	struct page *page;
	void *addr;
	unsigned int pool_size_order = get_order(atomic_pool_size);

	if (dev_get_cma_area(NULL))
		page = dma_alloc_from_contiguous(NULL, nr_pages,
						 pool_size_order);
	else
		page = alloc_pages(GFP_DMA, pool_size_order);

	if (page) {
		int ret;
		void *page_addr = page_address(page);

		memset(page_addr, 0, atomic_pool_size);
		__dma_flush_range(page_addr, page_addr + atomic_pool_size);

		atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
		if (!atomic_pool)
			goto free_page;

		addr = dma_common_contiguous_remap(page, atomic_pool_size,
					VM_USERMAP, prot, atomic_pool_init);

		if (!addr)
			goto destroy_genpool;

		ret = gen_pool_add_virt(atomic_pool, (unsigned long)addr,
					page_to_phys(page),
					atomic_pool_size, -1);
		if (ret)
			goto remove_mapping;

		gen_pool_set_algo(atomic_pool,
				  gen_pool_first_fit_order_align,
				  (void *)PAGE_SHIFT);

		pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n",
			atomic_pool_size / 1024);
		return 0;
	}
	goto out;

remove_mapping:
	dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP);
destroy_genpool:
	gen_pool_destroy(atomic_pool);
	atomic_pool = NULL;
free_page:
	if (!dma_release_from_contiguous(NULL, page, nr_pages))
		__free_pages(page, pool_size_order);
out:
	pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
	       atomic_pool_size / 1024);
	return -ENOMEM;
}

static int __init arm64_dma_init(void)
{
	int ret;

	dma_ops = &swiotlb_dma_ops;

	ret = atomic_pool_init();

	return ret;
}
arch_initcall(arm64_dma_init);

#define PREALLOC_DMA_DEBUG_ENTRIES	4096

static int __init dma_debug_do_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_debug_do_init);