arch/arm64/mm/dma-mapping.c
/*
 * SWIOTLB-based DMA API implementation
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/genalloc.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>

#include <asm/cacheflush.h>

struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot,
                                 bool coherent)
{
        if (!coherent || dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
                return pgprot_writecombine(prot);
        return prot;
}

static struct gen_pool *atomic_pool;

#define DEFAULT_DMA_COHERENT_POOL_SIZE  SZ_256K
static size_t atomic_pool_size = DEFAULT_DMA_COHERENT_POOL_SIZE;

static int __init early_coherent_pool(char *p)
{
        atomic_pool_size = memparse(p, &p);
        return 0;
}
early_param("coherent_pool", early_coherent_pool);
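
/*
 * Note (added for clarity): the pool above backs atomic (non-blocking)
 * coherent allocations for non-coherent devices.  Its size can be
 * overridden on the kernel command line; memparse() accepts the usual
 * size suffixes, e.g. (illustrative):
 *
 *      coherent_pool=2M
 */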

static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
{
        unsigned long val;
        void *ptr = NULL;

        if (!atomic_pool) {
                WARN(1, "coherent pool not initialised!\n");
                return NULL;
        }

        val = gen_pool_alloc(atomic_pool, size);
        if (val) {
                phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);

                *ret_page = phys_to_page(phys);
                ptr = (void *)val;
                memset(ptr, 0, size);
        }

        return ptr;
}

static bool __in_atomic_pool(void *start, size_t size)
{
        return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
}

static int __free_from_pool(void *start, size_t size)
{
        if (!__in_atomic_pool(start, size))
                return 0;

        gen_pool_free(atomic_pool, (unsigned long)start, size);

        return 1;
}

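/*
 * Backing allocator for coherent memory (descriptive comment, added):
 * prefers CMA when the caller may block, and falls back to the swiotlb
 * allocator otherwise.  Allocations for devices whose coherent DMA mask
 * covers 32 bits or less are steered into ZONE_DMA.
 */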
static void *__dma_alloc_coherent(struct device *dev, size_t size,
                                  dma_addr_t *dma_handle, gfp_t flags,
                                  struct dma_attrs *attrs)
{
        if (dev == NULL) {
                WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
                return NULL;
        }

        if (IS_ENABLED(CONFIG_ZONE_DMA) &&
            dev->coherent_dma_mask <= DMA_BIT_MASK(32))
                flags |= GFP_DMA;
        if (IS_ENABLED(CONFIG_DMA_CMA) && (flags & __GFP_WAIT)) {
                struct page *page;
                void *addr;

                page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
                                                 get_order(size));
                if (!page)
                        return NULL;

                *dma_handle = phys_to_dma(dev, page_to_phys(page));
                addr = page_address(page);
                memset(addr, 0, size);
                return addr;
        } else {
                return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
        }
}

static void __dma_free_coherent(struct device *dev, size_t size,
                                void *vaddr, dma_addr_t dma_handle,
                                struct dma_attrs *attrs)
{
        bool freed;
        phys_addr_t paddr = dma_to_phys(dev, dma_handle);

        if (dev == NULL) {
                WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
                return;
        }

        freed = dma_release_from_contiguous(dev,
                                        phys_to_page(paddr),
                                        size >> PAGE_SHIFT);
        if (!freed)
                swiotlb_free_coherent(dev, size, vaddr, dma_handle);
}

static void *__dma_alloc(struct device *dev, size_t size,
                         dma_addr_t *dma_handle, gfp_t flags,
                         struct dma_attrs *attrs)
{
        struct page *page;
        void *ptr, *coherent_ptr;
        bool coherent = is_device_dma_coherent(dev);

        size = PAGE_ALIGN(size);

        if (!coherent && !(flags & __GFP_WAIT)) {
                struct page *page = NULL;
                void *addr = __alloc_from_pool(size, &page, flags);

                if (addr)
                        *dma_handle = phys_to_dma(dev, page_to_phys(page));

                return addr;
        }

        ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs);
        if (!ptr)
                goto no_mem;

        /* no need for non-cacheable mapping if coherent */
        if (coherent)
                return ptr;

        /* remove any dirty cache lines on the kernel alias */
        __dma_flush_range(ptr, ptr + size);

        /* create a coherent mapping */
        page = virt_to_page(ptr);
        coherent_ptr = dma_common_contiguous_remap(page, size, VM_USERMAP,
                                __get_dma_pgprot(attrs,
                                        __pgprot(PROT_NORMAL_NC), false),
                                NULL);
        if (!coherent_ptr)
                goto no_map;

        return coherent_ptr;

no_map:
        __dma_free_coherent(dev, size, ptr, *dma_handle, attrs);
no_mem:
        *dma_handle = DMA_ERROR_CODE;
        return NULL;
}

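/*
 * Illustrative only (not part of the original file): a driver reaches the
 * __dma_alloc()/__dma_free() pair above through the generic DMA API, e.g.:
 *
 *      dma_addr_t handle;
 *      void *buf;
 *
 *      buf = dma_alloc_coherent(dev, SZ_4K, &handle, GFP_KERNEL);
 *      if (!buf)
 *              return -ENOMEM;
 *      ...
 *      dma_free_coherent(dev, SZ_4K, buf, handle);
 *
 * With GFP_KERNEL (__GFP_WAIT set) the allocation may come from CMA; for a
 * non-coherent device called from atomic context it is served from the
 * coherent pool set up below.
 */
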
static void __dma_free(struct device *dev, size_t size,
                       void *vaddr, dma_addr_t dma_handle,
                       struct dma_attrs *attrs)
{
        void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));

        size = PAGE_ALIGN(size);

        if (!is_device_dma_coherent(dev)) {
                if (__free_from_pool(vaddr, size))
                        return;
                vunmap(vaddr);
        }
        __dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs);
}

static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
                                     unsigned long offset, size_t size,
                                     enum dma_data_direction dir,
                                     struct dma_attrs *attrs)
{
        dma_addr_t dev_addr;

        dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs);
        if (!is_device_dma_coherent(dev))
                __dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);

        return dev_addr;
}

static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
                                 size_t size, enum dma_data_direction dir,
                                 struct dma_attrs *attrs)
{
        if (!is_device_dma_coherent(dev))
                __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
        swiotlb_unmap_page(dev, dev_addr, size, dir, attrs);
}

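/*
 * Illustrative only: the map/unmap hooks above implement the streaming DMA
 * API for this platform, typically invoked from a driver as:
 *
 *      dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *      if (dma_mapping_error(dev, addr))
 *              return -ENOMEM;
 *      ...
 *      dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
 *
 * For non-coherent devices, the cache maintenance around swiotlb_map_page()
 * and swiotlb_unmap_page() keeps the CPU caches and the device's view of
 * the buffer in sync.
 */
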
static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
                                  int nelems, enum dma_data_direction dir,
                                  struct dma_attrs *attrs)
{
        struct scatterlist *sg;
        int i, ret;

        ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs);
        if (!is_device_dma_coherent(dev))
                for_each_sg(sgl, sg, ret, i)
                        __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
                                       sg->length, dir);

        return ret;
}

static void __swiotlb_unmap_sg_attrs(struct device *dev,
                                     struct scatterlist *sgl, int nelems,
                                     enum dma_data_direction dir,
                                     struct dma_attrs *attrs)
{
        struct scatterlist *sg;
        int i;

        if (!is_device_dma_coherent(dev))
                for_each_sg(sgl, sg, nelems, i)
                        __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
                                         sg->length, dir);
        swiotlb_unmap_sg_attrs(dev, sgl, nelems, dir, attrs);
}

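/*
 * Illustrative only: scatter-gather mappings reach the hooks above through
 * dma_map_sg()/dma_unmap_sg(), e.g.:
 *
 *      int n = dma_map_sg(dev, table->sgl, table->nents, DMA_FROM_DEVICE);
 *
 *      if (!n)
 *              return -ENOMEM;
 *      ...
 *      dma_unmap_sg(dev, table->sgl, table->nents, DMA_FROM_DEVICE);
 *
 * Note that the cache maintenance on the map path iterates over the number
 * of entries returned by swiotlb_map_sg_attrs(), which may be fewer than
 * nelems.
 */
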
static void __swiotlb_sync_single_for_cpu(struct device *dev,
                                          dma_addr_t dev_addr, size_t size,
                                          enum dma_data_direction dir)
{
        if (!is_device_dma_coherent(dev))
                __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
        swiotlb_sync_single_for_cpu(dev, dev_addr, size, dir);
}

static void __swiotlb_sync_single_for_device(struct device *dev,
                                             dma_addr_t dev_addr, size_t size,
                                             enum dma_data_direction dir)
{
        swiotlb_sync_single_for_device(dev, dev_addr, size, dir);
        if (!is_device_dma_coherent(dev))
                __dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
}

static void __swiotlb_sync_sg_for_cpu(struct device *dev,
                                      struct scatterlist *sgl, int nelems,
                                      enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        if (!is_device_dma_coherent(dev))
                for_each_sg(sgl, sg, nelems, i)
                        __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
                                         sg->length, dir);
        swiotlb_sync_sg_for_cpu(dev, sgl, nelems, dir);
}

static void __swiotlb_sync_sg_for_device(struct device *dev,
                                         struct scatterlist *sgl, int nelems,
                                         enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        swiotlb_sync_sg_for_device(dev, sgl, nelems, dir);
        if (!is_device_dma_coherent(dev))
                for_each_sg(sgl, sg, nelems, i)
                        __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
                                       sg->length, dir);
}

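/*
 * Note (added for clarity) the ordering asymmetry in the sync hooks above:
 * on the for_cpu path, cache maintenance (__dma_unmap_area) runs before the
 * swiotlb sync so that any subsequent bounce-buffer copy reads fresh,
 * device-written data; on the for_device path the swiotlb sync runs first
 * and the cache is cleaned (__dma_map_area) afterwards, so the device only
 * observes data that has left the CPU caches.
 */
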
static int __swiotlb_mmap(struct device *dev,
                          struct vm_area_struct *vma,
                          void *cpu_addr, dma_addr_t dma_addr, size_t size,
                          struct dma_attrs *attrs)
{
        int ret = -ENXIO;
        unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >>
                                        PAGE_SHIFT;
        unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
        unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT;
        unsigned long off = vma->vm_pgoff;

        vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
                                             is_device_dma_coherent(dev));

        if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
                return ret;

        if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
                ret = remap_pfn_range(vma, vma->vm_start,
                                      pfn + off,
                                      vma->vm_end - vma->vm_start,
                                      vma->vm_page_prot);
        }

        return ret;
}

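/*
 * Illustrative only: a driver exposes such a buffer to userspace from its
 * mmap file operation via the generic wrapper, e.g.:
 *
 *      static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *      {
 *              struct foo_dev *fd = file->private_data;
 *
 *              return dma_mmap_coherent(fd->dev, vma, fd->buf,
 *                                       fd->handle, fd->size);
 *      }
 *
 * (foo_dev and its fields are hypothetical.)
 */
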
static int __swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
                                 void *cpu_addr, dma_addr_t handle, size_t size,
                                 struct dma_attrs *attrs)
{
        int ret = sg_alloc_table(sgt, 1, GFP_KERNEL);

        if (!ret)
                sg_set_page(sgt->sgl, phys_to_page(dma_to_phys(dev, handle)),
                            PAGE_ALIGN(size), 0);

        return ret;
}

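/*
 * Illustrative only: the single-entry table built above is what a caller of
 * dma_get_sgtable() receives, e.g. when exporting a coherent buffer through
 * dma-buf:
 *
 *      struct sg_table sgt;
 *
 *      ret = dma_get_sgtable(dev, &sgt, cpu_addr, handle, size);
 *      if (ret)
 *              return ret;
 *      ...use sgt.sgl, then sg_free_table(&sgt)...
 */
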
static struct dma_map_ops swiotlb_dma_ops = {
        .alloc = __dma_alloc,
        .free = __dma_free,
        .mmap = __swiotlb_mmap,
        .get_sgtable = __swiotlb_get_sgtable,
        .map_page = __swiotlb_map_page,
        .unmap_page = __swiotlb_unmap_page,
        .map_sg = __swiotlb_map_sg_attrs,
        .unmap_sg = __swiotlb_unmap_sg_attrs,
        .sync_single_for_cpu = __swiotlb_sync_single_for_cpu,
        .sync_single_for_device = __swiotlb_sync_single_for_device,
        .sync_sg_for_cpu = __swiotlb_sync_sg_for_cpu,
        .sync_sg_for_device = __swiotlb_sync_sg_for_device,
        .dma_supported = swiotlb_dma_supported,
        .mapping_error = swiotlb_dma_mapping_error,
};

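/*
 * Set up the pool backing atomic coherent allocations (descriptive comment,
 * added): grab a contiguous chunk at boot (from CMA if available, otherwise
 * from ZONE_DMA via alloc_pages(GFP_DMA)), zero and flush it, remap it
 * non-cacheable, and hand it to a genpool allocator.
 */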
static int __init atomic_pool_init(void)
{
        pgprot_t prot = __pgprot(PROT_NORMAL_NC);
        unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT;
        struct page *page;
        void *addr;
        unsigned int pool_size_order = get_order(atomic_pool_size);

        if (dev_get_cma_area(NULL))
                page = dma_alloc_from_contiguous(NULL, nr_pages,
                                                 pool_size_order);
        else
                page = alloc_pages(GFP_DMA, pool_size_order);

        if (page) {
                int ret;
                void *page_addr = page_address(page);

                memset(page_addr, 0, atomic_pool_size);
                __dma_flush_range(page_addr, page_addr + atomic_pool_size);

                atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
                if (!atomic_pool)
                        goto free_page;

                addr = dma_common_contiguous_remap(page, atomic_pool_size,
                                        VM_USERMAP, prot, atomic_pool_init);

                if (!addr)
                        goto destroy_genpool;

                ret = gen_pool_add_virt(atomic_pool, (unsigned long)addr,
                                        page_to_phys(page),
                                        atomic_pool_size, -1);
                if (ret)
                        goto remove_mapping;

                gen_pool_set_algo(atomic_pool,
                                  gen_pool_first_fit_order_align,
                                  (void *)PAGE_SHIFT);

                pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n",
                        atomic_pool_size / 1024);
                return 0;
        }
        goto out;

remove_mapping:
        dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP);
destroy_genpool:
        gen_pool_destroy(atomic_pool);
        atomic_pool = NULL;
free_page:
        if (!dma_release_from_contiguous(NULL, page, nr_pages))
                __free_pages(page, pool_size_order);
out:
        pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
               atomic_pool_size / 1024);
        return -ENOMEM;
}

/********************************************
 * The following APIs are for dummy DMA ops *
 ********************************************/

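/*
 * Descriptive comment (added): every dummy operation below either fails or
 * is a no-op, so a device wired up with dummy_dma_ops cannot perform DMA at
 * all, and any attempt to allocate or map is reported as an error to the
 * caller.
 */
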
static void *__dummy_alloc(struct device *dev, size_t size,
                           dma_addr_t *dma_handle, gfp_t flags,
                           struct dma_attrs *attrs)
{
        return NULL;
}

static void __dummy_free(struct device *dev, size_t size,
                         void *vaddr, dma_addr_t dma_handle,
                         struct dma_attrs *attrs)
{
}

static int __dummy_mmap(struct device *dev,
                        struct vm_area_struct *vma,
                        void *cpu_addr, dma_addr_t dma_addr, size_t size,
                        struct dma_attrs *attrs)
{
        return -ENXIO;
}

static dma_addr_t __dummy_map_page(struct device *dev, struct page *page,
                                   unsigned long offset, size_t size,
                                   enum dma_data_direction dir,
                                   struct dma_attrs *attrs)
{
        return DMA_ERROR_CODE;
}

static void __dummy_unmap_page(struct device *dev, dma_addr_t dev_addr,
                               size_t size, enum dma_data_direction dir,
                               struct dma_attrs *attrs)
{
}

static int __dummy_map_sg(struct device *dev, struct scatterlist *sgl,
                          int nelems, enum dma_data_direction dir,
                          struct dma_attrs *attrs)
{
        return 0;
}

static void __dummy_unmap_sg(struct device *dev,
                             struct scatterlist *sgl, int nelems,
                             enum dma_data_direction dir,
                             struct dma_attrs *attrs)
{
}

static void __dummy_sync_single(struct device *dev,
                                dma_addr_t dev_addr, size_t size,
                                enum dma_data_direction dir)
{
}

static void __dummy_sync_sg(struct device *dev,
                            struct scatterlist *sgl, int nelems,
                            enum dma_data_direction dir)
{
}

static int __dummy_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
        return 1;
}

static int __dummy_dma_supported(struct device *hwdev, u64 mask)
{
        return 0;
}

struct dma_map_ops dummy_dma_ops = {
        .alloc = __dummy_alloc,
        .free = __dummy_free,
        .mmap = __dummy_mmap,
        .map_page = __dummy_map_page,
        .unmap_page = __dummy_unmap_page,
        .map_sg = __dummy_map_sg,
        .unmap_sg = __dummy_unmap_sg,
        .sync_single_for_cpu = __dummy_sync_single,
        .sync_single_for_device = __dummy_sync_single,
        .sync_sg_for_cpu = __dummy_sync_sg,
        .sync_sg_for_device = __dummy_sync_sg,
        .mapping_error = __dummy_mapping_error,
        .dma_supported = __dummy_dma_supported,
};
EXPORT_SYMBOL(dummy_dma_ops);

static int __init arm64_dma_init(void)
{
        int ret;

        dma_ops = &swiotlb_dma_ops;

        ret = atomic_pool_init();

        return ret;
}
arch_initcall(arm64_dma_init);

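/*
 * Descriptive comment (added): when CONFIG_DMA_API_DEBUG is enabled, this
 * preallocates tracking entries for outstanding mappings so DMA API misuse
 * can be reported; with the option disabled, dma_debug_init() is a no-op
 * stub.
 */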
#define PREALLOC_DMA_DEBUG_ENTRIES      4096

static int __init dma_debug_do_init(void)
{
        dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
        return 0;
}
fs_initcall(dma_debug_do_init);