// SPDX-License-Identifier: GPL-2.0
/*
 * arch-independent dma-mapping routines
 *
 * Copyright (c) 2006  SUSE Linux Products GmbH
 * Copyright (c) 2006  Tejun Heo <teheo@suse.de>
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/acpi.h>
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/*
 * Managed DMA API
 */
struct dma_devres {
	size_t		size;
	void		*vaddr;
	dma_addr_t	dma_handle;
	unsigned long	attrs;
};
static void dmam_release(struct device *dev, void *res)
{
	struct dma_devres *this = res;

	dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle,
			this->attrs);
}
static int dmam_match(struct device *dev, void *res, void *match_data)
{
	struct dma_devres *this = res, *match = match_data;

	if (this->vaddr == match->vaddr) {
		WARN_ON(this->size != match->size ||
			this->dma_handle != match->dma_handle);
		return 1;
	}
	return 0;
}
/**
 * dmam_free_coherent - Managed dma_free_coherent()
 * @dev: Device to free coherent memory for
 * @size: Size of allocation
 * @vaddr: Virtual address of the memory to free
 * @dma_handle: DMA handle of the memory to free
 *
 * Managed dma_free_coherent().
 */
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			dma_addr_t dma_handle)
{
	struct dma_devres match_data = { size, vaddr, dma_handle };

	dma_free_coherent(dev, size, vaddr, dma_handle);
	WARN_ON(devres_destroy(dev, dmam_release, dmam_match, &match_data));
}
EXPORT_SYMBOL(dmam_free_coherent);
/**
 * dmam_alloc_attrs - Managed dma_alloc_attrs()
 * @dev: Device to allocate non-coherent memory for
 * @size: Size of allocation
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 * @attrs: Flags in the DMA_ATTR_* namespace.
 *
 * Managed dma_alloc_attrs().  Memory allocated using this function will be
 * automatically released on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	struct dma_devres *dr;
	void *vaddr;

	dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
	if (!dr)
		return NULL;

	vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs);
	if (!vaddr) {
		devres_free(dr);
		return NULL;
	}

	dr->vaddr = vaddr;
	dr->dma_handle = *dma_handle;
	dr->size = size;
	dr->attrs = attrs;

	devres_add(dev, dr);

	return vaddr;
}
EXPORT_SYMBOL(dmam_alloc_attrs);
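
/*
 * Usage sketch (hypothetical driver code, names and size are illustrative):
 * allocating a managed coherent buffer at probe time.  Because devres tracks
 * the allocation, it is released automatically on driver detach and no
 * explicit dmam_free_coherent() call is needed on error or remove paths.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		dma_addr_t dma;
 *		void *buf;
 *
 *		buf = dmam_alloc_attrs(&pdev->dev, PAGE_SIZE, &dma,
 *				       GFP_KERNEL, 0);
 *		if (!buf)
 *			return -ENOMEM;
 *		return 0;
 *	}
 */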
static bool dma_go_direct(struct device *dev, dma_addr_t mask,
		const struct dma_map_ops *ops)
{
	if (likely(!ops))
		return true;
#ifdef CONFIG_DMA_OPS_BYPASS
	if (dev->dma_ops_bypass)
		return min_not_zero(mask, dev->bus_dma_limit) >=
			    dma_direct_get_required_mask(dev);
#endif
	return false;
}
/*
 * Check if the device uses a direct mapping for streaming DMA operations.
 * This allows IOMMU drivers to set a bypass mode if the DMA mask is large
 * enough.
 */
static inline bool dma_alloc_direct(struct device *dev,
		const struct dma_map_ops *ops)
{
	return dma_go_direct(dev, dev->coherent_dma_mask, ops);
}
static inline bool dma_map_direct(struct device *dev,
		const struct dma_map_ops *ops)
{
	return dma_go_direct(dev, *dev->dma_mask, ops);
}
dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
		size_t offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
	else
		addr = ops->map_page(dev, page, offset, size, dir, attrs);
	debug_dma_map_page(dev, page, offset, size, dir, addr);

	return addr;
}
EXPORT_SYMBOL(dma_map_page_attrs);
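
/*
 * Usage sketch (hypothetical; "dev" and "page" stand in for driver state):
 * streaming mapping of one page for a device-bound transfer.  The returned
 * handle must be checked with dma_mapping_error() and unmapped with the same
 * size and direction.
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_page_attrs(dev, page, 0, PAGE_SIZE,
 *				    DMA_TO_DEVICE, 0);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	(program the hardware with "handle", wait for the transfer)
 *	dma_unmap_page_attrs(dev, handle, PAGE_SIZE, DMA_TO_DEVICE, 0);
 */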
void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_unmap_page(dev, addr, size, dir, attrs);
	else if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_unmap_page_attrs);
/*
 * dma_map_sg_attrs returns 0 on error and > 0 on success.
 * It should never return a value < 0.
 */
int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	int ents;

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		ents = dma_direct_map_sg(dev, sg, nents, dir, attrs);
	else
		ents = ops->map_sg(dev, sg, nents, dir, attrs);
	BUG_ON(ents < 0);
	debug_dma_map_sg(dev, sg, nents, ents, dir);

	return ents;
}
EXPORT_SYMBOL(dma_map_sg_attrs);
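
/*
 * Usage sketch (hypothetical; "sgt" is assumed to be an already populated
 * sg_table): map a scatterlist and honour the possibly smaller number of
 * entries that the mapping returns, then unmap with the original nents.
 *
 *	int ents;
 *
 *	ents = dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents,
 *				DMA_FROM_DEVICE, 0);
 *	if (!ents)
 *		return -EIO;
 *	(hand the first "ents" entries to the hardware)
 *	dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents,
 *			   DMA_FROM_DEVICE, 0);
 */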
void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir,
			unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (dma_map_direct(dev, ops))
		dma_direct_unmap_sg(dev, sg, nents, dir, attrs);
	else if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}
EXPORT_SYMBOL(dma_unmap_sg_attrs);
dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr = DMA_MAPPING_ERROR;

	BUG_ON(!valid_dma_direction(dir));

	/* Don't allow RAM to be mapped */
	if (WARN_ON_ONCE(pfn_valid(PHYS_PFN(phys_addr))))
		return DMA_MAPPING_ERROR;

	if (dma_map_direct(dev, ops))
		addr = dma_direct_map_resource(dev, phys_addr, size, dir, attrs);
	else if (ops->map_resource)
		addr = ops->map_resource(dev, phys_addr, size, dir, attrs);

	debug_dma_map_resource(dev, phys_addr, size, dir, addr);

	return addr;
}
EXPORT_SYMBOL(dma_map_resource);
void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (!dma_map_direct(dev, ops) && ops->unmap_resource)
		ops->unmap_resource(dev, addr, size, dir, attrs);
	debug_dma_unmap_resource(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_unmap_resource);
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_single_for_cpu(dev, addr, size, dir);
	else if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_sync_single_for_cpu);
void dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_single_for_device(dev, addr, size, dir);
	else if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_sync_single_for_device);
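
/*
 * Usage sketch (hypothetical; "handle" and "len" come from an earlier
 * streaming mapping): the usual pairing when the CPU needs to inspect a
 * buffer between device transfers and then hand it back to the device.
 *
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	(CPU reads or updates the buffer contents)
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 */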
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			 int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_sg_for_cpu(dev, sg, nelems, dir);
	else if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}
EXPORT_SYMBOL(dma_sync_sg_for_cpu);
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
			    int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_sg_for_device(dev, sg, nelems, dir);
	else if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}
EXPORT_SYMBOL(dma_sync_sg_for_device);
/*
 * Create scatter-list for the already allocated DMA buffer.
 */
int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	struct page *page = virt_to_page(cpu_addr);
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}
/*
 * The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
 * that the intention is to allow exporting memory allocated via the
 * coherent DMA APIs through the dma_buf API, which only accepts a
 * scatterlist.  This presents a couple of problems:
 *  1. Not all memory allocated via the coherent DMA APIs is backed by
 *     a struct page
 *  2. Passing coherent DMA memory into the streaming APIs is not allowed
 *     as we will try to flush the memory through a different alias to that
 *     actually being used (and the flushes are redundant.)
 */
int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_get_sgtable(dev, sgt, cpu_addr, dma_addr,
				size, attrs);
	if (!ops->get_sgtable)
		return -ENXIO;
	return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, attrs);
}
EXPORT_SYMBOL(dma_get_sgtable_attrs);
#ifdef CONFIG_MMU
/*
 * Return the page attributes used for mapping dma_alloc_* memory, either in
 * kernel space if remapping is needed, or to userspace through dma_mmap_*.
 */
pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs)
{
	if (force_dma_unencrypted(dev))
		prot = pgprot_decrypted(prot);
	if (dev_is_dma_coherent(dev) ||
	    (IS_ENABLED(CONFIG_DMA_NONCOHERENT_CACHE_SYNC) &&
	     (attrs & DMA_ATTR_NON_CONSISTENT)))
		return prot;
#ifdef CONFIG_ARCH_HAS_DMA_WRITE_COMBINE
	if (attrs & DMA_ATTR_WRITE_COMBINE)
		return pgprot_writecombine(prot);
#endif
	return pgprot_dmacoherent(prot);
}
#endif /* CONFIG_MMU */
/*
 * Create userspace mapping for the DMA-coherent memory.
 */
int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
#ifdef CONFIG_MMU
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;
	int ret = -ENXIO;

	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off >= count || user_count > count - off)
		return -ENXIO;

	return remap_pfn_range(vma, vma->vm_start,
			page_to_pfn(virt_to_page(cpu_addr)) + vma->vm_pgoff,
			user_count << PAGE_SHIFT, vma->vm_page_prot);
#else
	return -ENXIO;
#endif /* CONFIG_MMU */
}
/**
 * dma_can_mmap - check if a given device supports dma_mmap_*
 * @dev: device to check
 *
 * Returns %true if @dev supports dma_mmap_coherent() and dma_mmap_attrs() to
 * map DMA allocations to userspace.
 */
bool dma_can_mmap(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_can_mmap(dev);
	return ops->mmap != NULL;
}
EXPORT_SYMBOL_GPL(dma_can_mmap);
/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @dma_addr: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs into user
 * space.  The coherent DMA buffer must not be freed by the driver until the
 * user space mapping has been released.
 */
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_mmap(dev, vma, cpu_addr, dma_addr, size,
				attrs);
	if (!ops->mmap)
		return -ENXIO;
	return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}
EXPORT_SYMBOL(dma_mmap_attrs);
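
/*
 * Usage sketch (hypothetical character-device code; "priv" fields stand in
 * for driver state): an mmap file operation exporting a coherent buffer that
 * was allocated with dma_alloc_attrs().  The buffer must not be freed while
 * the user mapping exists.
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_priv *priv = file->private_data;
 *
 *		return dma_mmap_attrs(priv->dev, vma, priv->vaddr,
 *				      priv->dma, priv->size, 0);
 *	}
 */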
u64 dma_get_required_mask(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_get_required_mask(dev);
	if (ops->get_required_mask)
		return ops->get_required_mask(dev);

	/*
	 * We require every DMA ops implementation to at least support a 32-bit
	 * DMA mask (and use bounce buffering if that isn't supported in
	 * hardware).  As the direct mapping code has its own routine to
	 * actually report an optimal mask we default to 32-bit here as that
	 * is the right thing for most IOMMUs, and at least not actively
	 * harmful in general.
	 */
	return DMA_BIT_MASK(32);
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);
void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t flag, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	WARN_ON_ONCE(!dev->coherent_dma_mask);

	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
		return cpu_addr;

	/* let the implementation decide on the zone to allocate from: */
	flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

	if (dma_alloc_direct(dev, ops))
		cpu_addr = dma_direct_alloc(dev, size, dma_handle, flag, attrs);
	else if (ops->alloc)
		cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	else
		return NULL;

	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
	return cpu_addr;
}
EXPORT_SYMBOL(dma_alloc_attrs);
void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
		return;
	/*
	 * On non-coherent platforms which implement DMA-coherent buffers via
	 * non-cacheable remaps, ops->free() may call vunmap(). Thus getting
	 * this far in IRQ context is a) at risk of a BUG_ON() or trying to
	 * sleep on some machines, and b) an indication that the driver is
	 * probably misusing the coherent API anyway.
	 */
	WARN_ON(irqs_disabled());

	if (!cpu_addr)
		return;

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	if (dma_alloc_direct(dev, ops))
		dma_direct_free(dev, size, cpu_addr, dma_handle, attrs);
	else if (ops->free)
		ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
EXPORT_SYMBOL(dma_free_attrs);
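
/*
 * Usage sketch (hypothetical; RING_BYTES is an illustrative constant):
 * unmanaged coherent allocation paired with an explicit free.  Unlike the
 * dmam_*() helpers above, the driver must call dma_free_coherent() itself
 * with the same size and handles the allocation returned.
 *
 *	dma_addr_t dma;
 *	void *ring;
 *
 *	ring = dma_alloc_coherent(dev, RING_BYTES, &dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	(use the ring, then on teardown:)
 *	dma_free_coherent(dev, RING_BYTES, ring, dma);
 */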
int dma_supported(struct device *dev, u64 mask)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	/*
	 * ->dma_supported sets the bypass flag, so we must always call
	 * into the method here unless the device is truly direct mapped.
	 */
	if (!ops)
		return dma_direct_supported(dev, mask);
	if (!ops->dma_supported)
		return 1;
	return ops->dma_supported(dev, mask);
}
EXPORT_SYMBOL(dma_supported);
#ifdef CONFIG_ARCH_HAS_DMA_SET_MASK
void arch_dma_set_mask(struct device *dev, u64 mask);
#else
#define arch_dma_set_mask(dev, mask)	do { } while (0)
#endif
int dma_set_mask(struct device *dev, u64 mask)
{
	/*
	 * Truncate the mask to the actually supported dma_addr_t width to
	 * avoid generating unsupportable addresses.
	 */
	mask = (dma_addr_t)mask;

	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	arch_dma_set_mask(dev, mask);
	*dev->dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_mask);
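
/*
 * Usage sketch (hypothetical probe-time code): request a wide mask first and
 * fall back to 32 bits, using the helper that sets both the streaming and
 * the coherent mask in one call.
 *
 *	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
 *	if (ret)
 *		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
 *	if (ret)
 *		return ret;
 */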
#ifndef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	/*
	 * Truncate the mask to the actually supported dma_addr_t width to
	 * avoid generating unsupportable addresses.
	 */
	mask = (dma_addr_t)mask;

	if (!dma_supported(dev, mask))
		return -EIO;

	dev->coherent_dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_coherent_mask);
#endif
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));

	if (dma_alloc_direct(dev, ops))
		arch_dma_cache_sync(dev, vaddr, size, dir);
	else if (ops->cache_sync)
		ops->cache_sync(dev, vaddr, size, dir);
}
EXPORT_SYMBOL(dma_cache_sync);
size_t dma_max_mapping_size(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	size_t size = SIZE_MAX;

	if (dma_map_direct(dev, ops))
		size = dma_direct_max_mapping_size(dev);
	else if (ops && ops->max_mapping_size)
		size = ops->max_mapping_size(dev);

	return size;
}
EXPORT_SYMBOL_GPL(dma_max_mapping_size);
bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_map_direct(dev, ops))
		return dma_direct_need_sync(dev, dma_addr);
	return ops->sync_single_for_cpu || ops->sync_single_for_device;
}
EXPORT_SYMBOL_GPL(dma_need_sync);
unsigned long dma_get_merge_boundary(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops || !ops->get_merge_boundary)
		return 0;	/* can't merge */

	return ops->get_merge_boundary(dev);
}
EXPORT_SYMBOL_GPL(dma_get_merge_boundary);