/* SPDX-License-Identifier: GPL-2.0 */
/*
 * This header is for implementations of dma_map_ops and related code.
 * It should not be included in drivers just using the DMA API.
 */
#ifndef _LINUX_DMA_MAP_OPS_H
#define _LINUX_DMA_MAP_OPS_H

#include <linux/dma-mapping.h>
#include <linux/pgtable.h>

struct cma;
struct dma_map_ops {
	void *(*alloc)(struct device *dev, size_t size,
			dma_addr_t *dma_handle, gfp_t gfp,
			unsigned long attrs);
	void (*free)(struct device *dev, size_t size, void *vaddr,
			dma_addr_t dma_handle, unsigned long attrs);
	struct page *(*alloc_pages)(struct device *dev, size_t size,
			dma_addr_t *dma_handle, enum dma_data_direction dir,
			gfp_t gfp);
	void (*free_pages)(struct device *dev, size_t size, struct page *vaddr,
			dma_addr_t dma_handle, enum dma_data_direction dir);
	struct sg_table *(*alloc_noncontiguous)(struct device *dev, size_t size,
			enum dma_data_direction dir, gfp_t gfp,
			unsigned long attrs);
	void (*free_noncontiguous)(struct device *dev, size_t size,
			struct sg_table *sgt, enum dma_data_direction dir);
	int (*mmap)(struct device *, struct vm_area_struct *,
			void *, dma_addr_t, size_t, unsigned long attrs);

	int (*get_sgtable)(struct device *dev, struct sg_table *sgt,
			void *cpu_addr, dma_addr_t dma_addr, size_t size,
			unsigned long attrs);

	dma_addr_t (*map_page)(struct device *dev, struct page *page,
			unsigned long offset, size_t size,
			enum dma_data_direction dir, unsigned long attrs);
	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	/*
	 * map_sg returns 0 on error and a value > 0 on success.
	 * It should never return a value < 0 (see the example sketch
	 * following this struct).
	 */
	int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents,
			enum dma_data_direction dir, unsigned long attrs);
	void (*unmap_sg)(struct device *dev, struct scatterlist *sg, int nents,
			enum dma_data_direction dir, unsigned long attrs);
	dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	void (*sync_single_for_cpu)(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir);
	void (*sync_single_for_device)(struct device *dev,
			dma_addr_t dma_handle, size_t size,
			enum dma_data_direction dir);
	void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir);
	void (*sync_sg_for_device)(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir);
	void (*cache_sync)(struct device *dev, void *vaddr, size_t size,
			enum dma_data_direction direction);
	int (*dma_supported)(struct device *dev, u64 mask);
	u64 (*get_required_mask)(struct device *dev);
	size_t (*max_mapping_size)(struct device *dev);
	unsigned long (*get_merge_boundary)(struct device *dev);
};
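/*
 * Example: a minimal, purely illustrative dma_map_ops instance. All foo_*
 * names below are hypothetical; the sketch only demonstrates the calling
 * conventions, in particular that ->map_page reports failure through
 * DMA_MAPPING_ERROR and that ->map_sg returns 0 on error (never a negative
 * errno) and the number of mapped entries on success.
 */
static dma_addr_t foo_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;

	if (dev->dma_mask && phys + size - 1 > *dev->dma_mask)
		return DMA_MAPPING_ERROR;	/* beyond the device's reach */
	return (dma_addr_t)phys;		/* assumes a 1:1 bus mapping */
}

static int foo_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		sg_dma_address(sg) = foo_map_page(dev, sg_page(sg),
				sg->offset, sg->length, dir, attrs);
		if (sg_dma_address(sg) == DMA_MAPPING_ERROR)
			return 0;	/* real code must unwind prior entries */
		sg_dma_len(sg) = sg->length;
	}
	return nents;
}

static const struct dma_map_ops foo_dma_ops = {
	.map_page	= foo_map_page,
	.map_sg		= foo_map_sg,
};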
#ifdef CONFIG_DMA_OPS
#include <asm/dma-mapping.h>

static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (dev->dma_ops)
		return dev->dma_ops;
	return get_arch_dma_ops(dev->bus);
}

static inline void set_dma_ops(struct device *dev,
			       const struct dma_map_ops *dma_ops)
{
	dev->dma_ops = dma_ops;
}
#else /* CONFIG_DMA_OPS */
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	return NULL;
}
static inline void set_dma_ops(struct device *dev,
			       const struct dma_map_ops *dma_ops)
{
}
#endif /* CONFIG_DMA_OPS */
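/*
 * Example: installing the (hypothetical) foo_dma_ops above on a device from
 * bus or platform setup code; a minimal sketch.
 */
static void foo_setup_device(struct device *dev)
{
	/* all subsequent dma_map_*() calls on dev go through foo_dma_ops */
	set_dma_ops(dev, &foo_dma_ops);
}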
#ifdef CONFIG_DMA_CMA
extern struct cma *dma_contiguous_default_area;

static inline struct cma *dev_get_cma_area(struct device *dev)
{
	if (dev && dev->cma_area)
		return dev->cma_area;
	return dma_contiguous_default_area;
}
void dma_contiguous_reserve(phys_addr_t addr_limit);
int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
		phys_addr_t limit, struct cma **res_cma, bool fixed);

struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
				       unsigned int order, bool no_warn);
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
				 int count);
struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp);
void dma_free_contiguous(struct device *dev, struct page *page, size_t size);

void dma_contiguous_early_fixup(phys_addr_t base, unsigned long size);
#else /* CONFIG_DMA_CMA */
static inline struct cma *dev_get_cma_area(struct device *dev)
{
	return NULL;
}
static inline void dma_contiguous_reserve(phys_addr_t limit)
{
}
static inline int dma_contiguous_reserve_area(phys_addr_t size,
		phys_addr_t base, phys_addr_t limit, struct cma **res_cma,
		bool fixed)
{
	return -ENOSYS;
}
static inline struct page *dma_alloc_from_contiguous(struct device *dev,
		size_t count, unsigned int order, bool no_warn)
{
	return NULL;
}
static inline bool dma_release_from_contiguous(struct device *dev,
		struct page *pages, int count)
{
	return false;
}
/* Use fallback alloc() and free() when CONFIG_DMA_CMA=n */
static inline struct page *dma_alloc_contiguous(struct device *dev,
		size_t size, gfp_t gfp)
{
	return NULL;
}
static inline void dma_free_contiguous(struct device *dev, struct page *page,
		size_t size)
{
	__free_pages(page, get_order(size));
}
#endif /* CONFIG_DMA_CMA */
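/*
 * Example: how the DMA internals (e.g. the direct mapping code) use the
 * contiguous helpers above. A minimal sketch; dma_alloc_contiguous() is
 * internal to kernel/dma and not a driver-facing API, and foo_* is a
 * hypothetical name.
 */
static int foo_alloc_coherent_backing(struct device *dev, size_t size)
{
	struct page *page = dma_alloc_contiguous(dev, size, GFP_KERNEL);

	if (!page)
		return -ENOMEM;	/* caller falls back to the page allocator */
	/* ... map or remap the pages as needed, then eventually: */
	dma_free_contiguous(dev, page, size);
	return 0;
}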
#ifdef CONFIG_DMA_PERNUMA_CMA
void dma_pernuma_cma_reserve(void);
#else
static inline void dma_pernuma_cma_reserve(void) { }
#endif /* CONFIG_DMA_PERNUMA_CMA */
#ifdef CONFIG_DMA_DECLARE_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
		dma_addr_t device_addr, size_t size);
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
		dma_addr_t *dma_handle, void **ret);
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);
int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, size_t size, int *ret);

void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
		dma_addr_t *dma_handle);
int dma_release_from_global_coherent(int order, void *vaddr);
int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
		size_t size, int *ret);
#else
static inline int dma_declare_coherent_memory(struct device *dev,
		phys_addr_t phys_addr, dma_addr_t device_addr, size_t size)
{
	return -ENOSYS;
}
#define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)

static inline void *dma_alloc_from_global_coherent(struct device *dev,
		ssize_t size, dma_addr_t *dma_handle)
{
	return NULL;
}
static inline int dma_release_from_global_coherent(int order, void *vaddr)
{
	return 0;
}
static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
		void *cpu_addr, size_t size, int *ret)
{
	return 0;
}
#endif /* CONFIG_DMA_DECLARE_COHERENT */
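/*
 * Example: dedicating a device-local memory window as coherent DMA memory,
 * as board or platform setup code might do. The CPU address, device address
 * and size below are hypothetical.
 */
static int foo_declare_sram(struct device *dev)
{
	/* 64 KiB of SRAM at CPU physical 0x90000000, device address 0 */
	return dma_declare_coherent_memory(dev, 0x90000000, 0, 0x10000);
}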
/*
 * This is the actual return value from the ->alloc_noncontiguous method.
 * The users of the DMA API should only care about the sg_table, but to make
 * the DMA-API internal vmapping and freeing easier we stash away the page
 * array as well (except for the fallback case). This can go away any time,
 * e.g. when a vmap variant that takes a scatterlist comes along.
 */
struct dma_sgt_handle {
	struct sg_table sgt;
	struct page **pages;
};
#define sgt_handle(sgt) \
	container_of((sgt), struct dma_sgt_handle, sgt)
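/*
 * Example: how a ->free_noncontiguous implementation can recover the full
 * handle from the sg_table it handed out. A minimal sketch with a
 * hypothetical foo_ prefix; it assumes the handle and page array were
 * kmalloc-backed, and releasing the pages themselves is elided.
 */
static void foo_free_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	struct dma_sgt_handle *sh = sgt_handle(sgt);

	/* ... release the pages referenced by sh->pages here ... */
	sg_free_table(&sh->sgt);
	kfree(sh->pages);
	kfree(sh);
}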
int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
struct page *dma_common_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
void dma_common_free_pages(struct device *dev, size_t size, struct page *vaddr,
		dma_addr_t dma_handle, enum dma_data_direction dir);
struct page **dma_common_find_pages(void *cpu_addr);
void *dma_common_contiguous_remap(struct page *page, size_t size,
		pgprot_t prot, const void *caller);
void *dma_common_pages_remap(struct page **pages, size_t size, pgprot_t prot,
		const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size);
struct page *dma_alloc_from_pool(struct device *dev, size_t size,
		void **cpu_addr, gfp_t flags,
		bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t));
bool dma_free_from_pool(struct device *dev, void *start, size_t size);
int dma_direct_set_offset(struct device *dev, phys_addr_t cpu_start,
		dma_addr_t dma_start, u64 size);
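/*
 * Example: registering a fixed CPU-to-bus offset for a device whose bus
 * sees system RAM at a different base, e.g. CPU physical 0x80000000
 * appearing at bus address 0 over a 1 GiB window. All numbers are
 * hypothetical; real platform code derives them from firmware data.
 */
static int foo_init_dma_offset(struct device *dev)
{
	return dma_direct_set_offset(dev, 0x80000000, 0, 0x40000000);
}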
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
	defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
	defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
extern bool dma_default_coherent;
static inline bool dev_is_dma_coherent(struct device *dev)
{
	return dev->dma_coherent;
}
#else
static inline bool dev_is_dma_coherent(struct device *dev)
{
	return true;
}
#endif /* !CONFIG_ARCH_HAS_SYNC_DMA_FOR_{DEVICE,CPU,CPU_ALL} */
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs);
void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs);
#ifdef CONFIG_MMU
/*
 * Page protection so that devices that can't snoop CPU caches can use the
 * memory coherently. We default to pgprot_noncached, which is usually used
 * for ioremap as a safe bet, but architectures can override this with less
 * strict semantics if possible.
 */
#ifndef pgprot_dmacoherent
#define pgprot_dmacoherent(prot)	pgprot_noncached(prot)
#endif

pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs);
#else
static inline pgprot_t dma_pgprot(struct device *dev, pgprot_t prot,
		unsigned long attrs)
{
	return prot;	/* no protection bits supported without page tables */
}
#endif /* CONFIG_MMU */
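/*
 * Example: how common mapping code applies dma_pgprot() before exposing a
 * potentially non-coherent buffer to userspace; a simplified sketch of the
 * pattern used by dma_common_mmap(), with a hypothetical foo_ name.
 */
static int foo_mmap_buffer(struct device *dev, struct vm_area_struct *vma,
		unsigned long pfn, size_t size, unsigned long attrs)
{
	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
	return remap_pfn_range(vma, vma->vm_start, pfn, size,
			vma->vm_page_prot);
}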
#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir);
#else
static inline void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
}
#endif /* ARCH_HAS_SYNC_DMA_FOR_DEVICE */
#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir);
#else
static inline void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
}
#endif /* ARCH_HAS_SYNC_DMA_FOR_CPU */
#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
void arch_sync_dma_for_cpu_all(void);
#else
static inline void arch_sync_dma_for_cpu_all(void)
{
}
#endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL */
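/*
 * Example: the kind of body a non-coherent architecture might give
 * arch_sync_dma_for_device(). foo_cache_wback() and foo_cache_inv() are
 * hypothetical stand-ins for the architecture's real cache maintenance
 * primitives; the direction handling shows the common pattern only.
 */
void foo_cache_wback(phys_addr_t paddr, size_t size);	/* hypothetical */
void foo_cache_inv(phys_addr_t paddr, size_t size);	/* hypothetical */

static void foo_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	if (dir == DMA_FROM_DEVICE)
		foo_cache_inv(paddr, size);	/* drop stale CPU lines */
	else
		foo_cache_wback(paddr, size);	/* push dirty data to RAM */
}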
#ifdef CONFIG_ARCH_HAS_DMA_PREP_COHERENT
void arch_dma_prep_coherent(struct page *page, size_t size);
#else
static inline void arch_dma_prep_coherent(struct page *page, size_t size)
{
}
#endif /* CONFIG_ARCH_HAS_DMA_PREP_COHERENT */
#ifdef CONFIG_ARCH_HAS_DMA_MARK_CLEAN
void arch_dma_mark_clean(phys_addr_t paddr, size_t size);
#else
static inline void arch_dma_mark_clean(phys_addr_t paddr, size_t size)
{
}
#endif /* ARCH_HAS_DMA_MARK_CLEAN */
void *arch_dma_set_uncached(void *addr, size_t size);
void arch_dma_clear_uncached(void *addr, size_t size);
#ifdef CONFIG_ARCH_HAS_DMA_MAP_DIRECT
bool arch_dma_map_page_direct(struct device *dev, phys_addr_t addr);
bool arch_dma_unmap_page_direct(struct device *dev, dma_addr_t dma_handle);
bool arch_dma_map_sg_direct(struct device *dev, struct scatterlist *sg,
		int nents);
bool arch_dma_unmap_sg_direct(struct device *dev, struct scatterlist *sg,
		int nents);
#else
#define arch_dma_map_page_direct(d, a)		(false)
#define arch_dma_unmap_page_direct(d, a)	(false)
#define arch_dma_map_sg_direct(d, s, n)		(false)
#define arch_dma_unmap_sg_direct(d, s, n)	(false)
#endif /* CONFIG_ARCH_HAS_DMA_MAP_DIRECT */
#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
		const struct iommu_ops *iommu, bool coherent);
#else
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
		u64 size, const struct iommu_ops *iommu, bool coherent)
{
}
#endif /* CONFIG_ARCH_HAS_SETUP_DMA_OPS */
#ifdef CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS
void arch_teardown_dma_ops(struct device *dev);
#else
static inline void arch_teardown_dma_ops(struct device *dev)
{
}
#endif /* CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS */
#ifdef CONFIG_DMA_API_DEBUG
void dma_debug_add_bus(struct bus_type *bus);
void debug_dma_dump_mappings(struct device *dev);
#else
static inline void dma_debug_add_bus(struct bus_type *bus)
{
}
static inline void debug_dma_dump_mappings(struct device *dev)
{
}
#endif /* CONFIG_DMA_API_DEBUG */
extern const struct dma_map_ops dma_dummy_ops;

#endif /* _LINUX_DMA_MAP_OPS_H */