/* SPDX-License-Identifier: GPL-2.0 */
/*
 * This header is for implementations of dma_map_ops and related code.
 * It should not be included in drivers just using the DMA API.
 */
#ifndef _LINUX_DMA_MAP_OPS_H
#define _LINUX_DMA_MAP_OPS_H

#include <linux/dma-mapping.h>

struct cma;

struct dma_map_ops {
	void *(*alloc)(struct device *dev, size_t size,
			dma_addr_t *dma_handle, gfp_t gfp,
			unsigned long attrs);
	void (*free)(struct device *dev, size_t size, void *vaddr,
			dma_addr_t dma_handle, unsigned long attrs);
	struct page *(*alloc_pages)(struct device *dev, size_t size,
			dma_addr_t *dma_handle, enum dma_data_direction dir,
			gfp_t gfp);
	void (*free_pages)(struct device *dev, size_t size, struct page *vaddr,
			dma_addr_t dma_handle, enum dma_data_direction dir);
	void *(*alloc_noncoherent)(struct device *dev, size_t size,
			dma_addr_t *dma_handle, enum dma_data_direction dir,
			gfp_t gfp);
	void (*free_noncoherent)(struct device *dev, size_t size, void *vaddr,
			dma_addr_t dma_handle, enum dma_data_direction dir);
	int (*mmap)(struct device *, struct vm_area_struct *,
			void *, dma_addr_t, size_t, unsigned long attrs);

	int (*get_sgtable)(struct device *dev, struct sg_table *sgt,
			void *cpu_addr, dma_addr_t dma_addr, size_t size,
			unsigned long attrs);

	dma_addr_t (*map_page)(struct device *dev, struct page *page,
			unsigned long offset, size_t size,
			enum dma_data_direction dir, unsigned long attrs);
	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	/*
	 * map_sg returns 0 on error and a value > 0 on success.
	 * It should never return a value < 0.
	 */
	int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents,
			enum dma_data_direction dir, unsigned long attrs);
	void (*unmap_sg)(struct device *dev, struct scatterlist *sg, int nents,
			enum dma_data_direction dir, unsigned long attrs);
	dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	void (*sync_single_for_cpu)(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir);
	void (*sync_single_for_device)(struct device *dev,
			dma_addr_t dma_handle, size_t size,
			enum dma_data_direction dir);
	void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir);
	void (*sync_sg_for_device)(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir);
	void (*cache_sync)(struct device *dev, void *vaddr, size_t size,
			enum dma_data_direction direction);
	int (*dma_supported)(struct device *dev, u64 mask);
	u64 (*get_required_mask)(struct device *dev);
	size_t (*max_mapping_size)(struct device *dev);
	unsigned long (*get_merge_boundary)(struct device *dev);
};
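
/*
 * Illustrative sketch, not part of this header: an implementation typically
 * fills in only the callbacks it supports, leaves the rest NULL, and installs
 * the table with set_dma_ops() (declared below).  The my_* names here are
 * hypothetical and used purely for illustration:
 *
 *	static const struct dma_map_ops my_dma_ops = {
 *		.alloc		= my_alloc,
 *		.free		= my_free,
 *		.map_page	= my_map_page,
 *		.unmap_page	= my_unmap_page,
 *		.map_sg		= my_map_sg,
 *		.unmap_sg	= my_unmap_sg,
 *	};
 *
 *	set_dma_ops(dev, &my_dma_ops);
 */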

#ifdef CONFIG_DMA_OPS
#include <asm/dma-mapping.h>

static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (dev->dma_ops)
		return dev->dma_ops;
	return get_arch_dma_ops(dev->bus);
}

static inline void set_dma_ops(struct device *dev,
			       const struct dma_map_ops *dma_ops)
{
	dev->dma_ops = dma_ops;
}
#else /* CONFIG_DMA_OPS */
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	return NULL;
}
static inline void set_dma_ops(struct device *dev,
			       const struct dma_map_ops *dma_ops)
{
}
#endif /* CONFIG_DMA_OPS */

#ifdef CONFIG_DMA_CMA
extern struct cma *dma_contiguous_default_area;

static inline struct cma *dev_get_cma_area(struct device *dev)
{
	if (dev && dev->cma_area)
		return dev->cma_area;
	return dma_contiguous_default_area;
}

void dma_contiguous_reserve(phys_addr_t addr_limit);
int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
		phys_addr_t limit, struct cma **res_cma, bool fixed);

struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
		unsigned int order, bool no_warn);
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
		int count);
struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp);
void dma_free_contiguous(struct device *dev, struct page *page, size_t size);
#else /* CONFIG_DMA_CMA */
static inline struct cma *dev_get_cma_area(struct device *dev)
{
	return NULL;
}
static inline void dma_contiguous_reserve(phys_addr_t limit)
{
}
static inline int dma_contiguous_reserve_area(phys_addr_t size,
		phys_addr_t base, phys_addr_t limit, struct cma **res_cma,
		bool fixed)
{
	return -ENOSYS;
}
static inline struct page *dma_alloc_from_contiguous(struct device *dev,
		size_t count, unsigned int order, bool no_warn)
{
	return NULL;
}
static inline bool dma_release_from_contiguous(struct device *dev,
		struct page *pages, int count)
{
	return false;
}
/* Use fallback alloc() and free() when CONFIG_DMA_CMA=n */
static inline struct page *dma_alloc_contiguous(struct device *dev, size_t size,
		gfp_t gfp)
{
	return NULL;
}
static inline void dma_free_contiguous(struct device *dev, struct page *page,
		size_t size)
{
	__free_pages(page, get_order(size));
}
#endif /* CONFIG_DMA_CMA */
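
/*
 * Illustrative sketch of the intended calling pattern (not a definitive
 * implementation): allocators try the contiguous path first and fall back to
 * the page allocator when it returns NULL, which is always the case with
 * CONFIG_DMA_CMA=n; dma_free_contiguous() then releases the pages either way.
 * Assumes a caller that already knows dev, size and gfp:
 *
 *	struct page *page = dma_alloc_contiguous(dev, size, gfp);
 *
 *	if (!page)
 *		page = alloc_pages(gfp, get_order(size));
 *	...
 *	dma_free_contiguous(dev, page, size);
 */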

#ifdef CONFIG_DMA_PERNUMA_CMA
void dma_pernuma_cma_reserve(void);
#else
static inline void dma_pernuma_cma_reserve(void) { }
#endif /* CONFIG_DMA_PERNUMA_CMA */

#ifdef CONFIG_DMA_DECLARE_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
		dma_addr_t device_addr, size_t size);
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
		dma_addr_t *dma_handle, void **ret);
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);
int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, size_t size, int *ret);

void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
		dma_addr_t *dma_handle);
int dma_release_from_global_coherent(int order, void *vaddr);
int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
		size_t size, int *ret);

#else
static inline int dma_declare_coherent_memory(struct device *dev,
		phys_addr_t phys_addr, dma_addr_t device_addr, size_t size)
{
	return -ENOSYS;
}
#define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)

static inline void *dma_alloc_from_global_coherent(struct device *dev,
		ssize_t size, dma_addr_t *dma_handle)
{
	return NULL;
}
static inline int dma_release_from_global_coherent(int order, void *vaddr)
{
	return 0;
}
static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
		void *cpu_addr, size_t size, int *ret)
{
	return 0;
}
#endif /* CONFIG_DMA_DECLARE_COHERENT */
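
/*
 * Illustrative sketch with assumed platform values (the addresses and size
 * below are hypothetical, not taken from this header): a platform with a
 * dedicated on-chip memory region can declare it as a per-device coherent
 * pool, after which dma_alloc_from_dev_coherent() is consulted for that
 * device before other allocation paths:
 *
 *	ret = dma_declare_coherent_memory(dev, 0x90000000, 0x90000000,
 *					  SZ_1M);
 *	if (ret)
 *		return ret;
 */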

#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
		const struct iommu_ops *iommu, bool coherent);
#else
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
		u64 size, const struct iommu_ops *iommu, bool coherent)
{
}
#endif /* CONFIG_ARCH_HAS_SETUP_DMA_OPS */

#ifdef CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS
void arch_teardown_dma_ops(struct device *dev);
#else
static inline void arch_teardown_dma_ops(struct device *dev)
{
}
#endif /* CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS */

extern const struct dma_map_ops dma_dummy_ops;

#endif /* _LINUX_DMA_MAP_OPS_H */