/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-debug.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/bug.h>
#include <linux/mem_encrypt.h>

/**
 * List of possible attributes associated with a DMA mapping. The semantics
 * of each attribute should be defined in Documentation/core-api/dma-attributes.rst.
 */

/*
 * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
 * may be weakly ordered, that is that reads and writes may pass each other.
 */
#define DMA_ATTR_WEAK_ORDERING		(1UL << 1)
/*
 * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
 * buffered to improve performance.
 */
#define DMA_ATTR_WRITE_COMBINE		(1UL << 2)
/*
 * DMA_ATTR_NON_CONSISTENT: Lets the platform choose to return either
 * consistent or non-consistent memory as it sees fit.
 */
#define DMA_ATTR_NON_CONSISTENT		(1UL << 3)
/*
 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
 * virtual mapping for the allocated buffer.
 */
#define DMA_ATTR_NO_KERNEL_MAPPING	(1UL << 4)
/*
 * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
 * the CPU cache for the given buffer, assuming that it has already been
 * transferred to the 'device' domain.
 */
#define DMA_ATTR_SKIP_CPU_SYNC		(1UL << 5)
/*
 * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
 * in physical memory.
 */
#define DMA_ATTR_FORCE_CONTIGUOUS	(1UL << 6)
/*
 * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
 * that it's probably not worth the time to try to allocate memory in a way
 * that gives better TLB efficiency.
 */
#define DMA_ATTR_ALLOC_SINGLE_PAGES	(1UL << 7)
/*
 * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
 * allocation failure reports (similarly to __GFP_NOWARN).
 */
#define DMA_ATTR_NO_WARN	(1UL << 8)

/*
 * DMA_ATTR_PRIVILEGED: used to indicate that the buffer is fully
 * accessible at an elevated privilege level (and ideally inaccessible or
 * at least read-only at lesser-privileged levels).
 */
#define DMA_ATTR_PRIVILEGED		(1UL << 9)

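/*
 * Illustrative sketch (not part of the API): attributes are a bitmask and
 * may be combined. A driver allocating a large buffer that is never touched
 * through a kernel virtual address might pass (dev, size, and dma_handle
 * are hypothetical):
 *
 *	void *buf = dma_alloc_attrs(dev, size, &dma_handle, GFP_KERNEL,
 *				    DMA_ATTR_NO_KERNEL_MAPPING |
 *				    DMA_ATTR_NO_WARN);
 *
 * See Documentation/core-api/dma-attributes.rst for the exact semantics.
 */
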
/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform.
 * It can be given to a device to use as a DMA source or target. A CPU cannot
 * reference a dma_addr_t directly because there may be translation between
 * its physical address space and the bus address space.
 */
struct dma_map_ops {
	void *(*alloc)(struct device *dev, size_t size,
			dma_addr_t *dma_handle, gfp_t gfp,
			unsigned long attrs);
	void (*free)(struct device *dev, size_t size,
			void *vaddr, dma_addr_t dma_handle,
			unsigned long attrs);
	int (*mmap)(struct device *, struct vm_area_struct *,
			void *, dma_addr_t, size_t,
			unsigned long attrs);

	int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
			dma_addr_t, size_t, unsigned long attrs);

	dma_addr_t (*map_page)(struct device *dev, struct page *page,
			unsigned long offset, size_t size,
			enum dma_data_direction dir,
			unsigned long attrs);
	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	/*
	 * map_sg returns 0 on error and a value > 0 on success.
	 * It should never return a value < 0.
	 */
	int (*map_sg)(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir,
			unsigned long attrs);
	void (*unmap_sg)(struct device *dev,
			struct scatterlist *sg, int nents,
			enum dma_data_direction dir,
			unsigned long attrs);
	dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	void (*sync_single_for_cpu)(struct device *dev,
			dma_addr_t dma_handle, size_t size,
			enum dma_data_direction dir);
	void (*sync_single_for_device)(struct device *dev,
			dma_addr_t dma_handle, size_t size,
			enum dma_data_direction dir);
	void (*sync_sg_for_cpu)(struct device *dev,
			struct scatterlist *sg, int nents,
			enum dma_data_direction dir);
	void (*sync_sg_for_device)(struct device *dev,
			struct scatterlist *sg, int nents,
			enum dma_data_direction dir);
	void (*cache_sync)(struct device *dev, void *vaddr, size_t size,
			enum dma_data_direction direction);
	int (*dma_supported)(struct device *dev, u64 mask);
	u64 (*get_required_mask)(struct device *dev);
	size_t (*max_mapping_size)(struct device *dev);
	unsigned long (*get_merge_boundary)(struct device *dev);
};

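/*
 * Illustrative sketch (not a real implementation): a trivial "identity"
 * ops table for a bus whose DMA addresses equal physical addresses. The
 * my_* names are hypothetical, and a real implementation would also
 * provide unmap and sync hooks:
 *
 *	static dma_addr_t my_map_page(struct device *dev, struct page *page,
 *			unsigned long offset, size_t size,
 *			enum dma_data_direction dir, unsigned long attrs)
 *	{
 *		return page_to_phys(page) + offset;
 *	}
 *
 *	static const struct dma_map_ops my_dma_ops = {
 *		.map_page	= my_map_page,
 *	};
 *
 * An ops table is installed per device with set_dma_ops(), declared below.
 */
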
#define DMA_MAPPING_ERROR		(~(dma_addr_t)0)

extern const struct dma_map_ops dma_virt_ops;
extern const struct dma_map_ops dma_dummy_ops;

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))

#define DMA_MASK_NONE	0x0ULL

static inline int valid_dma_direction(int dma_direction)
{
	return ((dma_direction == DMA_BIDIRECTIONAL) ||
		(dma_direction == DMA_TO_DEVICE) ||
		(dma_direction == DMA_FROM_DEVICE));
}

#ifdef CONFIG_DMA_DECLARE_COHERENT
/*
 * These three functions are only for the DMA allocator.
 * Don't use them in device drivers.
 */
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
		dma_addr_t *dma_handle, void **ret);
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);

int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, size_t size, int *ret);

void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
		dma_addr_t *dma_handle);
int dma_release_from_global_coherent(int order, void *vaddr);
int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
		size_t size, int *ret);

#else
#define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)

static inline void *dma_alloc_from_global_coherent(struct device *dev,
		ssize_t size, dma_addr_t *dma_handle)
{
	return NULL;
}

static inline int dma_release_from_global_coherent(int order, void *vaddr)
{
	return 0;
}

static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
		void *cpu_addr, size_t size, int *ret)
{
	return 0;
}
#endif /* CONFIG_DMA_DECLARE_COHERENT */

#ifdef CONFIG_HAS_DMA
#include <asm/dma-mapping.h>

#ifdef CONFIG_DMA_OPS
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (dev->dma_ops)
		return dev->dma_ops;
	return get_arch_dma_ops(dev->bus);
}

static inline void set_dma_ops(struct device *dev,
			       const struct dma_map_ops *dma_ops)
{
	dev->dma_ops = dma_ops;
}
#else /* CONFIG_DMA_OPS */
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	return NULL;
}
static inline void set_dma_ops(struct device *dev,
			       const struct dma_map_ops *dma_ops)
{
}
#endif /* CONFIG_DMA_OPS */

static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	debug_dma_mapping_error(dev, dma_addr);

	if (dma_addr == DMA_MAPPING_ERROR)
		return -ENOMEM;
	return 0;
}

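/*
 * Illustrative sketch: every address returned by the dma_map_* family must
 * be checked with dma_mapping_error() before use (the page and size below
 * are hypothetical):
 *
 *	dma_addr_t addr = dma_map_page(dev, page, 0, size, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(dev, addr))
 *		return -ENOMEM;
 */
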
dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
		size_t offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs);
void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs);
int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir,
		unsigned long attrs);
dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs);
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir);
void dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir);
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		int nelems, enum dma_data_direction dir);
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		int nelems, enum dma_data_direction dir);
void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t flag, unsigned long attrs);
void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs);
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs);
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle);
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction dir);
int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
bool dma_can_mmap(struct device *dev);
int dma_supported(struct device *dev, u64 mask);
int dma_set_mask(struct device *dev, u64 mask);
int dma_set_coherent_mask(struct device *dev, u64 mask);
u64 dma_get_required_mask(struct device *dev);
size_t dma_max_mapping_size(struct device *dev);
bool dma_need_sync(struct device *dev, dma_addr_t dma_addr);
unsigned long dma_get_merge_boundary(struct device *dev);
#else /* CONFIG_HAS_DMA */
static inline dma_addr_t dma_map_page_attrs(struct device *dev,
		struct page *page, size_t offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	return 0;
}
static inline void dma_unmap_sg_attrs(struct device *dev,
		struct scatterlist *sg, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
}
static inline dma_addr_t dma_map_resource(struct device *dev,
		phys_addr_t phys_addr, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
}
static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
}
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return -ENOMEM;
}
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
{
	return NULL;
}
static inline void dma_free_attrs(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
{
}
static inline void *dmam_alloc_attrs(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	return NULL;
}
static inline void dmam_free_coherent(struct device *dev, size_t size,
		void *vaddr, dma_addr_t dma_handle)
{
}
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction dir)
{
}
static inline int dma_get_sgtable_attrs(struct device *dev,
		struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr,
		size_t size, unsigned long attrs)
{
	return -ENXIO;
}
static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	return -ENXIO;
}
static inline bool dma_can_mmap(struct device *dev)
{
	return false;
}
static inline int dma_supported(struct device *dev, u64 mask)
{
	return 0;
}
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	return -EIO;
}
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	return -EIO;
}
static inline u64 dma_get_required_mask(struct device *dev)
{
	return 0;
}
static inline size_t dma_max_mapping_size(struct device *dev)
{
	return 0;
}
static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	return false;
}
static inline unsigned long dma_get_merge_boundary(struct device *dev)
{
	return 0;
}
#endif /* CONFIG_HAS_DMA */

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	/* DMA must never operate on areas that might be remapped. */
	if (dev_WARN_ONCE(dev, is_vmalloc_addr(ptr),
			  "rejecting DMA map of vmalloc memory\n"))
		return DMA_MAPPING_ERROR;
	debug_dma_map_single(dev, ptr, size);
	return dma_map_page_attrs(dev, virt_to_page(ptr), offset_in_page(ptr),
			size, dir, attrs);
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return dma_unmap_page_attrs(dev, addr, size, dir, attrs);
}

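/*
 * Illustrative sketch: dma_map_single() accepts only linear-mapped memory,
 * e.g. from kmalloc(); vmalloc()ed buffers are rejected by the check above.
 * The names below are hypothetical:
 *
 *	void *buf = kmalloc(len, GFP_KERNEL);
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	... let the device read from the buffer ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */
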
static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t addr, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	return dma_sync_single_for_cpu(dev, addr + offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t addr, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	return dma_sync_single_for_device(dev, addr + offset, size, dir);
}

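/*
 * Illustrative sketch: the _range variants sync just part of a mapping,
 * e.g. one slot of a receive ring before the CPU reads it (the ring,
 * slot index, and process_slot() are hypothetical):
 *
 *	dma_sync_single_range_for_cpu(dev, ring_handle, slot * SLOT_SIZE,
 *				      SLOT_SIZE, DMA_FROM_DEVICE);
 *	process_slot(ring_virt + slot * SLOT_SIZE);
 *	dma_sync_single_range_for_device(dev, ring_handle, slot * SLOT_SIZE,
 *					 SLOT_SIZE, DMA_FROM_DEVICE);
 */
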
/**
 * dma_map_sgtable - Map the given buffer for DMA
 * @dev:	The device for which to perform the DMA operation
 * @sgt:	The sg_table object describing the buffer
 * @dir:	DMA direction
 * @attrs:	Optional DMA attributes for the map operation
 *
 * Maps a buffer described by a scatterlist stored in the given sg_table
 * object for the @dir DMA operation by the @dev device. After success,
 * ownership of the buffer is transferred to the DMA domain. One has to
 * call dma_sync_sgtable_for_cpu() or dma_unmap_sgtable() to move the
 * ownership of the buffer back to the CPU domain before the CPU touches
 * the buffer.
 *
 * Returns 0 on success or -EINVAL if the buffer could not be mapped.
 */
static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs)
{
	int nents;

	nents = dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
	if (nents <= 0)
		return -EINVAL;
	sgt->nents = nents;
	return 0;
}

/**
 * dma_unmap_sgtable - Unmap the given buffer for DMA
 * @dev:	The device for which to perform the DMA operation
 * @sgt:	The sg_table object describing the buffer
 * @dir:	DMA direction
 * @attrs:	Optional DMA attributes for the unmap operation
 *
 * Unmaps a buffer described by a scatterlist stored in the given sg_table
 * object for the @dir DMA operation by the @dev device. After this function
 * returns, ownership of the buffer is transferred back to the CPU domain.
 */
static inline void dma_unmap_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs)
{
	dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
}

/**
 * dma_sync_sgtable_for_cpu - Synchronize the given buffer for CPU access
 * @dev:	The device for which to perform the DMA operation
 * @sgt:	The sg_table object describing the buffer
 * @dir:	DMA direction
 *
 * Performs the needed cache synchronization and moves the ownership of the
 * buffer back to the CPU domain, so the CPU can safely access it. Before
 * doing any further DMA operations, one has to transfer ownership of the
 * buffer back to the DMA domain by calling dma_sync_sgtable_for_device().
 */
static inline void dma_sync_sgtable_for_cpu(struct device *dev,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents, dir);
}

/**
 * dma_sync_sgtable_for_device - Synchronize the given buffer for DMA
 * @dev:	The device for which to perform the DMA operation
 * @sgt:	The sg_table object describing the buffer
 * @dir:	DMA direction
 *
 * Performs the needed cache synchronization and moves the ownership of the
 * buffer back to the DMA domain, so it is safe to perform the DMA operation.
 * Once finished, one has to call dma_sync_sgtable_for_cpu() or
 * dma_unmap_sgtable().
 */
static inline void dma_sync_sgtable_for_device(struct device *dev,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents, dir);
}

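/*
 * Illustrative sketch: a typical scatter-gather cycle with the sgtable
 * helpers above (the sg_table setup and device programming are
 * hypothetical):
 *
 *	if (dma_map_sgtable(dev, sgt, DMA_TO_DEVICE, 0))
 *		return -EINVAL;
 *	... program the device with the sgt->nents mapped entries ...
 *	dma_unmap_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
 */
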
#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)

extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);

struct page **dma_common_find_pages(void *cpu_addr);
void *dma_common_contiguous_remap(struct page *page, size_t size,
		pgprot_t prot, const void *caller);

void *dma_common_pages_remap(struct page **pages, size_t size,
		pgprot_t prot, const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size);

struct page *dma_alloc_from_pool(struct device *dev, size_t size,
		void **cpu_addr, gfp_t flags,
		bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t));
bool dma_free_from_pool(struct device *dev, void *start, size_t size);

int
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr,
		dma_addr_t dma_addr, size_t size, unsigned long attrs);

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_handle, gfp,
			(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
}

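/*
 * Illustrative sketch: a coherent buffer stays mapped for the lifetime of
 * the allocation and needs no dma_sync_* calls. The descriptor-ring use
 * below is hypothetical:
 *
 *	dma_addr_t ring_handle;
 *	void *ring = dma_alloc_coherent(dev, RING_BYTES, &ring_handle,
 *					GFP_KERNEL);
 *
 *	if (!ring)
 *		return -ENOMEM;
 *	... tell the device about ring_handle, use ring from the CPU ...
 *	dma_free_coherent(dev, RING_BYTES, ring, ring_handle);
 */
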
static inline u64 dma_get_mask(struct device *dev)
{
	if (dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	return DMA_BIT_MASK(32);
}

/*
 * Set both the DMA mask and the coherent DMA mask to the same thing.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to
 * the same or smaller than the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
	int rc = dma_set_mask(dev, mask);
	if (rc == 0)
		dma_set_coherent_mask(dev, mask);
	return rc;
}

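/*
 * Illustrative sketch: a probe routine typically asks for the widest mask
 * the hardware supports and falls back to 32 bits (any further error
 * handling is hypothetical):
 *
 *	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */
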
/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask appropriately set up.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
	dev->dma_mask = &dev->coherent_dma_mask;
	return dma_set_mask_and_coherent(dev, mask);
}

/**
 * dma_addressing_limited - return if the device is addressing limited
 * @dev:	device to check
 *
 * Return %true if the device's DMA mask is too small to address all memory in
 * the system, else %false. Lack of addressing bits is the prime reason for
 * bounce buffering, but might not be the only one.
 */
static inline bool dma_addressing_limited(struct device *dev)
{
	return min_not_zero(dma_get_mask(dev), dev->bus_dma_limit) <
			    dma_get_required_mask(dev);
}

#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
		const struct iommu_ops *iommu, bool coherent);
#else
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
		u64 size, const struct iommu_ops *iommu, bool coherent)
{
}
#endif /* CONFIG_ARCH_HAS_SETUP_DMA_OPS */

#ifdef CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS
void arch_teardown_dma_ops(struct device *dev);
#else
static inline void arch_teardown_dma_ops(struct device *dev)
{
}
#endif /* CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS */

static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->max_segment_size)
		return dev->dma_parms->max_segment_size;
	return SZ_64K;
}

static inline int dma_set_max_seg_size(struct device *dev, unsigned int size)
{
	if (dev->dma_parms) {
		dev->dma_parms->max_segment_size = size;
		return 0;
	}
	return -EIO;
}

static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
		return dev->dma_parms->segment_boundary_mask;
	return DMA_BIT_MASK(32);
}

static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
	if (dev->dma_parms) {
		dev->dma_parms->segment_boundary_mask = mask;
		return 0;
	}
	return -EIO;
}

static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
	return ARCH_DMA_MINALIGN;
#endif
	return 1;
}

#ifdef CONFIG_DMA_DECLARE_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
		dma_addr_t device_addr, size_t size);
#else
static inline int
dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
		dma_addr_t device_addr, size_t size)
{
	return -ENOSYS;
}
#endif /* CONFIG_DMA_DECLARE_COHERENT */

static inline void *dmam_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	return dmam_alloc_attrs(dev, size, dma_handle, gfp,
			(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}

static inline void *dma_alloc_wc(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t gfp)
{
	unsigned long attrs = DMA_ATTR_WRITE_COMBINE;

	if (gfp & __GFP_NOWARN)
		attrs |= DMA_ATTR_NO_WARN;

	return dma_alloc_attrs(dev, size, dma_addr, gfp, attrs);
}

static inline void dma_free_wc(struct device *dev, size_t size,
			       void *cpu_addr, dma_addr_t dma_addr)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_addr,
			      DMA_ATTR_WRITE_COMBINE);
}

static inline int dma_mmap_wc(struct device *dev,
			      struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr,
			      size_t size)
{
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
			      DMA_ATTR_WRITE_COMBINE);
}

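/*
 * Illustrative sketch: the _wc helpers wrap the attrs API for
 * write-combined memory, typically frame-buffer style allocations. A
 * driver's mmap handler (the my_* names are hypothetical) might forward
 * to dma_mmap_wc():
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return dma_mmap_wc(my_dev, vma, my_cpu_addr, my_dma_addr,
 *				   vma->vm_end - vma->vm_start);
 *	}
 */
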
#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)	dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)		__u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)		((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	(((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)		((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	(((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)		(0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME)		(0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	do { } while (0)
#endif

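/*
 * Illustrative sketch: these macros let a driver stash unmap state only
 * when the platform actually needs it; on other platforms they compile
 * away to nothing. The structure and field names are hypothetical:
 *
 *	struct my_tx_desc {
 *		DEFINE_DMA_UNMAP_ADDR(addr);
 *		DEFINE_DMA_UNMAP_LEN(len);
 *	};
 *
 *	dma_unmap_addr_set(desc, addr, handle);
 *	dma_unmap_len_set(desc, len, size);
 *	...
 *	dma_unmap_single(dev, dma_unmap_addr(desc, addr),
 *			 dma_unmap_len(desc, len), DMA_TO_DEVICE);
 */
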
#endif /* _LINUX_DMA_MAPPING_H */