/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-debug.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/kmemcheck.h>
#include <linux/bug.h>
#include <linux/mem_encrypt.h>

/**
 * List of possible attributes associated with a DMA mapping. The semantics
 * of each attribute should be defined in Documentation/DMA-attributes.txt.
 *
 * DMA_ATTR_WRITE_BARRIER: DMA to a memory region with this attribute
 * forces all pending DMA writes to complete.
 */
#define DMA_ATTR_WRITE_BARRIER		(1UL << 0)
/*
 * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
 * may be weakly ordered, that is that reads and writes may pass each other.
 */
#define DMA_ATTR_WEAK_ORDERING		(1UL << 1)
/*
 * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
 * buffered to improve performance.
 */
#define DMA_ATTR_WRITE_COMBINE		(1UL << 2)
/*
 * DMA_ATTR_NON_CONSISTENT: Lets the platform choose to return either
 * consistent or non-consistent memory as it sees fit.
 */
#define DMA_ATTR_NON_CONSISTENT		(1UL << 3)
/*
 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
 * virtual mapping for the allocated buffer.
 */
#define DMA_ATTR_NO_KERNEL_MAPPING	(1UL << 4)
/*
 * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
 * the CPU cache for the given buffer, assuming that it has already been
 * transferred to the 'device' domain.
 */
#define DMA_ATTR_SKIP_CPU_SYNC		(1UL << 5)
/*
 * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
 * in physical memory.
 */
#define DMA_ATTR_FORCE_CONTIGUOUS	(1UL << 6)
/*
 * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
 * that it's probably not worth the time to try to allocate memory in a way
 * that gives better TLB efficiency.
 */
#define DMA_ATTR_ALLOC_SINGLE_PAGES	(1UL << 7)
/*
 * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
 * allocation failure reports (similarly to __GFP_NOWARN).
 */
#define DMA_ATTR_NO_WARN		(1UL << 8)

/*
 * DMA_ATTR_PRIVILEGED: used to indicate that the buffer is fully
 * accessible at an elevated privilege level (and ideally inaccessible or
 * at least read-only at lesser-privileged levels).
 */
#define DMA_ATTR_PRIVILEGED		(1UL << 9)

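/*
 * Example: the attribute bits above may be OR'ed together and passed as
 * the 'attrs' argument of the *_attrs() API variants declared below. A
 * minimal sketch (the 'dev' pointer and the size are hypothetical):
 *
 *	dma_addr_t dma_handle;
 *	void *cpu_addr;
 *
 *	cpu_addr = dma_alloc_attrs(dev, SZ_64K, &dma_handle, GFP_KERNEL,
 *				   DMA_ATTR_WRITE_COMBINE | DMA_ATTR_NO_WARN);
 */
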
/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform.
 * It can be given to a device to use as a DMA source or target. A CPU cannot
 * reference a dma_addr_t directly because there may be translation between
 * its physical address space and the bus address space.
 */
struct dma_map_ops {
	void *(*alloc)(struct device *dev, size_t size,
			dma_addr_t *dma_handle, gfp_t gfp,
			unsigned long attrs);
	void (*free)(struct device *dev, size_t size,
			void *vaddr, dma_addr_t dma_handle,
			unsigned long attrs);
	int (*mmap)(struct device *, struct vm_area_struct *,
			void *, dma_addr_t, size_t,
			unsigned long attrs);

	int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
			dma_addr_t, size_t, unsigned long attrs);

	dma_addr_t (*map_page)(struct device *dev, struct page *page,
			unsigned long offset, size_t size,
			enum dma_data_direction dir,
			unsigned long attrs);
	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	/*
	 * map_sg returns 0 on error and a value > 0 on success.
	 * It should never return a value < 0.
	 */
	int (*map_sg)(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir,
			unsigned long attrs);
	void (*unmap_sg)(struct device *dev,
			struct scatterlist *sg, int nents,
			enum dma_data_direction dir,
			unsigned long attrs);
	dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	void (*sync_single_for_cpu)(struct device *dev,
			dma_addr_t dma_handle, size_t size,
			enum dma_data_direction dir);
	void (*sync_single_for_device)(struct device *dev,
			dma_addr_t dma_handle, size_t size,
			enum dma_data_direction dir);
	void (*sync_sg_for_cpu)(struct device *dev,
			struct scatterlist *sg, int nents,
			enum dma_data_direction dir);
	void (*sync_sg_for_device)(struct device *dev,
			struct scatterlist *sg, int nents,
			enum dma_data_direction dir);
	int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
	int (*dma_supported)(struct device *dev, u64 mask);
#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
	u64 (*get_required_mask)(struct device *dev);
#endif
	int is_phys;
};

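/*
 * Example: a minimal, hypothetical dma_map_ops implementation for a
 * platform where device addresses equal physical addresses (a sketch
 * only; real implementations also handle cache maintenance, bounce
 * buffering, IOMMUs, and the remaining callbacks):
 *
 *	static dma_addr_t nop_map_page(struct device *dev, struct page *page,
 *			unsigned long offset, size_t size,
 *			enum dma_data_direction dir, unsigned long attrs)
 *	{
 *		return page_to_phys(page) + offset;
 *	}
 *
 *	static const struct dma_map_ops nop_dma_ops = {
 *		.map_page = nop_map_page,
 *	};
 */
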
extern const struct dma_map_ops dma_noop_ops;
extern const struct dma_map_ops dma_virt_ops;

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
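/*
 * For example, DMA_BIT_MASK(32) evaluates to 0xffffffffULL. The (n) == 64
 * special case avoids shifting a 64-bit value by 64 bits, which is
 * undefined behaviour in C.
 */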

#define DMA_MASK_NONE	0x0ULL

static inline int valid_dma_direction(int dma_direction)
{
	return ((dma_direction == DMA_BIDIRECTIONAL) ||
		(dma_direction == DMA_TO_DEVICE) ||
		(dma_direction == DMA_FROM_DEVICE));
}

static inline int is_device_dma_capable(struct device *dev)
{
	return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
}

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
/*
 * These three functions are only for the DMA allocator.
 * Don't use them in device drivers.
 */
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
				dma_addr_t *dma_handle, void **ret);
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);

int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
			       void *cpu_addr, size_t size, int *ret);

void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle);
int dma_release_from_global_coherent(int order, void *vaddr);
int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
				  size_t size, int *ret);

#else
#define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)

static inline void *dma_alloc_from_global_coherent(ssize_t size,
						   dma_addr_t *dma_handle)
{
	return NULL;
}

static inline int dma_release_from_global_coherent(int order, void *vaddr)
{
	return 0;
}

static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
						void *cpu_addr, size_t size,
						int *ret)
{
	return 0;
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

#ifdef CONFIG_HAS_DMA
#include <asm/dma-mapping.h>
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (dev && dev->dma_ops)
		return dev->dma_ops;
	return get_arch_dma_ops(dev ? dev->bus : NULL);
}

static inline void set_dma_ops(struct device *dev,
			       const struct dma_map_ops *dma_ops)
{
	dev->dma_ops = dma_ops;
}
#else
/*
 * Define the DMA API to allow compilation but not linking of
 * DMA-dependent code. Code that depends on the DMA-mapping
 * API needs to set 'depends on HAS_DMA' in its Kconfig.
 */
extern const struct dma_map_ops bad_dma_ops;
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	return &bad_dma_ops;
}
#endif

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
					      size_t size,
					      enum dma_data_direction dir,
					      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(ptr, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, virt_to_page(ptr),
			     offset_in_page(ptr), size,
			     dir, attrs);
	debug_dma_map_page(dev, virt_to_page(ptr),
			   offset_in_page(ptr), size,
			   dir, addr, true);
	return addr;
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
					  size_t size,
					  enum dma_data_direction dir,
					  unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir, true);
}

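/*
 * Example: a typical streaming mapping for a single buffer bound for the
 * device (sketch; 'dev', 'buf' and 'len' are hypothetical, and the result
 * must be checked with dma_mapping_error(), defined further below):
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	... hand 'handle' to the hardware and wait for completion ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */
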
/*
 * dma_map_sg_attrs returns 0 on error and > 0 on success.
 * It should never return a value < 0.
 */
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir,
				   unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	int i, ents;
	struct scatterlist *s;

	for_each_sg(sg, s, nents, i)
		kmemcheck_mark_initialized(sg_virt(s), s->length);
	BUG_ON(!valid_dma_direction(dir));
	ents = ops->map_sg(dev, sg, nents, dir, attrs);
	BUG_ON(ents < 0);
	debug_dma_map_sg(dev, sg, nents, ents, dir);

	return ents;
}

static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
				      int nents, enum dma_data_direction dir,
				      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}

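/*
 * Example: mapping a scatterlist and programming the mapped entries into
 * hardware (sketch; 'program_hw_entry' is hypothetical). Note that
 * map_sg may coalesce entries, so the returned count drives the walk
 * while the original nents is passed back to dma_unmap_sg():
 *
 *	struct scatterlist *s;
 *	int i, count;
 *
 *	count = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 *	if (!count)
 *		return -ENOMEM;
 *	for_each_sg(sgl, s, count, i)
 *		program_hw_entry(sg_dma_address(s), sg_dma_len(s));
 *	dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 */
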
static inline dma_addr_t dma_map_page_attrs(struct device *dev,
					    struct page *page,
					    size_t offset, size_t size,
					    enum dma_data_direction dir,
					    unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(page_address(page) + offset, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, page, offset, size, dir, attrs);
	debug_dma_map_page(dev, page, offset, size, dir, addr, false);

	return addr;
}

static inline void dma_unmap_page_attrs(struct device *dev,
					dma_addr_t addr, size_t size,
					enum dma_data_direction dir,
					unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir, false);
}

static inline dma_addr_t dma_map_resource(struct device *dev,
					  phys_addr_t phys_addr,
					  size_t size,
					  enum dma_data_direction dir,
					  unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));

	/* Don't allow RAM to be mapped */
	BUG_ON(pfn_valid(PHYS_PFN(phys_addr)));

	addr = phys_addr;
	if (ops->map_resource)
		addr = ops->map_resource(dev, phys_addr, size, dir, attrs);

	debug_dma_map_resource(dev, phys_addr, size, dir, addr);

	return addr;
}

static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
				      size_t size, enum dma_data_direction dir,
				      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_resource)
		ops->unmap_resource(dev, addr, size, dir, attrs);
	debug_dma_unmap_resource(dev, addr, size, dir);
}

static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
					   size_t size,
					   enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t addr, size_t size,
					      enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
						 dma_addr_t addr,
						 unsigned long offset,
						 size_t size,
						 enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
						    dma_addr_t addr,
						    unsigned long offset,
						    size_t size,
						    enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		    int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		       int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}

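/*
 * Example: keeping one streaming mapping alive across transfers by
 * handing buffer ownership back and forth instead of remapping (sketch;
 * 'handle' is a hypothetical mapping created with dma_map_single()):
 *
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	... the CPU may now read the received data ...
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 *	... the device may DMA into the buffer again ...
 */
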
#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)

extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
			   void *cpu_addr, dma_addr_t dma_addr, size_t size);

void *dma_common_contiguous_remap(struct page *page, size_t size,
				  unsigned long vm_flags,
				  pgprot_t prot, const void *caller);

void *dma_common_pages_remap(struct page **pages, size_t size,
			     unsigned long vm_flags, pgprot_t prot,
			     const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);

/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @dma_addr: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
 * into user space. The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
static inline int
dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
	       dma_addr_t dma_addr, size_t size, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	BUG_ON(!ops);
	if (ops->mmap)
		return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}

#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)

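/*
 * Example: exposing a coherent allocation to user space from a driver's
 * mmap file operation (sketch; the 'foo_priv' structure holding the
 * earlier allocation is hypothetical):
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_priv *priv = file->private_data;
 *
 *		return dma_mmap_coherent(priv->dev, vma, priv->cpu_addr,
 *					 priv->dma_handle, priv->size);
 *	}
 */
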
int
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		       void *cpu_addr, dma_addr_t dma_addr, size_t size);

static inline int
dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
		      dma_addr_t dma_addr, size_t size,
		      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	BUG_ON(!ops);
	if (ops->get_sgtable)
		return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
					attrs);
	return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
}

#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)

#ifndef arch_dma_alloc_attrs
#define arch_dma_alloc_attrs(dev, flag)	(true)
#endif

static inline void *dma_alloc_attrs(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t flag,
				    unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	BUG_ON(!ops);

	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
		return cpu_addr;

	if (!arch_dma_alloc_attrs(&dev, &flag))
		return NULL;
	if (!ops->alloc)
		return NULL;

	cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
	return cpu_addr;
}

static inline void dma_free_attrs(struct device *dev, size_t size,
				  void *cpu_addr, dma_addr_t dma_handle,
				  unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!ops);
	WARN_ON(irqs_disabled());

	if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
		return;

	if (!ops->free || !cpu_addr)
		return;

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	ops->free(dev, size, cpu_addr, dma_handle, attrs);
}

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	return dma_alloc_attrs(dev, size, dma_handle, flag, 0);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *cpu_addr, dma_addr_t dma_handle)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
}

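/*
 * Example: a coherent allocation for a descriptor ring that both CPU and
 * device access concurrently (sketch; 'dev' and the size are
 * hypothetical):
 *
 *	dma_addr_t ring_dma;
 *	void *ring;
 *
 *	ring = dma_alloc_coherent(dev, SZ_4K, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	... program ring_dma into the device; use 'ring' from the CPU ...
 *	dma_free_coherent(dev, SZ_4K, ring, ring_dma);
 */
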
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	debug_dma_mapping_error(dev, dma_addr);
	if (ops->mapping_error)
		return ops->mapping_error(dev, dma_addr);
	return 0;
}

static inline void dma_check_mask(struct device *dev, u64 mask)
{
	if (sme_active() && (mask < (((u64)sme_get_me_mask() << 1) - 1)))
		dev_warn(dev, "SME is active, device will require DMA bounce buffers\n");
}

static inline int dma_supported(struct device *dev, u64 mask)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops)
		return 0;
	if (!ops->dma_supported)
		return 1;
	return ops->dma_supported(dev, mask);
}

#ifndef HAVE_ARCH_DMA_SET_MASK
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	dma_check_mask(dev, mask);

	*dev->dma_mask = mask;
	return 0;
}
#endif

static inline u64 dma_get_mask(struct device *dev)
{
	if (dev && dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	return DMA_BIT_MASK(32);
}

#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask);
#else
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	if (!dma_supported(dev, mask))
		return -EIO;

	dma_check_mask(dev, mask);

	dev->coherent_dma_mask = mask;
	return 0;
}
#endif

/*
 * Set both the DMA mask and the coherent DMA mask to the same thing.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can always be set
 * to the same or a smaller mask than the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
	int rc = dma_set_mask(dev, mask);
	if (rc == 0)
		dma_set_coherent_mask(dev, mask);
	return rc;
}

/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask appropriately set up.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
	dev->dma_mask = &dev->coherent_dma_mask;
	return dma_set_mask_and_coherent(dev, mask);
}

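/*
 * Example: typical use from a probe() routine, falling back to a 32-bit
 * mask when a wider one is not supported (sketch; 'pdev' is a
 * hypothetical platform or PCI device):
 *
 *	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */
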
extern u64 dma_get_required_mask(struct device *dev);

#ifndef arch_setup_dma_ops
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
				      u64 size, const struct iommu_ops *iommu,
				      bool coherent) { }
#endif

#ifndef arch_teardown_dma_ops
static inline void arch_teardown_dma_ops(struct device *dev) { }
#endif

static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->max_segment_size)
		return dev->dma_parms->max_segment_size;
	return SZ_64K;
}

static inline unsigned int dma_set_max_seg_size(struct device *dev,
						unsigned int size)
{
	if (dev->dma_parms) {
		dev->dma_parms->max_segment_size = size;
		return 0;
	}
	return -EIO;
}

static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
		return dev->dma_parms->segment_boundary_mask;
	return DMA_BIT_MASK(32);
}

static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
	if (dev->dma_parms) {
		dev->dma_parms->segment_boundary_mask = mask;
		return 0;
	}
	return -EIO;
}

#ifndef dma_max_pfn
static inline unsigned long dma_max_pfn(struct device *dev)
{
	return *dev->dma_mask >> PAGE_SHIFT;
}
#endif

static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, gfp_t flag)
{
	void *ret = dma_alloc_coherent(dev, size, dma_handle,
				       flag | __GFP_ZERO);
	return ret;
}

#ifdef CONFIG_HAS_DMA
static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
	return ARCH_DMA_MINALIGN;
#endif
	return 1;
}
#endif

/* flags for the coherent memory api */
#define DMA_MEMORY_EXCLUSIVE		0x01

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				dma_addr_t device_addr, size_t size, int flags);
void dma_release_declared_memory(struct device *dev);
void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size);
#else
static inline int
dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
			    dma_addr_t device_addr, size_t size, int flags)
{
	return -ENOSYS;
}

static inline void
dma_release_declared_memory(struct device *dev)
{
}

static inline void *
dma_mark_declared_memory_occupied(struct device *dev,
				  dma_addr_t device_addr, size_t size)
{
	return ERR_PTR(-EBUSY);
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

#ifdef CONFIG_HAS_DMA
int dma_configure(struct device *dev);
void dma_deconfigure(struct device *dev);
#else
static inline int dma_configure(struct device *dev)
{
	return 0;
}

static inline void dma_deconfigure(struct device *dev) {}
#endif

/*
 * Managed DMA API
 */
extern void *dmam_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			       dma_addr_t dma_handle);
extern void *dmam_alloc_attrs(struct device *dev, size_t size,
			      dma_addr_t *dma_handle, gfp_t gfp,
			      unsigned long attrs);
#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
extern int dmam_declare_coherent_memory(struct device *dev,
					phys_addr_t phys_addr,
					dma_addr_t device_addr, size_t size,
					int flags);
extern void dmam_release_declared_memory(struct device *dev);
#else /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
static inline int dmam_declare_coherent_memory(struct device *dev,
				phys_addr_t phys_addr, dma_addr_t device_addr,
				size_t size, int flags)
{
	return 0;
}

static inline void dmam_release_declared_memory(struct device *dev)
{
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

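/*
 * Example: a managed allocation that is released automatically on driver
 * detach, so no explicit dma_free_coherent() is needed in the error or
 * remove paths (sketch; 'pdev' and the size are hypothetical):
 *
 *	ring = dmam_alloc_coherent(&pdev->dev, SZ_4K, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 */
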
static inline void *dma_alloc_wc(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_addr, gfp,
			       DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_alloc_writecombine
#define dma_alloc_writecombine dma_alloc_wc
#endif

static inline void dma_free_wc(struct device *dev, size_t size,
			       void *cpu_addr, dma_addr_t dma_addr)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_addr,
			      DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_free_writecombine
#define dma_free_writecombine dma_free_wc
#endif

static inline int dma_mmap_wc(struct device *dev,
			      struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr,
			      size_t size)
{
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
			      DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_mmap_writecombine
#define dma_mmap_writecombine dma_mmap_wc
#endif

#if defined(CONFIG_NEED_DMA_MAP_STATE) || defined(CONFIG_DMA_API_DEBUG)
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)	dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)		__u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)		((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	(((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)		((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	(((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)		(0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME)		(0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	do { } while (0)
#endif
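
/*
 * Example: a driver embeds the unmap state in its own structure, so the
 * fields compile away entirely when unmap information is not needed
 * (sketch; 'struct ring_entry' and 'e' are hypothetical):
 *
 *	struct ring_entry {
 *		struct sk_buff *skb;
 *		DEFINE_DMA_UNMAP_ADDR(mapping);
 *		DEFINE_DMA_UNMAP_LEN(len);
 *	};
 *
 *	dma_unmap_addr_set(e, mapping, handle);
 *	dma_unmap_len_set(e, len, size);
 *	...
 *	dma_unmap_single(dev, dma_unmap_addr(e, mapping),
 *			 dma_unmap_len(e, len), DMA_TO_DEVICE);
 */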

#endif