/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-debug.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/bug.h>
#include <linux/mem_encrypt.h>

/**
 * List of possible attributes associated with a DMA mapping. The semantics
 * of each attribute should be defined in Documentation/DMA-attributes.txt.
 *
 * DMA_ATTR_WRITE_BARRIER: DMA to a memory region with this attribute
 * forces all pending DMA writes to complete.
 */
#define DMA_ATTR_WRITE_BARRIER		(1UL << 0)
/*
 * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
 * may be weakly ordered, that is that reads and writes may pass each other.
 */
#define DMA_ATTR_WEAK_ORDERING		(1UL << 1)
/*
 * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
 * buffered to improve performance.
 */
#define DMA_ATTR_WRITE_COMBINE		(1UL << 2)
/*
 * DMA_ATTR_NON_CONSISTENT: Lets the platform choose to return either
 * consistent or non-consistent memory as it sees fit.
 */
#define DMA_ATTR_NON_CONSISTENT		(1UL << 3)
/*
 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
 * virtual mapping for the allocated buffer.
 */
#define DMA_ATTR_NO_KERNEL_MAPPING	(1UL << 4)
/*
 * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
 * the CPU cache for the given buffer, assuming that it has already been
 * transferred to the 'device' domain.
 */
#define DMA_ATTR_SKIP_CPU_SYNC		(1UL << 5)
/*
 * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
 * in physical memory.
 */
#define DMA_ATTR_FORCE_CONTIGUOUS	(1UL << 6)
/*
 * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
 * that it's probably not worth the time to try to allocate memory in a way
 * that gives better TLB efficiency.
 */
#define DMA_ATTR_ALLOC_SINGLE_PAGES	(1UL << 7)
/*
 * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
 * allocation failure reports (similarly to __GFP_NOWARN).
 */
#define DMA_ATTR_NO_WARN		(1UL << 8)

/*
 * DMA_ATTR_PRIVILEGED: used to indicate that the buffer is fully
 * accessible at an elevated privilege level (and ideally inaccessible or
 * at least read-only at lesser-privileged levels).
 */
#define DMA_ATTR_PRIVILEGED		(1UL << 9)

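/*
 * Usage sketch (illustrative only, not part of this header's API): the
 * attributes above are independent bits in an 'unsigned long', so a caller
 * combines them with bitwise OR and hands the result to any of the *_attrs
 * interfaces below.  The device pointer and buffer size are hypothetical.
 *
 *	unsigned long attrs = DMA_ATTR_WRITE_COMBINE | DMA_ATTR_NO_WARN;
 *	dma_addr_t dma_handle;
 *	void *vaddr = dma_alloc_attrs(dev, SZ_4K, &dma_handle,
 *				      GFP_KERNEL, attrs);
 */
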
/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform.
 * It can be given to a device to use as a DMA source or target.  A CPU cannot
 * reference a dma_addr_t directly because there may be translation between
 * its physical address space and the bus address space.
 */
struct dma_map_ops {
	void* (*alloc)(struct device *dev, size_t size,
			dma_addr_t *dma_handle, gfp_t gfp,
			unsigned long attrs);
	void (*free)(struct device *dev, size_t size,
			void *vaddr, dma_addr_t dma_handle,
			unsigned long attrs);
	int (*mmap)(struct device *, struct vm_area_struct *,
			void *, dma_addr_t, size_t,
			unsigned long attrs);

	int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
			dma_addr_t, size_t, unsigned long attrs);

	dma_addr_t (*map_page)(struct device *dev, struct page *page,
			unsigned long offset, size_t size,
			enum dma_data_direction dir,
			unsigned long attrs);
	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	/*
	 * map_sg returns 0 on error and a value > 0 on success.
	 * It should never return a value < 0.
	 */
	int (*map_sg)(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir,
			unsigned long attrs);
	void (*unmap_sg)(struct device *dev,
			struct scatterlist *sg, int nents,
			enum dma_data_direction dir,
			unsigned long attrs);
	dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	void (*sync_single_for_cpu)(struct device *dev,
			dma_addr_t dma_handle, size_t size,
			enum dma_data_direction dir);
	void (*sync_single_for_device)(struct device *dev,
			dma_addr_t dma_handle, size_t size,
			enum dma_data_direction dir);
	void (*sync_sg_for_cpu)(struct device *dev,
			struct scatterlist *sg, int nents,
			enum dma_data_direction dir);
	void (*sync_sg_for_device)(struct device *dev,
			struct scatterlist *sg, int nents,
			enum dma_data_direction dir);
	void (*cache_sync)(struct device *dev, void *vaddr, size_t size,
			enum dma_data_direction direction);
	int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
	int (*dma_supported)(struct device *dev, u64 mask);
	u64 (*get_required_mask)(struct device *dev);
};

extern const struct dma_map_ops dma_direct_ops;
extern const struct dma_map_ops dma_virt_ops;

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
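/*
 * For example (values shown for illustration):
 *
 *	DMA_BIT_MASK(24) == 0x0000000000ffffffULL
 *	DMA_BIT_MASK(32) == 0x00000000ffffffffULL
 *	DMA_BIT_MASK(64) == 0xffffffffffffffffULL
 *
 * The (n) == 64 special case avoids the undefined behaviour of shifting a
 * 64-bit value by 64 bits.
 */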

#define DMA_MASK_NONE	0x0ULL

static inline int valid_dma_direction(int dma_direction)
{
	return ((dma_direction == DMA_BIDIRECTIONAL) ||
		(dma_direction == DMA_TO_DEVICE) ||
		(dma_direction == DMA_FROM_DEVICE));
}

static inline int is_device_dma_capable(struct device *dev)
{
	return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
}

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
/*
 * These three functions are only for the dma allocator.
 * Don't use them in device drivers.
 */
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
				dma_addr_t *dma_handle, void **ret);
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);

int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
			       void *cpu_addr, size_t size, int *ret);

void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle);
int dma_release_from_global_coherent(int order, void *vaddr);
int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
				  size_t size, int *ret);

#else
#define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_dev_coherent(dev, vma, vaddr, size, ret) (0)

static inline void *dma_alloc_from_global_coherent(ssize_t size,
						   dma_addr_t *dma_handle)
{
	return NULL;
}

static inline int dma_release_from_global_coherent(int order, void *vaddr)
{
	return 0;
}

static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
						void *cpu_addr, size_t size,
						int *ret)
{
	return 0;
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

#ifdef CONFIG_HAS_DMA
#include <asm/dma-mapping.h>
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (dev && dev->dma_ops)
		return dev->dma_ops;
	return get_arch_dma_ops(dev ? dev->bus : NULL);
}

static inline void set_dma_ops(struct device *dev,
			       const struct dma_map_ops *dma_ops)
{
	dev->dma_ops = dma_ops;
}
#else
/*
 * Define the dma api to allow compilation of dma dependent code.
 * Code that depends on the dma-mapping API needs to set 'depends on HAS_DMA'
 * in its Kconfig, unless it already depends on <something> || COMPILE_TEST,
 * where <something> guarantees the availability of the dma-mapping API.
 */
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	return NULL;
}
#endif

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
					      size_t size,
					      enum dma_data_direction dir,
					      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_map_single(dev, ptr, size);
	addr = ops->map_page(dev, virt_to_page(ptr),
			     offset_in_page(ptr), size,
			     dir, attrs);
	debug_dma_map_page(dev, virt_to_page(ptr),
			   offset_in_page(ptr), size,
			   dir, addr, true);
	return addr;
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
					  size_t size,
					  enum dma_data_direction dir,
					  unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir, true);
}

/*
 * dma_map_sg_attrs returns 0 on error and > 0 on success.
 * It should never return a value < 0.
 */
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir,
				   unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	int ents;

	BUG_ON(!valid_dma_direction(dir));
	ents = ops->map_sg(dev, sg, nents, dir, attrs);
	BUG_ON(ents < 0);
	debug_dma_map_sg(dev, sg, nents, ents, dir);

	return ents;
}

static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
				      int nents, enum dma_data_direction dir,
				      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}

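/*
 * Illustrative scatterlist round trip (a sketch; 'dev', 'sgt' and the
 * error label are assumed to exist in the calling driver, and
 * program_hw_descriptor() is a stand-in for driver-specific code).  The
 * mapping may coalesce entries, so the returned count must drive the
 * device-facing loop, while the unmap uses the original nents:
 *
 *	int i, count;
 *	struct scatterlist *sg;
 *
 *	count = dma_map_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
 *	if (count == 0)
 *		goto err;
 *	for_each_sg(sgt->sgl, sg, count, i)
 *		program_hw_descriptor(sg_dma_address(sg), sg_dma_len(sg));
 *	...
 *	dma_unmap_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
 */
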
static inline dma_addr_t dma_map_page_attrs(struct device *dev,
					    struct page *page,
					    size_t offset, size_t size,
					    enum dma_data_direction dir,
					    unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, page, offset, size, dir, attrs);
	debug_dma_map_page(dev, page, offset, size, dir, addr, false);

	return addr;
}

static inline void dma_unmap_page_attrs(struct device *dev,
					dma_addr_t addr, size_t size,
					enum dma_data_direction dir,
					unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir, false);
}

static inline dma_addr_t dma_map_resource(struct device *dev,
					  phys_addr_t phys_addr,
					  size_t size,
					  enum dma_data_direction dir,
					  unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));

	/* Don't allow RAM to be mapped */
	BUG_ON(pfn_valid(PHYS_PFN(phys_addr)));

	addr = phys_addr;
	if (ops->map_resource)
		addr = ops->map_resource(dev, phys_addr, size, dir, attrs);

	debug_dma_map_resource(dev, phys_addr, size, dir, addr);

	return addr;
}

static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
				      size_t size, enum dma_data_direction dir,
				      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_resource)
		ops->unmap_resource(dev, addr, size, dir, attrs);
	debug_dma_unmap_resource(dev, addr, size, dir);
}

static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
					   size_t size,
					   enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t addr, size_t size,
					      enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
						 dma_addr_t addr,
						 unsigned long offset,
						 size_t size,
						 enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
						    dma_addr_t addr,
						    unsigned long offset,
						    size_t size,
						    enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		    int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		       int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}

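/*
 * Ownership sketch for the sync interfaces (illustrative only): a buffer
 * that stays mapped across multiple device operations is handed back and
 * forth between CPU and device.  'dev', 'addr' and 'size' are assumed to
 * come from a prior dma_map_single():
 *
 *	dma_sync_single_for_cpu(dev, addr, size, DMA_FROM_DEVICE);
 *	... CPU may now read the received data ...
 *	dma_sync_single_for_device(dev, addr, size, DMA_FROM_DEVICE);
 *	... device may DMA into the buffer again ...
 */
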
#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)

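/*
 * Canonical streaming-mapping sequence (a sketch; 'dev', 'buf' and 'len'
 * are hypothetical): every successful map must be checked with
 * dma_mapping_error() and eventually undone with the matching unmap:
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	... point the device at 'dma' and start the transfer ...
 *	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 */
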
static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->cache_sync)
		ops->cache_sync(dev, vaddr, size, dir);
}

extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);

void *dma_common_contiguous_remap(struct page *page, size_t size,
			unsigned long vm_flags,
			pgprot_t prot, const void *caller);

void *dma_common_pages_remap(struct page **pages, size_t size,
			unsigned long vm_flags, pgprot_t prot,
			const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);

/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @dma_addr: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
 * into user space.  The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
static inline int
dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
	       dma_addr_t dma_addr, size_t size, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	BUG_ON(!ops);
	if (ops->mmap)
		return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}

#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
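/*
 * A minimal sketch of how a driver might back its mmap() file operation
 * with dma_mmap_coherent().  The mydrv_* names and the buffer fields are
 * hypothetical:
 *
 *	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct mydrv *drv = file->private_data;
 *
 *		return dma_mmap_coherent(drv->dev, vma, drv->cpu_addr,
 *					 drv->dma_handle, drv->size);
 *	}
 */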

int
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr,
		       dma_addr_t dma_addr, size_t size, unsigned long attrs);

static inline int
dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
		      dma_addr_t dma_addr, size_t size,
		      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	BUG_ON(!ops);
	if (ops->get_sgtable)
		return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
					attrs);
	return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
				      attrs);
}

#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)

#ifndef arch_dma_alloc_attrs
#define arch_dma_alloc_attrs(dev)	(true)
#endif

static inline void *dma_alloc_attrs(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t flag,
				    unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	BUG_ON(!ops);
	WARN_ON_ONCE(dev && !dev->coherent_dma_mask);

	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
		return cpu_addr;

	/* let the implementation decide on the zone to allocate from: */
	flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

	if (!arch_dma_alloc_attrs(&dev))
		return NULL;
	if (!ops->alloc)
		return NULL;

	cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
	return cpu_addr;
}

static inline void dma_free_attrs(struct device *dev, size_t size,
				  void *cpu_addr, dma_addr_t dma_handle,
				  unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!ops);

	if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
		return;
	/*
	 * On non-coherent platforms which implement DMA-coherent buffers via
	 * non-cacheable remaps, ops->free() may call vunmap(). Thus getting
	 * this far in IRQ context is a) at risk of a BUG_ON() or trying to
	 * sleep on some machines, and b) an indication that the driver is
	 * probably misusing the coherent API anyway.
	 */
	WARN_ON(irqs_disabled());

	if (!ops->free || !cpu_addr)
		return;

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	ops->free(dev, size, cpu_addr, dma_handle, attrs);
}

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_handle, gfp,
			(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *cpu_addr, dma_addr_t dma_handle)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
}

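/*
 * Typical coherent-allocation lifetime (illustrative; 'dev' and the ring
 * size are hypothetical).  The buffer stays coherent for its whole
 * lifetime, so no dma_sync_*() calls are needed:
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(dev, SZ_64K, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	... tell the device about 'ring_dma', use 'ring' from the CPU ...
 *	dma_free_coherent(dev, SZ_64K, ring, ring_dma);
 */
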
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	debug_dma_mapping_error(dev, dma_addr);
	if (ops->mapping_error)
		return ops->mapping_error(dev, dma_addr);
	return 0;
}

static inline void dma_check_mask(struct device *dev, u64 mask)
{
	if (sme_active() && (mask < (((u64)sme_get_me_mask() << 1) - 1)))
		dev_warn(dev, "SME is active, device will require DMA bounce buffers\n");
}

static inline int dma_supported(struct device *dev, u64 mask)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops)
		return 0;
	if (!ops->dma_supported)
		return 1;
	return ops->dma_supported(dev, mask);
}

#ifndef HAVE_ARCH_DMA_SET_MASK
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	dma_check_mask(dev, mask);

	*dev->dma_mask = mask;
	return 0;
}
#endif

static inline u64 dma_get_mask(struct device *dev)
{
	if (dev && dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	return DMA_BIT_MASK(32);
}

#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask);
#else
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	if (!dma_supported(dev, mask))
		return -EIO;

	dma_check_mask(dev, mask);

	dev->coherent_dma_mask = mask;
	return 0;
}
#endif

/*
 * Set both the DMA mask and the coherent DMA mask to the same thing.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to
 * the same or smaller than the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
	int rc = dma_set_mask(dev, mask);
	if (rc == 0)
		dma_set_coherent_mask(dev, mask);
	return rc;
}

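/*
 * Probe-time mask negotiation sketch (illustrative): try the widest mask
 * the hardware supports and fall back to 32 bits, the conventional lowest
 * common denominator:
 *
 *	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */
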
/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask appropriately setup.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
	dev->dma_mask = &dev->coherent_dma_mask;
	return dma_set_mask_and_coherent(dev, mask);
}

extern u64 dma_get_required_mask(struct device *dev);

#ifndef arch_setup_dma_ops
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
				      u64 size, const struct iommu_ops *iommu,
				      bool coherent) { }
#endif

#ifndef arch_teardown_dma_ops
static inline void arch_teardown_dma_ops(struct device *dev) { }
#endif

static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->max_segment_size)
		return dev->dma_parms->max_segment_size;
	return SZ_64K;
}

static inline unsigned int dma_set_max_seg_size(struct device *dev,
						unsigned int size)
{
	if (dev->dma_parms) {
		dev->dma_parms->max_segment_size = size;
		return 0;
	}
	return -EIO;
}

static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
		return dev->dma_parms->segment_boundary_mask;
	return DMA_BIT_MASK(32);
}

static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
	if (dev->dma_parms) {
		dev->dma_parms->segment_boundary_mask = mask;
		return 0;
	}
	return -EIO;
}

#ifndef dma_max_pfn
static inline unsigned long dma_max_pfn(struct device *dev)
{
	return (*dev->dma_mask >> PAGE_SHIFT) + dev->dma_pfn_offset;
}
#endif

static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, gfp_t flag)
{
	void *ret = dma_alloc_coherent(dev, size, dma_handle,
				       flag | __GFP_ZERO);
	return ret;
}

static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
	return ARCH_DMA_MINALIGN;
#endif
	return 1;
}

/* flags for the coherent memory api */
#define DMA_MEMORY_EXCLUSIVE		0x01

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				dma_addr_t device_addr, size_t size, int flags);
void dma_release_declared_memory(struct device *dev);
void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size);
#else
static inline int
dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
			    dma_addr_t device_addr, size_t size, int flags)
{
	return -ENOSYS;
}

static inline void
dma_release_declared_memory(struct device *dev)
{
}

static inline void *
dma_mark_declared_memory_occupied(struct device *dev,
				  dma_addr_t device_addr, size_t size)
{
	return ERR_PTR(-EBUSY);
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

/*
 * Managed DMA API
 */
#ifdef CONFIG_HAS_DMA
extern void *dmam_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			       dma_addr_t dma_handle);
#else /* !CONFIG_HAS_DMA */
static inline void *dmam_alloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, gfp_t gfp)
{ return NULL; }
static inline void dmam_free_coherent(struct device *dev, size_t size,
				      void *vaddr, dma_addr_t dma_handle) { }
#endif /* !CONFIG_HAS_DMA */

extern void *dmam_alloc_attrs(struct device *dev, size_t size,
			      dma_addr_t *dma_handle, gfp_t gfp,
			      unsigned long attrs);
#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
extern int dmam_declare_coherent_memory(struct device *dev,
					phys_addr_t phys_addr,
					dma_addr_t device_addr, size_t size,
					int flags);
extern void dmam_release_declared_memory(struct device *dev);
#else /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
static inline int dmam_declare_coherent_memory(struct device *dev,
				phys_addr_t phys_addr, dma_addr_t device_addr,
				size_t size, int flags)
{
	return 0;
}

static inline void dmam_release_declared_memory(struct device *dev)
{
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

static inline void *dma_alloc_wc(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t gfp)
{
	unsigned long attrs = DMA_ATTR_WRITE_COMBINE;

	if (gfp & __GFP_NOWARN)
		attrs |= DMA_ATTR_NO_WARN;

	return dma_alloc_attrs(dev, size, dma_addr, gfp, attrs);
}
#ifndef dma_alloc_writecombine
#define dma_alloc_writecombine dma_alloc_wc
#endif

static inline void dma_free_wc(struct device *dev, size_t size,
			       void *cpu_addr, dma_addr_t dma_addr)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_addr,
			      DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_free_writecombine
#define dma_free_writecombine dma_free_wc
#endif

static inline int dma_mmap_wc(struct device *dev,
			      struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr,
			      size_t size)
{
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
			      DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_mmap_writecombine
#define dma_mmap_writecombine dma_mmap_wc
#endif

#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)	dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)		__u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)		((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	(((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)		((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	(((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)		(0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME)		(0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	do { } while (0)
#endif

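/*
 * Usage sketch for the unmap-state helpers (illustrative; 'struct
 * mydrv_tx' and its use are hypothetical): drivers embed the address and
 * length needed at unmap time via these macros, and the fields compile
 * away entirely when CONFIG_NEED_DMA_MAP_STATE is not set:
 *
 *	struct mydrv_tx {
 *		DEFINE_DMA_UNMAP_ADDR(mapping);
 *		DEFINE_DMA_UNMAP_LEN(len);
 *	};
 *
 *	dma_unmap_addr_set(tx, mapping, dma);
 *	dma_unmap_len_set(tx, len, size);
 *	...
 *	dma_unmap_single(dev, dma_unmap_addr(tx, mapping),
 *			 dma_unmap_len(tx, len), DMA_TO_DEVICE);
 */
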
#endif