/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-debug.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/bug.h>
#include <linux/mem_encrypt.h>

/**
 * List of possible attributes associated with a DMA mapping. The semantics
 * of each attribute should be defined in Documentation/DMA-attributes.txt.
 *
 * DMA_ATTR_WRITE_BARRIER: DMA to a memory region with this attribute
 * forces all pending DMA writes to complete.
 */
#define DMA_ATTR_WRITE_BARRIER		(1UL << 0)
/*
 * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
 * may be weakly ordered, that is that reads and writes may pass each other.
 */
#define DMA_ATTR_WEAK_ORDERING		(1UL << 1)
/*
 * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
 * buffered to improve performance.
 */
#define DMA_ATTR_WRITE_COMBINE		(1UL << 2)
/*
 * DMA_ATTR_NON_CONSISTENT: Lets the platform choose to return either
 * consistent or non-consistent memory as it sees fit.
 */
#define DMA_ATTR_NON_CONSISTENT		(1UL << 3)
/*
 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
 * virtual mapping for the allocated buffer.
 */
#define DMA_ATTR_NO_KERNEL_MAPPING	(1UL << 4)
/*
 * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
 * the CPU cache for the given buffer, assuming that it has already been
 * transferred to the 'device' domain.
 */
#define DMA_ATTR_SKIP_CPU_SYNC		(1UL << 5)
/*
 * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
 * in physical memory.
 */
#define DMA_ATTR_FORCE_CONTIGUOUS	(1UL << 6)
/*
 * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
 * that it's probably not worth the time to try to allocate memory in a way
 * that gives better TLB efficiency.
 */
#define DMA_ATTR_ALLOC_SINGLE_PAGES	(1UL << 7)
/*
 * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
 * allocation failure reports (similarly to __GFP_NOWARN).
 */
#define DMA_ATTR_NO_WARN	(1UL << 8)

/*
 * DMA_ATTR_PRIVILEGED: used to indicate that the buffer is fully
 * accessible at an elevated privilege level (and ideally inaccessible or
 * at least read-only at lesser-privileged levels).
 */
#define DMA_ATTR_PRIVILEGED		(1UL << 9)

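/*
 * Example (illustrative sketch, not part of this header): attributes are
 * OR'ed together and passed through the *_attrs variants of the mapping
 * and allocation calls. "dev", "size" and "handle" below are hypothetical
 * driver locals.
 *
 *	buf = dma_alloc_attrs(dev, size, &handle, GFP_KERNEL,
 *			      DMA_ATTR_WRITE_COMBINE | DMA_ATTR_NO_WARN);
 */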
/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform.
 * It can be given to a device to use as a DMA source or target. A CPU cannot
 * reference a dma_addr_t directly because there may be translation between
 * its physical address space and the bus address space.
 */
struct dma_map_ops {
	void* (*alloc)(struct device *dev, size_t size,
		       dma_addr_t *dma_handle, gfp_t gfp,
		       unsigned long attrs);
	void (*free)(struct device *dev, size_t size,
		     void *vaddr, dma_addr_t dma_handle,
		     unsigned long attrs);
	int (*mmap)(struct device *, struct vm_area_struct *,
		    void *, dma_addr_t, size_t,
		    unsigned long attrs);

	int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
			   dma_addr_t, size_t, unsigned long attrs);

	dma_addr_t (*map_page)(struct device *dev, struct page *page,
			       unsigned long offset, size_t size,
			       enum dma_data_direction dir,
			       unsigned long attrs);
	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction dir,
			   unsigned long attrs);
	/*
	 * map_sg returns 0 on error and a value > 0 on success.
	 * It should never return a value < 0.
	 */
	int (*map_sg)(struct device *dev, struct scatterlist *sg,
		      int nents, enum dma_data_direction dir,
		      unsigned long attrs);
	void (*unmap_sg)(struct device *dev,
			 struct scatterlist *sg, int nents,
			 enum dma_data_direction dir,
			 unsigned long attrs);
	dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
				   size_t size, enum dma_data_direction dir,
				   unsigned long attrs);
	void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs);
	void (*sync_single_for_cpu)(struct device *dev,
				    dma_addr_t dma_handle, size_t size,
				    enum dma_data_direction dir);
	void (*sync_single_for_device)(struct device *dev,
				       dma_addr_t dma_handle, size_t size,
				       enum dma_data_direction dir);
	void (*sync_sg_for_cpu)(struct device *dev,
				struct scatterlist *sg, int nents,
				enum dma_data_direction dir);
	void (*sync_sg_for_device)(struct device *dev,
				   struct scatterlist *sg, int nents,
				   enum dma_data_direction dir);
	void (*cache_sync)(struct device *dev, void *vaddr, size_t size,
			   enum dma_data_direction direction);
	int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
	int (*dma_supported)(struct device *dev, u64 mask);
#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
	u64 (*get_required_mask)(struct device *dev);
#endif
	int is_phys;
};

extern const struct dma_map_ops dma_noop_ops;
extern const struct dma_map_ops dma_virt_ops;

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))

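/*
 * Worked example (illustrative): DMA_BIT_MASK(32) evaluates to
 * 0x00000000ffffffff, the highest address reachable by a device with
 * 32 addressing bits. The (n) == 64 special case returns ~0ULL directly,
 * avoiding the undefined behaviour of shifting a 64-bit value by 64.
 */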
#define DMA_MASK_NONE	0x0ULL

static inline int valid_dma_direction(int dma_direction)
{
	return ((dma_direction == DMA_BIDIRECTIONAL) ||
		(dma_direction == DMA_TO_DEVICE) ||
		(dma_direction == DMA_FROM_DEVICE));
}

static inline int is_device_dma_capable(struct device *dev)
{
	return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
}

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
/*
 * These three functions are only for the dma allocator.
 * Don't use them in device drivers.
 */
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
				dma_addr_t *dma_handle, void **ret);
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);

int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
			       void *cpu_addr, size_t size, int *ret);

void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle);
int dma_release_from_global_coherent(int order, void *vaddr);
int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
				  size_t size, int *ret);

#else
#define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)

static inline void *dma_alloc_from_global_coherent(ssize_t size,
						   dma_addr_t *dma_handle)
{
	return NULL;
}

static inline int dma_release_from_global_coherent(int order, void *vaddr)
{
	return 0;
}

static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
						void *cpu_addr, size_t size,
						int *ret)
{
	return 0;
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

#ifdef CONFIG_HAS_DMA
#include <asm/dma-mapping.h>
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (dev && dev->dma_ops)
		return dev->dma_ops;
	return get_arch_dma_ops(dev ? dev->bus : NULL);
}

static inline void set_dma_ops(struct device *dev,
			       const struct dma_map_ops *dma_ops)
{
	dev->dma_ops = dma_ops;
}
#else
/*
 * Define the DMA API to allow compilation but not linking of
 * DMA-dependent code. Code that depends on the dma-mapping
 * API needs to set 'depends on HAS_DMA' in its Kconfig.
 */
extern const struct dma_map_ops bad_dma_ops;
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	return &bad_dma_ops;
}
#endif

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
					      size_t size,
					      enum dma_data_direction dir,
					      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, virt_to_page(ptr),
			     offset_in_page(ptr), size,
			     dir, attrs);
	debug_dma_map_page(dev, virt_to_page(ptr),
			   offset_in_page(ptr), size,
			   dir, addr, true);
	return addr;
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
					  size_t size,
					  enum dma_data_direction dir,
					  unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir, true);
}
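
/*
 * Example (illustrative sketch, not part of this header): a streaming
 * mapping of a kernel buffer that the device will write into, with the
 * mandatory error check. "dev", "buf" and "len" are hypothetical driver
 * locals; dma_map_single() is the attrs-free wrapper defined below.
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	... device DMA runs ...
 *	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
 */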

/*
 * dma_map_sg_attrs returns 0 on error and > 0 on success.
 * It should never return a value < 0.
 */
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir,
				   unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	int ents;

	BUG_ON(!valid_dma_direction(dir));
	ents = ops->map_sg(dev, sg, nents, dir, attrs);
	BUG_ON(ents < 0);
	debug_dma_map_sg(dev, sg, nents, ents, dir);

	return ents;
}

static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
				      int nents, enum dma_data_direction dir,
				      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}

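/*
 * Example (illustrative sketch): mapping a scatterlist and walking the
 * mapped entries. The returned count may be smaller than nents because
 * adjacent entries can be merged; always iterate over the returned count
 * but unmap with the original nents. "dev", "sgl", "nents" and
 * program_hw_descriptor() are hypothetical.
 *
 *	struct scatterlist *sg;
 *	int i, count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *
 *	if (count == 0)
 *		return -ENOMEM;
 *	for_each_sg(sgl, sg, count, i)
 *		program_hw_descriptor(sg_dma_address(sg), sg_dma_len(sg));
 *	...
 *	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
 */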
static inline dma_addr_t dma_map_page_attrs(struct device *dev,
					    struct page *page,
					    size_t offset, size_t size,
					    enum dma_data_direction dir,
					    unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, page, offset, size, dir, attrs);
	debug_dma_map_page(dev, page, offset, size, dir, addr, false);

	return addr;
}

static inline void dma_unmap_page_attrs(struct device *dev,
					dma_addr_t addr, size_t size,
					enum dma_data_direction dir,
					unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir, false);
}

static inline dma_addr_t dma_map_resource(struct device *dev,
					  phys_addr_t phys_addr,
					  size_t size,
					  enum dma_data_direction dir,
					  unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));

	/* Don't allow RAM to be mapped */
	BUG_ON(pfn_valid(PHYS_PFN(phys_addr)));

	addr = phys_addr;
	if (ops->map_resource)
		addr = ops->map_resource(dev, phys_addr, size, dir, attrs);

	debug_dma_map_resource(dev, phys_addr, size, dir, addr);

	return addr;
}

static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
				      size_t size, enum dma_data_direction dir,
				      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_resource)
		ops->unmap_resource(dev, addr, size, dir, attrs);
	debug_dma_unmap_resource(dev, addr, size, dir);
}

static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
					   size_t size,
					   enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t addr, size_t size,
					      enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
						 dma_addr_t addr,
						 unsigned long offset,
						 size_t size,
						 enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
						    dma_addr_t addr,
						    unsigned long offset,
						    size_t size,
						    enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		    int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		       int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}

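/*
 * Example (illustrative sketch): for a long-lived streaming mapping, hand
 * the buffer back to the CPU before touching it, then back to the device
 * before the next transfer. "dev", "handle", "buf", "len" and
 * process_data() are hypothetical.
 *
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	process_data(buf, len);		// the CPU may now read the buffer
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 */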
#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)

static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->cache_sync)
		ops->cache_sync(dev, vaddr, size, dir);
}

extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
			   void *cpu_addr, dma_addr_t dma_addr, size_t size);

void *dma_common_contiguous_remap(struct page *page, size_t size,
				  unsigned long vm_flags,
				  pgprot_t prot, const void *caller);

void *dma_common_pages_remap(struct page **pages, size_t size,
			     unsigned long vm_flags, pgprot_t prot,
			     const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);

/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @handle: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
 * into user space. The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
static inline int
dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
	       dma_addr_t dma_addr, size_t size, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	BUG_ON(!ops);
	if (ops->mmap)
		return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}

#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)

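/*
 * Example (illustrative sketch): a driver's .mmap file operation handing a
 * coherent buffer to user space. "struct my_priv" (holding the dev,
 * cpu_addr, dma_addr and size from an earlier dma_alloc_coherent()) is
 * hypothetical.
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_priv *priv = file->private_data;
 *
 *		return dma_mmap_coherent(priv->dev, vma, priv->cpu_addr,
 *					 priv->dma_addr, priv->size);
 *	}
 */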
int
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		       void *cpu_addr, dma_addr_t dma_addr, size_t size);

static inline int
dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
		      dma_addr_t dma_addr, size_t size,
		      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	BUG_ON(!ops);
	if (ops->get_sgtable)
		return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
					attrs);
	return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
}

#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)

#ifndef arch_dma_alloc_attrs
#define arch_dma_alloc_attrs(dev, flag)	(true)
#endif

static inline void *dma_alloc_attrs(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t flag,
				    unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	BUG_ON(!ops);

	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
		return cpu_addr;

	if (!arch_dma_alloc_attrs(&dev, &flag))
		return NULL;
	if (!ops->alloc)
		return NULL;

	cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
	return cpu_addr;
}

static inline void dma_free_attrs(struct device *dev, size_t size,
				  void *cpu_addr, dma_addr_t dma_handle,
				  unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!ops);
	WARN_ON(irqs_disabled());

	if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
		return;

	if (!ops->free || !cpu_addr)
		return;

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	ops->free(dev, size, cpu_addr, dma_handle, attrs);
}

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	return dma_alloc_attrs(dev, size, dma_handle, flag, 0);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *cpu_addr, dma_addr_t dma_handle)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
}

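/*
 * Example (illustrative sketch): allocating and freeing a coherent buffer
 * for a descriptor ring. "dev" and RING_SIZE are hypothetical; the CPU and
 * device can both access a coherent buffer without explicit sync calls.
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(dev, RING_SIZE, &ring_dma, GFP_KERNEL);
 *
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, RING_SIZE, ring, ring_dma);
 */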
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	debug_dma_mapping_error(dev, dma_addr);
	if (ops->mapping_error)
		return ops->mapping_error(dev, dma_addr);
	return 0;
}

static inline void dma_check_mask(struct device *dev, u64 mask)
{
	if (sme_active() && (mask < (((u64)sme_get_me_mask() << 1) - 1)))
		dev_warn(dev, "SME is active, device will require DMA bounce buffers\n");
}

static inline int dma_supported(struct device *dev, u64 mask)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops)
		return 0;
	if (!ops->dma_supported)
		return 1;
	return ops->dma_supported(dev, mask);
}

#ifndef HAVE_ARCH_DMA_SET_MASK
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	dma_check_mask(dev, mask);

	*dev->dma_mask = mask;
	return 0;
}
#endif

static inline u64 dma_get_mask(struct device *dev)
{
	if (dev && dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	return DMA_BIT_MASK(32);
}

#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask);
#else
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	if (!dma_supported(dev, mask))
		return -EIO;

	dma_check_mask(dev, mask);

	dev->coherent_dma_mask = mask;
	return 0;
}
#endif

/*
 * Set both the DMA mask and the coherent DMA mask to the same thing.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to
 * the same or smaller than the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
	int rc = dma_set_mask(dev, mask);
	if (rc == 0)
		dma_set_coherent_mask(dev, mask);
	return rc;
}

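/*
 * Example (illustrative sketch): the common probe-time pattern of asking
 * for 64-bit addressing and falling back to 32-bit. "dev" is hypothetical.
 *
 *	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */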
/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask appropriately set up.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
	dev->dma_mask = &dev->coherent_dma_mask;
	return dma_set_mask_and_coherent(dev, mask);
}

extern u64 dma_get_required_mask(struct device *dev);

#ifndef arch_setup_dma_ops
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
				      u64 size, const struct iommu_ops *iommu,
				      bool coherent) { }
#endif

#ifndef arch_teardown_dma_ops
static inline void arch_teardown_dma_ops(struct device *dev) { }
#endif

static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->max_segment_size)
		return dev->dma_parms->max_segment_size;
	return SZ_64K;
}

static inline unsigned int dma_set_max_seg_size(struct device *dev,
						unsigned int size)
{
	if (dev->dma_parms) {
		dev->dma_parms->max_segment_size = size;
		return 0;
	}
	return -EIO;
}

static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
		return dev->dma_parms->segment_boundary_mask;
	return DMA_BIT_MASK(32);
}

static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
	if (dev->dma_parms) {
		dev->dma_parms->segment_boundary_mask = mask;
		return 0;
	}
	return -EIO;
}

#ifndef dma_max_pfn
static inline unsigned long dma_max_pfn(struct device *dev)
{
	return *dev->dma_mask >> PAGE_SHIFT;
}
#endif

static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, gfp_t flag)
{
	void *ret = dma_alloc_coherent(dev, size, dma_handle,
				       flag | __GFP_ZERO);
	return ret;
}

static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
	return ARCH_DMA_MINALIGN;
#endif
	return 1;
}

/* flags for the coherent memory api */
#define DMA_MEMORY_EXCLUSIVE		0x01

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				dma_addr_t device_addr, size_t size, int flags);
void dma_release_declared_memory(struct device *dev);
void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size);
#else
static inline int
dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
			    dma_addr_t device_addr, size_t size, int flags)
{
	return -ENOSYS;
}

static inline void
dma_release_declared_memory(struct device *dev)
{
}

static inline void *
dma_mark_declared_memory_occupied(struct device *dev,
				  dma_addr_t device_addr, size_t size)
{
	return ERR_PTR(-EBUSY);
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

#ifdef CONFIG_HAS_DMA
int dma_configure(struct device *dev);
void dma_deconfigure(struct device *dev);
#else
static inline int dma_configure(struct device *dev)
{
	return 0;
}

static inline void dma_deconfigure(struct device *dev) {}
#endif

/*
 * Managed DMA API
 */
extern void *dmam_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			       dma_addr_t dma_handle);
extern void *dmam_alloc_attrs(struct device *dev, size_t size,
			      dma_addr_t *dma_handle, gfp_t gfp,
			      unsigned long attrs);
#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
extern int dmam_declare_coherent_memory(struct device *dev,
					phys_addr_t phys_addr,
					dma_addr_t device_addr, size_t size,
					int flags);
extern void dmam_release_declared_memory(struct device *dev);
#else /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
static inline int dmam_declare_coherent_memory(struct device *dev,
					phys_addr_t phys_addr,
					dma_addr_t device_addr,
					size_t size, int flags)
{
	return 0;
}

static inline void dmam_release_declared_memory(struct device *dev)
{
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

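/*
 * Example (illustrative sketch): the managed variant is released
 * automatically on driver detach, so no explicit free is needed in the
 * error and remove paths. "pdev", "size" and "handle" are hypothetical.
 *
 *	buf = dmam_alloc_coherent(&pdev->dev, size, &handle, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 */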
static inline void *dma_alloc_wc(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_addr, gfp,
			       DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_alloc_writecombine
#define dma_alloc_writecombine dma_alloc_wc
#endif

static inline void dma_free_wc(struct device *dev, size_t size,
			       void *cpu_addr, dma_addr_t dma_addr)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_addr,
			      DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_free_writecombine
#define dma_free_writecombine dma_free_wc
#endif

static inline int dma_mmap_wc(struct device *dev,
			      struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr,
			      size_t size)
{
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
			      DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_mmap_writecombine
#define dma_mmap_writecombine dma_mmap_wc
#endif

#if defined(CONFIG_NEED_DMA_MAP_STATE) || defined(CONFIG_DMA_API_DEBUG)
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)        dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)          __u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)          ((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)            ((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)   (((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)          (0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME)            (0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)   do { } while (0)
#endif
833 | ||
9ac7849e | 834 | #endif |