/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-debug.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/bug.h>
#include <linux/mem_encrypt.h>

/**
 * List of possible attributes associated with a DMA mapping. The semantics
 * of each attribute should be defined in Documentation/DMA-attributes.txt.
 *
 * DMA_ATTR_WRITE_BARRIER: DMA to a memory region with this attribute
 * forces all pending DMA writes to complete.
 */
#define DMA_ATTR_WRITE_BARRIER		(1UL << 0)
/*
 * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
 * may be weakly ordered, that is, reads and writes may pass each other.
 */
#define DMA_ATTR_WEAK_ORDERING		(1UL << 1)
/*
 * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
 * buffered to improve performance.
 */
#define DMA_ATTR_WRITE_COMBINE		(1UL << 2)
/*
 * DMA_ATTR_NON_CONSISTENT: Lets the platform choose to return either
 * consistent or non-consistent memory as it sees fit.
 */
#define DMA_ATTR_NON_CONSISTENT		(1UL << 3)
/*
 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
 * virtual mapping for the allocated buffer.
 */
#define DMA_ATTR_NO_KERNEL_MAPPING	(1UL << 4)
/*
 * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
 * the CPU cache for the given buffer, assuming that it has already been
 * transferred to the 'device' domain.
 */
#define DMA_ATTR_SKIP_CPU_SYNC		(1UL << 5)
/*
 * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
 * in physical memory.
 */
#define DMA_ATTR_FORCE_CONTIGUOUS	(1UL << 6)
/*
 * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
 * that it's probably not worth the time to try to allocate memory in a way
 * that gives better TLB efficiency.
 */
#define DMA_ATTR_ALLOC_SINGLE_PAGES	(1UL << 7)
/*
 * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
 * allocation failure reports (similarly to __GFP_NOWARN).
 */
#define DMA_ATTR_NO_WARN		(1UL << 8)

/*
 * DMA_ATTR_PRIVILEGED: used to indicate that the buffer is fully
 * accessible at an elevated privilege level (and ideally inaccessible or
 * at least read-only at lesser-privileged levels).
 */
#define DMA_ATTR_PRIVILEGED		(1UL << 9)

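/*
 * Illustrative sketch (not part of this header): the attributes above form a
 * bitmask, so callers OR them together and pass the result as the 'attrs'
 * argument of the *_attrs helpers declared below. "dev" and "buf_size" are
 * hypothetical placeholders:
 *
 *	dma_addr_t dma;
 *	void *cpu = dma_alloc_attrs(dev, buf_size, &dma, GFP_KERNEL,
 *				    DMA_ATTR_WRITE_COMBINE | DMA_ATTR_NO_WARN);
 *	if (!cpu)
 *		return -ENOMEM;
 *	...
 *	dma_free_attrs(dev, buf_size, cpu, dma,
 *		       DMA_ATTR_WRITE_COMBINE | DMA_ATTR_NO_WARN);
 */
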
/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform.
 * It can be given to a device to use as a DMA source or target. A CPU cannot
 * reference a dma_addr_t directly because there may be translation between
 * its physical address space and the bus address space.
 */
struct dma_map_ops {
	void* (*alloc)(struct device *dev, size_t size,
			dma_addr_t *dma_handle, gfp_t gfp,
			unsigned long attrs);
	void (*free)(struct device *dev, size_t size,
			void *vaddr, dma_addr_t dma_handle,
			unsigned long attrs);
	int (*mmap)(struct device *, struct vm_area_struct *,
			void *, dma_addr_t, size_t,
			unsigned long attrs);

	int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
			dma_addr_t, size_t, unsigned long attrs);

	dma_addr_t (*map_page)(struct device *dev, struct page *page,
			unsigned long offset, size_t size,
			enum dma_data_direction dir,
			unsigned long attrs);
	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	/*
	 * map_sg returns 0 on error and a value > 0 on success.
	 * It should never return a value < 0.
	 */
	int (*map_sg)(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir,
			unsigned long attrs);
	void (*unmap_sg)(struct device *dev,
			struct scatterlist *sg, int nents,
			enum dma_data_direction dir,
			unsigned long attrs);
	dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	void (*sync_single_for_cpu)(struct device *dev,
			dma_addr_t dma_handle, size_t size,
			enum dma_data_direction dir);
	void (*sync_single_for_device)(struct device *dev,
			dma_addr_t dma_handle, size_t size,
			enum dma_data_direction dir);
	void (*sync_sg_for_cpu)(struct device *dev,
			struct scatterlist *sg, int nents,
			enum dma_data_direction dir);
	void (*sync_sg_for_device)(struct device *dev,
			struct scatterlist *sg, int nents,
			enum dma_data_direction dir);
	void (*cache_sync)(struct device *dev, void *vaddr, size_t size,
			enum dma_data_direction direction);
	int (*dma_supported)(struct device *dev, u64 mask);
	u64 (*get_required_mask)(struct device *dev);
	size_t (*max_mapping_size)(struct device *dev);
};
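
/*
 * Illustrative sketch (hypothetical "my_*" callbacks, not a real ops table):
 * an IOMMU or bus layer that interposes on DMA operations fills in a
 * struct dma_map_ops and installs it with set_dma_ops(), defined below when
 * CONFIG_HAS_DMA is enabled:
 *
 *	static const struct dma_map_ops my_dma_ops = {
 *		.alloc		= my_dma_alloc,
 *		.free		= my_dma_free,
 *		.map_page	= my_dma_map_page,
 *		.unmap_page	= my_dma_unmap_page,
 *		.map_sg		= my_dma_map_sg,
 *		.unmap_sg	= my_dma_unmap_sg,
 *	};
 *
 *	set_dma_ops(dev, &my_dma_ops);
 */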

#define DMA_MAPPING_ERROR		(~(dma_addr_t)0)

extern const struct dma_map_ops dma_virt_ops;
extern const struct dma_map_ops dma_dummy_ops;

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
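
/*
 * For reference (explanatory note, not from the original sources):
 * DMA_BIT_MASK(n) expands to an all-ones mask n bits wide, e.g.
 * DMA_BIT_MASK(32) == 0xffffffffULL and DMA_BIT_MASK(24) == 0xffffffULL.
 * The n == 64 special case avoids the undefined behaviour of shifting a
 * 64-bit value by 64 bits.
 */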

#define DMA_MASK_NONE	0x0ULL

static inline int valid_dma_direction(int dma_direction)
{
	return ((dma_direction == DMA_BIDIRECTIONAL) ||
		(dma_direction == DMA_TO_DEVICE) ||
		(dma_direction == DMA_FROM_DEVICE));
}

static inline int is_device_dma_capable(struct device *dev)
{
	return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
}

#ifdef CONFIG_DMA_DECLARE_COHERENT
/*
 * These three functions are only for the dma allocator.
 * Don't use them in device drivers.
 */
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
		dma_addr_t *dma_handle, void **ret);
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);

int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, size_t size, int *ret);

void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle);
int dma_release_from_global_coherent(int order, void *vaddr);
int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
		size_t size, int *ret);

#else
#define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)

static inline void *dma_alloc_from_global_coherent(ssize_t size,
		dma_addr_t *dma_handle)
{
	return NULL;
}

static inline int dma_release_from_global_coherent(int order, void *vaddr)
{
	return 0;
}

static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
		void *cpu_addr, size_t size, int *ret)
{
	return 0;
}
#endif /* CONFIG_DMA_DECLARE_COHERENT */

static inline bool dma_is_direct(const struct dma_map_ops *ops)
{
	return likely(!ops);
}

/*
 * All the dma_direct_* declarations are here just for the indirect call bypass,
 * and must not be used directly in drivers!
 */
dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs);
int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs);
dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir, unsigned long attrs);

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir);
void dma_direct_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir);
#else
static inline void dma_direct_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
}
static inline void dma_direct_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
}
#endif

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs);
void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs);
void dma_direct_sync_single_for_cpu(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir);
void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir);
#else
static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline void dma_direct_unmap_sg(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
}
static inline void dma_direct_sync_single_for_cpu(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
}
static inline void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
}
#endif

size_t dma_direct_max_mapping_size(struct device *dev);

#ifdef CONFIG_HAS_DMA
#include <asm/dma-mapping.h>

static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (dev->dma_ops)
		return dev->dma_ops;
	return get_arch_dma_ops(dev->bus);
}

static inline void set_dma_ops(struct device *dev,
			       const struct dma_map_ops *dma_ops)
{
	dev->dma_ops = dma_ops;
}

static inline dma_addr_t dma_map_page_attrs(struct device *dev,
		struct page *page, size_t offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));
	if (dma_is_direct(ops))
		addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
	else
		addr = ops->map_page(dev, page, offset, size, dir, attrs);
	debug_dma_map_page(dev, page, offset, size, dir, addr);

	return addr;
}

static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_is_direct(ops))
		dma_direct_unmap_page(dev, addr, size, dir, attrs);
	else if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir);
}

/*
 * dma_map_sg_attrs returns 0 on error and > 0 on success.
 * It should never return a value < 0.
 */
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir,
				   unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	int ents;

	BUG_ON(!valid_dma_direction(dir));
	if (dma_is_direct(ops))
		ents = dma_direct_map_sg(dev, sg, nents, dir, attrs);
	else
		ents = ops->map_sg(dev, sg, nents, dir, attrs);
	BUG_ON(ents < 0);
	debug_dma_map_sg(dev, sg, nents, ents, dir);

	return ents;
}

static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
				      int nents, enum dma_data_direction dir,
				      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (dma_is_direct(ops))
		dma_direct_unmap_sg(dev, sg, nents, dir, attrs);
	else if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}
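
/*
 * Illustrative scatter-gather sketch; "sgl", "nents" and my_hw_queue_segment()
 * are hypothetical placeholders, and the dma_map_sg()/dma_unmap_sg() wrappers
 * used here are defined near the end of this header. Note that the unmap call
 * takes the original nents, not the count returned by the map call:
 *
 *	struct scatterlist *sg;
 *	int i, count;
 *
 *	count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *	if (!count)
 *		return -EIO;
 *	for_each_sg(sgl, sg, count, i)
 *		my_hw_queue_segment(sg_dma_address(sg), sg_dma_len(sg));
 *	...
 *	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
 */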

static inline dma_addr_t dma_map_resource(struct device *dev,
					  phys_addr_t phys_addr,
					  size_t size,
					  enum dma_data_direction dir,
					  unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr = DMA_MAPPING_ERROR;

	BUG_ON(!valid_dma_direction(dir));

	/* Don't allow RAM to be mapped */
	if (WARN_ON_ONCE(pfn_valid(PHYS_PFN(phys_addr))))
		return DMA_MAPPING_ERROR;

	if (dma_is_direct(ops))
		addr = dma_direct_map_resource(dev, phys_addr, size, dir, attrs);
	else if (ops->map_resource)
		addr = ops->map_resource(dev, phys_addr, size, dir, attrs);

	debug_dma_map_resource(dev, phys_addr, size, dir, addr);
	return addr;
}

static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
				      size_t size, enum dma_data_direction dir,
				      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (!dma_is_direct(ops) && ops->unmap_resource)
		ops->unmap_resource(dev, addr, size, dir, attrs);
	debug_dma_unmap_resource(dev, addr, size, dir);
}

static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
					   size_t size,
					   enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_is_direct(ops))
		dma_direct_sync_single_for_cpu(dev, addr, size, dir);
	else if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t addr, size_t size,
					      enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_is_direct(ops))
		dma_direct_sync_single_for_device(dev, addr, size, dir);
	else if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		    int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_is_direct(ops))
		dma_direct_sync_sg_for_cpu(dev, sg, nelems, dir);
	else if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		       int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_is_direct(ops))
		dma_direct_sync_sg_for_device(dev, sg, nelems, dir);
	else if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}

static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	debug_dma_mapping_error(dev, dma_addr);

	if (dma_addr == DMA_MAPPING_ERROR)
		return -ENOMEM;
	return 0;
}
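
/*
 * Illustrative sketch: addresses returned by the dma_map_* helpers should be
 * checked with dma_mapping_error() before being handed to hardware; "page" is
 * a hypothetical placeholder:
 *
 *	dma_addr_t addr = dma_map_page_attrs(dev, page, 0, PAGE_SIZE,
 *					     DMA_TO_DEVICE, 0);
 *	if (dma_mapping_error(dev, addr))
 *		return -ENOMEM;
 */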

void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t flag, unsigned long attrs);
void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs);
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs);
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle);
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction dir);
int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
int dma_supported(struct device *dev, u64 mask);
int dma_set_mask(struct device *dev, u64 mask);
int dma_set_coherent_mask(struct device *dev, u64 mask);
u64 dma_get_required_mask(struct device *dev);
size_t dma_max_mapping_size(struct device *dev);
#else /* CONFIG_HAS_DMA */
static inline dma_addr_t dma_map_page_attrs(struct device *dev,
		struct page *page, size_t offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	return 0;
}
static inline void dma_unmap_sg_attrs(struct device *dev,
		struct scatterlist *sg, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
}
static inline dma_addr_t dma_map_resource(struct device *dev,
		phys_addr_t phys_addr, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
}
static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
}
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return -ENOMEM;
}
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
{
	return NULL;
}
static inline void dma_free_attrs(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
{
}
static inline void *dmam_alloc_attrs(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	return NULL;
}
static inline void dmam_free_coherent(struct device *dev, size_t size,
		void *vaddr, dma_addr_t dma_handle)
{
}
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction dir)
{
}
static inline int dma_get_sgtable_attrs(struct device *dev,
		struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr,
		size_t size, unsigned long attrs)
{
	return -ENXIO;
}
static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	return -ENXIO;
}
static inline int dma_supported(struct device *dev, u64 mask)
{
	return 0;
}
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	return -EIO;
}
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	return -EIO;
}
static inline u64 dma_get_required_mask(struct device *dev)
{
	return 0;
}
static inline size_t dma_max_mapping_size(struct device *dev)
{
	return 0;
}
#endif /* CONFIG_HAS_DMA */

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	debug_dma_map_single(dev, ptr, size);
	return dma_map_page_attrs(dev, virt_to_page(ptr), offset_in_page(ptr),
			size, dir, attrs);
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return dma_unmap_page_attrs(dev, addr, size, dir, attrs);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t addr, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	return dma_sync_single_for_cpu(dev, addr + offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t addr, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	return dma_sync_single_for_device(dev, addr + offset, size, dir);
}

#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
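
/*
 * Illustrative streaming-DMA sketch using the wrappers above; "buf" and "len"
 * are hypothetical placeholders. The CPU may only look at the data between
 * the sync-for-cpu and sync-for-device calls, while it owns the buffer:
 *
 *	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *
 *	if (dma_mapping_error(dev, addr))
 *		return -ENOMEM;
 *	...				(device writes into the buffer)
 *	dma_sync_single_for_cpu(dev, addr, len, DMA_FROM_DEVICE);
 *	...				(CPU reads the received data)
 *	dma_sync_single_for_device(dev, addr, len, DMA_FROM_DEVICE);
 *	...
 *	dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE);
 */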

extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);

void *dma_common_contiguous_remap(struct page *page, size_t size,
		unsigned long vm_flags,
		pgprot_t prot, const void *caller);

void *dma_common_pages_remap(struct page **pages, size_t size,
		unsigned long vm_flags, pgprot_t prot,
		const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);

int __init dma_atomic_pool_init(gfp_t gfp, pgprot_t prot);
bool dma_in_atomic_pool(void *start, size_t size);
void *dma_alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags);
bool dma_free_from_pool(void *start, size_t size);

int
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr,
		dma_addr_t dma_addr, size_t size, unsigned long attrs);

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_handle, gfp,
			(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
}

static inline u64 dma_get_mask(struct device *dev)
{
	if (dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	return DMA_BIT_MASK(32);
}

/*
 * Set both the DMA mask and the coherent DMA mask to the same thing.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to
 * the same or smaller than the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
	int rc = dma_set_mask(dev, mask);
	if (rc == 0)
		dma_set_coherent_mask(dev, mask);
	return rc;
}
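
/*
 * Illustrative probe-time sketch: a driver typically requests its preferred
 * mask and falls back to a narrower one; the 48-bit value below is a
 * hypothetical device capability, not a recommendation:
 *
 *	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)) &&
 *	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */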

/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask appropriately set up.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
	dev->dma_mask = &dev->coherent_dma_mask;
	return dma_set_mask_and_coherent(dev, mask);
}

/**
 * dma_addressing_limited - return whether the device is addressing-limited
 * @dev: device to check
 *
 * Return %true if the device's DMA mask is too small to address all memory in
 * the system, else %false. Lack of addressing bits is the prime reason for
 * bounce buffering, but might not be the only one.
 */
static inline bool dma_addressing_limited(struct device *dev)
{
	return min_not_zero(dma_get_mask(dev), dev->bus_dma_mask) <
			    dma_get_required_mask(dev);
}

#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
		const struct iommu_ops *iommu, bool coherent);
#else
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
		u64 size, const struct iommu_ops *iommu, bool coherent)
{
}
#endif /* CONFIG_ARCH_HAS_SETUP_DMA_OPS */

#ifdef CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS
void arch_teardown_dma_ops(struct device *dev);
#else
static inline void arch_teardown_dma_ops(struct device *dev)
{
}
#endif /* CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS */

static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->max_segment_size)
		return dev->dma_parms->max_segment_size;
	return SZ_64K;
}

static inline int dma_set_max_seg_size(struct device *dev, unsigned int size)
{
	if (dev->dma_parms) {
		dev->dma_parms->max_segment_size = size;
		return 0;
	}
	return -EIO;
}

static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
		return dev->dma_parms->segment_boundary_mask;
	return DMA_BIT_MASK(32);
}

static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
	if (dev->dma_parms) {
		dev->dma_parms->segment_boundary_mask = mask;
		return 0;
	}
	return -EIO;
}
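
/*
 * Illustrative sketch: a DMA engine with segment-size or boundary-crossing
 * limits advertises them through dev->dma_parms so that mapping and block
 * layer code can respect them; the values below are hypothetical hardware
 * limits. Both setters fail with -EIO when the device has no dma_parms:
 *
 *	if (dma_set_max_seg_size(dev, SZ_1M))
 *		return -EIO;
 *	if (dma_set_seg_boundary(dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */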

static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
	return ARCH_DMA_MINALIGN;
#endif
	return 1;
}

#ifdef CONFIG_DMA_DECLARE_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
		dma_addr_t device_addr, size_t size);
void dma_release_declared_memory(struct device *dev);
#else
static inline int
dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
		dma_addr_t device_addr, size_t size)
{
	return -ENOSYS;
}

static inline void
dma_release_declared_memory(struct device *dev)
{
}
#endif /* CONFIG_DMA_DECLARE_COHERENT */

static inline void *dmam_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	return dmam_alloc_attrs(dev, size, dma_handle, gfp,
			(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}

static inline void *dma_alloc_wc(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t gfp)
{
	unsigned long attrs = DMA_ATTR_WRITE_COMBINE;

	if (gfp & __GFP_NOWARN)
		attrs |= DMA_ATTR_NO_WARN;

	return dma_alloc_attrs(dev, size, dma_addr, gfp, attrs);
}
#ifndef dma_alloc_writecombine
#define dma_alloc_writecombine dma_alloc_wc
#endif

static inline void dma_free_wc(struct device *dev, size_t size,
			       void *cpu_addr, dma_addr_t dma_addr)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_addr,
			      DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_free_writecombine
#define dma_free_writecombine dma_free_wc
#endif

static inline int dma_mmap_wc(struct device *dev,
			      struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr,
			      size_t size)
{
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
			      DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_mmap_writecombine
#define dma_mmap_writecombine dma_mmap_wc
#endif
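
/*
 * Illustrative sketch: a driver exporting a write-combined buffer to user
 * space might back its mmap file operation with dma_mmap_wc(); "struct my_ctx"
 * and its fields are hypothetical and hold the results of dma_alloc_wc():
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_ctx *ctx = file->private_data;
 *
 *		return dma_mmap_wc(ctx->dev, vma, ctx->buf, ctx->buf_dma,
 *				   vma->vm_end - vma->vm_start);
 *	}
 */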

#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)        dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)          __u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)          ((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)            ((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)   (((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)          (0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME)            (0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)   do { } while (0)
#endif
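
/*
 * Illustrative sketch: these macros let a driver record the address and length
 * needed for a later unmap without wasting space when CONFIG_NEED_DMA_MAP_STATE
 * is not set; "struct my_tx_desc" and "desc" (a pointer to it) are hypothetical:
 *
 *	struct my_tx_desc {
 *		DEFINE_DMA_UNMAP_ADDR(mapping);
 *		DEFINE_DMA_UNMAP_LEN(len);
 *	};
 *
 *	dma_unmap_addr_set(desc, mapping, addr);
 *	dma_unmap_len_set(desc, len, size);
 *	...
 *	dma_unmap_single(dev, dma_unmap_addr(desc, mapping),
 *			 dma_unmap_len(desc, len), DMA_TO_DEVICE);
 */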

#endif /* _LINUX_DMA_MAPPING_H */