#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-debug.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/kmemcheck.h>
#include <linux/bug.h>

/**
 * List of possible attributes associated with a DMA mapping. The semantics
 * of each attribute should be defined in Documentation/DMA-attributes.txt.
 *
 * DMA_ATTR_WRITE_BARRIER: DMA to a memory region with this attribute
 * forces all pending DMA writes to complete.
 */
#define DMA_ATTR_WRITE_BARRIER		(1UL << 0)
/*
 * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
 * may be weakly ordered, that is that reads and writes may pass each other.
 */
#define DMA_ATTR_WEAK_ORDERING		(1UL << 1)
/*
 * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
 * buffered to improve performance.
 */
#define DMA_ATTR_WRITE_COMBINE		(1UL << 2)
/*
 * DMA_ATTR_NON_CONSISTENT: Lets the platform choose to return either
 * consistent or non-consistent memory as it sees fit.
 */
#define DMA_ATTR_NON_CONSISTENT		(1UL << 3)
/*
 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
 * virtual mapping for the allocated buffer.
 */
#define DMA_ATTR_NO_KERNEL_MAPPING	(1UL << 4)
/*
 * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
 * the CPU cache for the given buffer, assuming that it has already been
 * transferred to the 'device' domain.
 */
#define DMA_ATTR_SKIP_CPU_SYNC		(1UL << 5)
/*
 * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
 * in physical memory.
 */
#define DMA_ATTR_FORCE_CONTIGUOUS	(1UL << 6)
/*
 * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
 * that it's probably not worth the time to try to allocate memory in a way
 * that gives better TLB efficiency.
 */
#define DMA_ATTR_ALLOC_SINGLE_PAGES	(1UL << 7)
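
/*
 * Illustrative sketch (not part of the original header): the attributes
 * above are bit flags, so callers may OR several of them together and pass
 * the result as the 'attrs' argument of the *_attrs() helpers declared
 * below. The variable names here are hypothetical.
 *
 *	unsigned long attrs = DMA_ATTR_WRITE_COMBINE | DMA_ATTR_NO_KERNEL_MAPPING;
 *	void *vaddr = dma_alloc_attrs(dev, size, &dma_handle, GFP_KERNEL, attrs);
 *	...
 *	dma_free_attrs(dev, size, vaddr, dma_handle, attrs);
 */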

/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform.
 * It can be given to a device to use as a DMA source or target. A CPU cannot
 * reference a dma_addr_t directly because there may be translation between
 * its physical address space and the bus address space.
 */
struct dma_map_ops {
	void *(*alloc)(struct device *dev, size_t size,
		       dma_addr_t *dma_handle, gfp_t gfp,
		       unsigned long attrs);
	void (*free)(struct device *dev, size_t size,
		     void *vaddr, dma_addr_t dma_handle,
		     unsigned long attrs);
	int (*mmap)(struct device *, struct vm_area_struct *,
		    void *, dma_addr_t, size_t,
		    unsigned long attrs);

	int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
			   dma_addr_t, size_t, unsigned long attrs);

	dma_addr_t (*map_page)(struct device *dev, struct page *page,
			       unsigned long offset, size_t size,
			       enum dma_data_direction dir,
			       unsigned long attrs);
	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction dir,
			   unsigned long attrs);
	/*
	 * map_sg returns 0 on error and a value > 0 on success.
	 * It should never return a value < 0.
	 */
	int (*map_sg)(struct device *dev, struct scatterlist *sg,
		      int nents, enum dma_data_direction dir,
		      unsigned long attrs);
	void (*unmap_sg)(struct device *dev,
			 struct scatterlist *sg, int nents,
			 enum dma_data_direction dir,
			 unsigned long attrs);
	dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
				   size_t size, enum dma_data_direction dir,
				   unsigned long attrs);
	void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs);
	void (*sync_single_for_cpu)(struct device *dev,
				    dma_addr_t dma_handle, size_t size,
				    enum dma_data_direction dir);
	void (*sync_single_for_device)(struct device *dev,
				       dma_addr_t dma_handle, size_t size,
				       enum dma_data_direction dir);
	void (*sync_sg_for_cpu)(struct device *dev,
				struct scatterlist *sg, int nents,
				enum dma_data_direction dir);
	void (*sync_sg_for_device)(struct device *dev,
				   struct scatterlist *sg, int nents,
				   enum dma_data_direction dir);
	int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
	int (*dma_supported)(struct device *dev, u64 mask);
	int (*set_dma_mask)(struct device *dev, u64 mask);
#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
	u64 (*get_required_mask)(struct device *dev);
#endif
	int is_phys;
};

extern struct dma_map_ops dma_noop_ops;

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
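
/*
 * For illustration (not in the original source): DMA_BIT_MASK(n) evaluates
 * to a mask with the low n bits set, e.g. DMA_BIT_MASK(32) == 0xffffffffULL
 * and DMA_BIT_MASK(64) == ~0ULL. The (n) == 64 special case avoids the
 * undefined behaviour of shifting a 64-bit value by 64.
 */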

#define DMA_MASK_NONE	0x0ULL

static inline int valid_dma_direction(int dma_direction)
{
	return ((dma_direction == DMA_BIDIRECTIONAL) ||
		(dma_direction == DMA_TO_DEVICE) ||
		(dma_direction == DMA_FROM_DEVICE));
}

static inline int is_device_dma_capable(struct device *dev)
{
	return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
}

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
/*
 * These three functions are only for the DMA allocator.
 * Don't use them in device drivers.
 */
int dma_alloc_from_coherent(struct device *dev, ssize_t size,
			    dma_addr_t *dma_handle, void **ret);
int dma_release_from_coherent(struct device *dev, int order, void *vaddr);

int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
			   void *cpu_addr, size_t size, int *ret);
#else
#define dma_alloc_from_coherent(dev, size, handle, ret) (0)
#define dma_release_from_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_coherent(dev, vma, vaddr, order, ret) (0)
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

#ifdef CONFIG_HAS_DMA
#include <asm/dma-mapping.h>
#else
/*
 * Define the DMA API to allow compilation but not linking of
 * DMA-dependent code. Code that depends on the DMA-mapping
 * API needs to set 'depends on HAS_DMA' in its Kconfig.
 */
extern struct dma_map_ops bad_dma_ops;
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
	return &bad_dma_ops;
}
#endif

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
					      size_t size,
					      enum dma_data_direction dir,
					      unsigned long attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(ptr, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, virt_to_page(ptr),
			     offset_in_page(ptr), size,
			     dir, attrs);
	debug_dma_map_page(dev, virt_to_page(ptr),
			   offset_in_page(ptr), size,
			   dir, addr, true);
	return addr;
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
					  size_t size,
					  enum dma_data_direction dir,
					  unsigned long attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir, true);
}

/*
 * dma_map_sg_attrs returns 0 on error and > 0 on success.
 * It should never return a value < 0.
 */
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir,
				   unsigned long attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	int i, ents;
	struct scatterlist *s;

	for_each_sg(sg, s, nents, i)
		kmemcheck_mark_initialized(sg_virt(s), s->length);
	BUG_ON(!valid_dma_direction(dir));
	ents = ops->map_sg(dev, sg, nents, dir, attrs);
	BUG_ON(ents < 0);
	debug_dma_map_sg(dev, sg, nents, ents, dir);

	return ents;
}
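
/*
 * Illustrative usage sketch (not part of the original header); 'sgl' and
 * 'nents' are hypothetical driver-owned values. Note that dma_unmap_sg()
 * must be passed the original nents, not the count returned by the map.
 *
 *	int count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *	if (!count)
 *		return -ENOMEM;
 *	... program the device with the 'count' mapped segments ...
 *	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
 */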

static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
				      int nents, enum dma_data_direction dir,
				      unsigned long attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(page_address(page) + offset, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, page, offset, size, dir, 0);
	debug_dma_map_page(dev, page, offset, size, dir, addr, false);

	return addr;
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
				  size_t size, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, 0);
	debug_dma_unmap_page(dev, addr, size, dir, false);
}

static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
					   size_t size,
					   enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t addr, size_t size,
					      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
						 dma_addr_t addr,
						 unsigned long offset,
						 size_t size,
						 enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
						    dma_addr_t addr,
						    unsigned long offset,
						    size_t size,
						    enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		    int nelems, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		       int nelems, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}
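
/*
 * Illustrative sketch (not part of the original header): to let the CPU look
 * at a streaming mapping that the device may use again, bracket the CPU
 * access with the sync helpers. 'dma' and 'len' are hypothetical.
 *
 *	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *	... CPU reads the buffer ...
 *	dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 */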

#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
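
/*
 * Illustrative sketch (not part of the original header) of the usual
 * streaming-DMA pattern built from the wrappers above; 'buf' and 'len' are
 * hypothetical driver values.
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	... hand 'dma' to the device and wait for the transfer ...
 *	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 */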

extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
			   void *cpu_addr, dma_addr_t dma_addr, size_t size);

void *dma_common_contiguous_remap(struct page *page, size_t size,
				  unsigned long vm_flags,
				  pgprot_t prot, const void *caller);

void *dma_common_pages_remap(struct page **pages, size_t size,
			     unsigned long vm_flags, pgprot_t prot,
			     const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);

/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @dma_addr: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
 * into user space. The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
static inline int
dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
	       dma_addr_t dma_addr, size_t size, unsigned long attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	BUG_ON(!ops);
	if (ops->mmap)
		return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}

#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
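
/*
 * Illustrative sketch (not part of the original header): a driver's mmap()
 * file operation can hand a coherent buffer to user space with the wrapper
 * above. 'foo_dev', 'foo_cpu_addr', 'foo_handle' and 'foo_size' are
 * hypothetical driver state.
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return dma_mmap_coherent(foo_dev, vma, foo_cpu_addr,
 *					 foo_handle, foo_size);
 *	}
 */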

int
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		       void *cpu_addr, dma_addr_t dma_addr, size_t size);

static inline int
dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
		      dma_addr_t dma_addr, size_t size,
		      unsigned long attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	BUG_ON(!ops);
	if (ops->get_sgtable)
		return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
					attrs);
	return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
}

#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)

#ifndef arch_dma_alloc_attrs
#define arch_dma_alloc_attrs(dev, flag)	(true)
#endif

static inline void *dma_alloc_attrs(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t flag,
				    unsigned long attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	BUG_ON(!ops);

	if (dma_alloc_from_coherent(dev, size, dma_handle, &cpu_addr))
		return cpu_addr;

	if (!arch_dma_alloc_attrs(&dev, &flag))
		return NULL;
	if (!ops->alloc)
		return NULL;

	cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
	return cpu_addr;
}

static inline void dma_free_attrs(struct device *dev, size_t size,
				  void *cpu_addr, dma_addr_t dma_handle,
				  unsigned long attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!ops);
	WARN_ON(irqs_disabled());

	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
		return;

	if (!ops->free || !cpu_addr)
		return;

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	ops->free(dev, size, cpu_addr, dma_handle, attrs);
}

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, gfp_t flag)
{
	return dma_alloc_attrs(dev, size, dma_handle, flag, 0);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *cpu_addr, dma_addr_t dma_handle)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
}
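
/*
 * Illustrative sketch (not part of the original header) of a coherent
 * allocation; 'size' is a hypothetical driver value.
 *
 *	dma_addr_t dma_handle;
 *	void *cpu_addr;
 *
 *	cpu_addr = dma_alloc_coherent(dev, size, &dma_handle, GFP_KERNEL);
 *	if (!cpu_addr)
 *		return -ENOMEM;
 *	... share the buffer with the device via dma_handle ...
 *	dma_free_coherent(dev, size, cpu_addr, dma_handle);
 */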

static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
					  dma_addr_t *dma_handle, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_handle, gfp,
			       DMA_ATTR_NON_CONSISTENT);
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
					void *cpu_addr, dma_addr_t dma_handle)
{
	dma_free_attrs(dev, size, cpu_addr, dma_handle,
		       DMA_ATTR_NON_CONSISTENT);
}

static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	debug_dma_mapping_error(dev, dma_addr);

	if (get_dma_ops(dev)->mapping_error)
		return get_dma_ops(dev)->mapping_error(dev, dma_addr);

#ifdef DMA_ERROR_CODE
	return dma_addr == DMA_ERROR_CODE;
#else
	return 0;
#endif
}

#ifndef HAVE_ARCH_DMA_SUPPORTED
static inline int dma_supported(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops)
		return 0;
	if (!ops->dma_supported)
		return 1;
	return ops->dma_supported(dev, mask);
}
#endif

#ifndef HAVE_ARCH_DMA_SET_MASK
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	if (ops->set_dma_mask)
		return ops->set_dma_mask(dev, mask);

	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;
	*dev->dma_mask = mask;
	return 0;
}
#endif

static inline u64 dma_get_mask(struct device *dev)
{
	if (dev && dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	return DMA_BIT_MASK(32);
}

#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask);
#else
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	if (!dma_supported(dev, mask))
		return -EIO;
	dev->coherent_dma_mask = mask;
	return 0;
}
#endif

/*
 * Set both the DMA mask and the coherent DMA mask to the same value.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to
 * the same or a smaller value than the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
	int rc = dma_set_mask(dev, mask);
	if (rc == 0)
		dma_set_coherent_mask(dev, mask);
	return rc;
}
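
/*
 * Illustrative sketch (not part of the original header): a typical probe()
 * first tries a wide mask and falls back to 32 bits; 'pdev' is hypothetical.
 *
 *	int ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
 *	if (ret)
 *		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 *	if (ret)
 *		return ret;
 */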

/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask appropriately set up.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
	dev->dma_mask = &dev->coherent_dma_mask;
	return dma_set_mask_and_coherent(dev, mask);
}

extern u64 dma_get_required_mask(struct device *dev);

#ifndef arch_setup_dma_ops
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
				      u64 size, const struct iommu_ops *iommu,
				      bool coherent) { }
#endif

#ifndef arch_teardown_dma_ops
static inline void arch_teardown_dma_ops(struct device *dev) { }
#endif

static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->max_segment_size)
		return dev->dma_parms->max_segment_size;
	return SZ_64K;
}

static inline unsigned int dma_set_max_seg_size(struct device *dev,
						unsigned int size)
{
	if (dev->dma_parms) {
		dev->dma_parms->max_segment_size = size;
		return 0;
	}
	return -EIO;
}

static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
		return dev->dma_parms->segment_boundary_mask;
	return DMA_BIT_MASK(32);
}

static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
	if (dev->dma_parms) {
		dev->dma_parms->segment_boundary_mask = mask;
		return 0;
	}
	return -EIO;
}

#ifndef dma_max_pfn
static inline unsigned long dma_max_pfn(struct device *dev)
{
	return *dev->dma_mask >> PAGE_SHIFT;
}
#endif

static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, gfp_t flag)
{
	void *ret = dma_alloc_coherent(dev, size, dma_handle,
				       flag | __GFP_ZERO);
	return ret;
}

#ifdef CONFIG_HAS_DMA
static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
	return ARCH_DMA_MINALIGN;
#endif
	return 1;
}
#endif

/* flags for the coherent memory api */
#define	DMA_MEMORY_MAP			0x01
#define DMA_MEMORY_IO			0x02
#define DMA_MEMORY_INCLUDES_CHILDREN	0x04
#define DMA_MEMORY_EXCLUSIVE		0x08

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				dma_addr_t device_addr, size_t size, int flags);
void dma_release_declared_memory(struct device *dev);
void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size);
#else
static inline int
dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
			    dma_addr_t device_addr, size_t size, int flags)
{
	return 0;
}

static inline void
dma_release_declared_memory(struct device *dev)
{
}

static inline void *
dma_mark_declared_memory_occupied(struct device *dev,
				  dma_addr_t device_addr, size_t size)
{
	return ERR_PTR(-EBUSY);
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

/*
 * Managed DMA API
 */
extern void *dmam_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			       dma_addr_t dma_handle);
extern void *dmam_alloc_noncoherent(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr,
				  dma_addr_t dma_handle);
#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
extern int dmam_declare_coherent_memory(struct device *dev,
					phys_addr_t phys_addr,
					dma_addr_t device_addr, size_t size,
					int flags);
extern void dmam_release_declared_memory(struct device *dev);
#else /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
static inline int dmam_declare_coherent_memory(struct device *dev,
				phys_addr_t phys_addr, dma_addr_t device_addr,
				size_t size, int flags)
{
	return 0;
}

static inline void dmam_release_declared_memory(struct device *dev)
{
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

static inline void *dma_alloc_wc(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_addr, gfp,
			       DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_alloc_writecombine
#define dma_alloc_writecombine dma_alloc_wc
#endif

static inline void dma_free_wc(struct device *dev, size_t size,
			       void *cpu_addr, dma_addr_t dma_addr)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_addr,
			      DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_free_writecombine
#define dma_free_writecombine dma_free_wc
#endif

static inline int dma_mmap_wc(struct device *dev,
			      struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr,
			      size_t size)
{
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
			      DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_mmap_writecombine
#define dma_mmap_writecombine dma_mmap_wc
#endif

#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)	dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)		__u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)		((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	(((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)		((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	(((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)		(0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME)		(0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	do { } while (0)
#endif
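
/*
 * Illustrative sketch (not part of the original header): a driver embeds the
 * unmap state in its own structure so that it only occupies space when
 * CONFIG_NEED_DMA_MAP_STATE is set; 'struct foo_buf' and 'buf' are
 * hypothetical.
 *
 *	struct foo_buf {
 *		DEFINE_DMA_UNMAP_ADDR(mapping);
 *		DEFINE_DMA_UNMAP_LEN(len);
 *	};
 *
 *	dma_unmap_addr_set(buf, mapping, dma);
 *	dma_unmap_len_set(buf, len, size);
 *	...
 *	dma_unmap_single(dev, dma_unmap_addr(buf, mapping),
 *			 dma_unmap_len(buf, len), DMA_FROM_DEVICE);
 */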

#endif