#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-debug.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/kmemcheck.h>
#include <linux/bug.h>

/**
 * List of possible attributes associated with a DMA mapping. The semantics
 * of each attribute should be defined in Documentation/DMA-attributes.txt.
 *
 * DMA_ATTR_WRITE_BARRIER: DMA to a memory region with this attribute
 * forces all pending DMA writes to complete.
 */
#define DMA_ATTR_WRITE_BARRIER		(1UL << 0)
/*
 * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
 * may be weakly ordered, that is that reads and writes may pass each other.
 */
#define DMA_ATTR_WEAK_ORDERING		(1UL << 1)
/*
 * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
 * buffered to improve performance.
 */
#define DMA_ATTR_WRITE_COMBINE		(1UL << 2)
/*
 * DMA_ATTR_NON_CONSISTENT: Lets the platform choose to return either
 * consistent or non-consistent memory as it sees fit.
 */
#define DMA_ATTR_NON_CONSISTENT		(1UL << 3)
/*
 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
 * virtual mapping for the allocated buffer.
 */
#define DMA_ATTR_NO_KERNEL_MAPPING	(1UL << 4)
/*
 * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
 * the CPU cache for the given buffer, assuming that it has already been
 * transferred to the 'device' domain.
 */
#define DMA_ATTR_SKIP_CPU_SYNC		(1UL << 5)
/*
 * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
 * in physical memory.
 */
#define DMA_ATTR_FORCE_CONTIGUOUS	(1UL << 6)
/*
 * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
 * that it's probably not worth the time to try to allocate memory in a way
 * that gives better TLB efficiency.
 */
#define DMA_ATTR_ALLOC_SINGLE_PAGES	(1UL << 7)

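/*
 * Example (illustrative sketch, not part of the upstream header): the
 * attributes are plain bits in an unsigned long, so callers may OR several
 * of them together and pass the result to any of the *_attrs interfaces
 * declared below. "dev", "buf" and "len" here are hypothetical.
 *
 *	unsigned long attrs = DMA_ATTR_WEAK_ORDERING | DMA_ATTR_SKIP_CPU_SYNC;
 *	dma_addr_t handle = dma_map_single_attrs(dev, buf, len,
 *						 DMA_TO_DEVICE, attrs);
 */
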
/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform.
 * It can be given to a device to use as a DMA source or target. A CPU cannot
 * reference a dma_addr_t directly because there may be translation between
 * its physical address space and the bus address space.
 */
struct dma_map_ops {
	void* (*alloc)(struct device *dev, size_t size,
			dma_addr_t *dma_handle, gfp_t gfp,
			unsigned long attrs);
	void (*free)(struct device *dev, size_t size,
			void *vaddr, dma_addr_t dma_handle,
			unsigned long attrs);
	int (*mmap)(struct device *, struct vm_area_struct *,
			void *, dma_addr_t, size_t,
			unsigned long attrs);

	int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
			   dma_addr_t, size_t, unsigned long attrs);

	dma_addr_t (*map_page)(struct device *dev, struct page *page,
			       unsigned long offset, size_t size,
			       enum dma_data_direction dir,
			       unsigned long attrs);
	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction dir,
			   unsigned long attrs);
	/*
	 * map_sg returns 0 on error and a value > 0 on success.
	 * It should never return a value < 0.
	 */
	int (*map_sg)(struct device *dev, struct scatterlist *sg,
		      int nents, enum dma_data_direction dir,
		      unsigned long attrs);
	void (*unmap_sg)(struct device *dev,
			 struct scatterlist *sg, int nents,
			 enum dma_data_direction dir,
			 unsigned long attrs);
	void (*sync_single_for_cpu)(struct device *dev,
				    dma_addr_t dma_handle, size_t size,
				    enum dma_data_direction dir);
	void (*sync_single_for_device)(struct device *dev,
				       dma_addr_t dma_handle, size_t size,
				       enum dma_data_direction dir);
	void (*sync_sg_for_cpu)(struct device *dev,
				struct scatterlist *sg, int nents,
				enum dma_data_direction dir);
	void (*sync_sg_for_device)(struct device *dev,
				   struct scatterlist *sg, int nents,
				   enum dma_data_direction dir);
	int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
	int (*dma_supported)(struct device *dev, u64 mask);
	int (*set_dma_mask)(struct device *dev, u64 mask);
#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
	u64 (*get_required_mask)(struct device *dev);
#endif
	int is_phys;
};

extern struct dma_map_ops dma_noop_ops;

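/*
 * Example (hypothetical, for illustration only): an architecture whose bus
 * addresses are identical to physical addresses could back these ops roughly
 * as follows; compare the trivial dma_noop_ops implementation in
 * lib/dma-noop.c. Real implementations live under arch/ and drivers/iommu/.
 *
 *	static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
 *					 unsigned long offset, size_t size,
 *					 enum dma_data_direction dir,
 *					 unsigned long attrs)
 *	{
 *		return page_to_phys(page) + offset;
 *	}
 *
 *	static struct dma_map_ops nommu_dma_ops = {
 *		.map_page	= nommu_map_page,
 *		.is_phys	= 1,
 *	};
 */
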
#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))

#define DMA_MASK_NONE	0x0ULL

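/*
 * For example, DMA_BIT_MASK(32) evaluates to 0x00000000ffffffffULL. The
 * explicit (n) == 64 case is needed because shifting a 64-bit value by
 * 64 bits is undefined behaviour in C.
 */
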
static inline int valid_dma_direction(int dma_direction)
{
	return ((dma_direction == DMA_BIDIRECTIONAL) ||
		(dma_direction == DMA_TO_DEVICE) ||
		(dma_direction == DMA_FROM_DEVICE));
}

static inline int is_device_dma_capable(struct device *dev)
{
	return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
}

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
/*
 * These three functions are only for the DMA allocator.
 * Don't use them in device drivers.
 */
int dma_alloc_from_coherent(struct device *dev, ssize_t size,
			    dma_addr_t *dma_handle, void **ret);
int dma_release_from_coherent(struct device *dev, int order, void *vaddr);

int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
			   void *cpu_addr, size_t size, int *ret);
#else
#define dma_alloc_from_coherent(dev, size, handle, ret) (0)
#define dma_release_from_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_coherent(dev, vma, vaddr, size, ret) (0)
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

#ifdef CONFIG_HAS_DMA
#include <asm/dma-mapping.h>
#else
/*
 * Define the DMA API to allow compilation but not linking of
 * DMA-dependent code.  Code that depends on the dma-mapping
 * API needs to set 'depends on HAS_DMA' in its Kconfig.
 */
extern struct dma_map_ops bad_dma_ops;
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
	return &bad_dma_ops;
}
#endif

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
					      size_t size,
					      enum dma_data_direction dir,
					      unsigned long attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(ptr, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, virt_to_page(ptr),
			     offset_in_page(ptr), size,
			     dir, attrs);
	debug_dma_map_page(dev, virt_to_page(ptr),
			   offset_in_page(ptr), size,
			   dir, addr, true);
	return addr;
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
					  size_t size,
					  enum dma_data_direction dir,
					  unsigned long attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir, true);
}

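/*
 * Example usage (illustrative sketch; "dev", "buf" and "len" are
 * hypothetical). Every successful map must be paired with an unmap, and the
 * returned handle must be checked with dma_mapping_error() (defined below):
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	... point the hardware at "handle" and run the transfer ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */
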
/*
 * dma_map_sg_attrs() returns 0 on error and > 0 on success.
 * It should never return a value < 0.
 */
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir,
				   unsigned long attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	int i, ents;
	struct scatterlist *s;

	for_each_sg(sg, s, nents, i)
		kmemcheck_mark_initialized(sg_virt(s), s->length);
	BUG_ON(!valid_dma_direction(dir));
	ents = ops->map_sg(dev, sg, nents, dir, attrs);
	BUG_ON(ents < 0);
	debug_dma_map_sg(dev, sg, nents, ents, dir);

	return ents;
}

static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
				      int nents, enum dma_data_direction dir,
				      unsigned long attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}

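/*
 * Example usage (illustrative sketch; setup_desc() is a hypothetical driver
 * helper). The count returned by dma_map_sg() may be smaller than nents if
 * adjacent entries were merged, so hardware descriptors must be built from
 * the returned count, while the unmap uses the original nents:
 *
 *	int i, mapped = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 *	struct scatterlist *s;
 *
 *	if (!mapped)
 *		return -ENOMEM;
 *	for_each_sg(sgl, s, mapped, i)
 *		setup_desc(i, sg_dma_address(s), sg_dma_len(s));
 *	...
 *	dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 */
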
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(page_address(page) + offset, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, page, offset, size, dir, 0);
	debug_dma_map_page(dev, page, offset, size, dir, addr, false);

	return addr;
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
				  size_t size, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, 0);
	debug_dma_unmap_page(dev, addr, size, dir, false);
}

static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
					   size_t size,
					   enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t addr, size_t size,
					      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
						 dma_addr_t addr,
						 unsigned long offset,
						 size_t size,
						 enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
						    dma_addr_t addr,
						    unsigned long offset,
						    size_t size,
						    enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		    int nelems, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		       int nelems, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}

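/*
 * Example (illustrative sketch): a driver that keeps a streaming mapping
 * alive across several transfers must hand ownership of the buffer back and
 * forth explicitly:
 *
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	... the CPU may now read the buffer contents ...
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 *	... the device may now DMA into the buffer again ...
 */
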
#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)

extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
			   void *cpu_addr, dma_addr_t dma_addr, size_t size);

void *dma_common_contiguous_remap(struct page *page, size_t size,
				  unsigned long vm_flags,
				  pgprot_t prot, const void *caller);

void *dma_common_pages_remap(struct page **pages, size_t size,
			     unsigned long vm_flags, pgprot_t prot,
			     const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);

/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @handle: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
 * into user space.  The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
static inline int
dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
	       dma_addr_t dma_addr, size_t size, unsigned long attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	BUG_ON(!ops);
	if (ops->mmap)
		return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}

#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
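
/*
 * Example (hypothetical driver code): dma_mmap_coherent() is typically
 * called from a driver's mmap file operation to expose a coherent buffer
 * to user space. "struct foo_dev" and its members are assumptions:
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_dev *foo = file->private_data;
 *
 *		return dma_mmap_coherent(foo->dev, vma, foo->cpu_addr,
 *					 foo->dma_handle, foo->size);
 *	}
 */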

int
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		       void *cpu_addr, dma_addr_t dma_addr, size_t size);

static inline int
dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
		      dma_addr_t dma_addr, size_t size,
		      unsigned long attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	BUG_ON(!ops);
	if (ops->get_sgtable)
		return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
					attrs);
	return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
}

#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)

#ifndef arch_dma_alloc_attrs
#define arch_dma_alloc_attrs(dev, flag)	(true)
#endif

static inline void *dma_alloc_attrs(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t flag,
				    unsigned long attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	BUG_ON(!ops);

	if (dma_alloc_from_coherent(dev, size, dma_handle, &cpu_addr))
		return cpu_addr;

	if (!arch_dma_alloc_attrs(&dev, &flag))
		return NULL;
	if (!ops->alloc)
		return NULL;

	cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
	return cpu_addr;
}

static inline void dma_free_attrs(struct device *dev, size_t size,
				  void *cpu_addr, dma_addr_t dma_handle,
				  unsigned long attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!ops);
	WARN_ON(irqs_disabled());

	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
		return;

	if (!ops->free || !cpu_addr)
		return;

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	ops->free(dev, size, cpu_addr, dma_handle, attrs);
}

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	return dma_alloc_attrs(dev, size, dma_handle, flag, 0);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *cpu_addr, dma_addr_t dma_handle)
{
	dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
}

static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
					  dma_addr_t *dma_handle, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_handle, gfp,
			       DMA_ATTR_NON_CONSISTENT);
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
					void *cpu_addr, dma_addr_t dma_handle)
{
	dma_free_attrs(dev, size, cpu_addr, dma_handle,
		       DMA_ATTR_NON_CONSISTENT);
}

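/*
 * Example (illustrative sketch with hypothetical names): a typical
 * descriptor-ring allocation. The memory stays coherent for the lifetime
 * of the mapping, so no dma_sync_* calls are needed:
 *
 *	ring->descs = dma_alloc_coherent(dev, ring_bytes, &ring->dma,
 *					 GFP_KERNEL);
 *	if (!ring->descs)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, ring_bytes, ring->descs, ring->dma);
 */
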
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	debug_dma_mapping_error(dev, dma_addr);

	if (get_dma_ops(dev)->mapping_error)
		return get_dma_ops(dev)->mapping_error(dev, dma_addr);

#ifdef DMA_ERROR_CODE
	return dma_addr == DMA_ERROR_CODE;
#else
	return 0;
#endif
}

#ifndef HAVE_ARCH_DMA_SUPPORTED
static inline int dma_supported(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops)
		return 0;
	if (!ops->dma_supported)
		return 1;
	return ops->dma_supported(dev, mask);
}
#endif

#ifndef HAVE_ARCH_DMA_SET_MASK
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	if (ops->set_dma_mask)
		return ops->set_dma_mask(dev, mask);

	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;
	*dev->dma_mask = mask;
	return 0;
}
#endif

static inline u64 dma_get_mask(struct device *dev)
{
	if (dev && dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	return DMA_BIT_MASK(32);
}

#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask);
#else
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	if (!dma_supported(dev, mask))
		return -EIO;
	dev->coherent_dma_mask = mask;
	return 0;
}
#endif

/*
 * Set both the DMA mask and the coherent DMA mask to the same thing.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to
 * the same or a smaller value than the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
	int rc = dma_set_mask(dev, mask);
	if (rc == 0)
		dma_set_coherent_mask(dev, mask);
	return rc;
}

/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask set up appropriately.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
	dev->dma_mask = &dev->coherent_dma_mask;
	return dma_set_mask_and_coherent(dev, mask);
}

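/*
 * Example (illustrative probe-time negotiation; "pdev" is a hypothetical
 * platform or PCI device): try the widest mask the hardware supports and
 * fall back to 32 bits:
 *
 *	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */
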
extern u64 dma_get_required_mask(struct device *dev);

#ifndef arch_setup_dma_ops
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
				      u64 size, const struct iommu_ops *iommu,
				      bool coherent) { }
#endif

#ifndef arch_teardown_dma_ops
static inline void arch_teardown_dma_ops(struct device *dev) { }
#endif

static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->max_segment_size)
		return dev->dma_parms->max_segment_size;
	return SZ_64K;
}

static inline unsigned int dma_set_max_seg_size(struct device *dev,
						unsigned int size)
{
	if (dev->dma_parms) {
		dev->dma_parms->max_segment_size = size;
		return 0;
	}
	return -EIO;
}

static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
		return dev->dma_parms->segment_boundary_mask;
	return DMA_BIT_MASK(32);
}

static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
	if (dev->dma_parms) {
		dev->dma_parms->segment_boundary_mask = mask;
		return 0;
	}
	return -EIO;
}

#ifndef dma_max_pfn
static inline unsigned long dma_max_pfn(struct device *dev)
{
	return *dev->dma_mask >> PAGE_SHIFT;
}
#endif

static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, gfp_t flag)
{
	return dma_alloc_coherent(dev, size, dma_handle, flag | __GFP_ZERO);
}

#ifdef CONFIG_HAS_DMA
static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
	return ARCH_DMA_MINALIGN;
#endif
	return 1;
}
#endif

/* flags for the coherent memory API */
#define DMA_MEMORY_MAP			0x01
#define DMA_MEMORY_IO			0x02
#define DMA_MEMORY_INCLUDES_CHILDREN	0x04
#define DMA_MEMORY_EXCLUSIVE		0x08

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				dma_addr_t device_addr, size_t size, int flags);
void dma_release_declared_memory(struct device *dev);
void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size);
#else
static inline int
dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
			    dma_addr_t device_addr, size_t size, int flags)
{
	return 0;
}

static inline void
dma_release_declared_memory(struct device *dev)
{
}

static inline void *
dma_mark_declared_memory_occupied(struct device *dev,
				  dma_addr_t device_addr, size_t size)
{
	return ERR_PTR(-EBUSY);
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

/*
 * Managed DMA API
 */
extern void *dmam_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			       dma_addr_t dma_handle);
extern void *dmam_alloc_noncoherent(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr,
				  dma_addr_t dma_handle);
#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
extern int dmam_declare_coherent_memory(struct device *dev,
					phys_addr_t phys_addr,
					dma_addr_t device_addr, size_t size,
					int flags);
extern void dmam_release_declared_memory(struct device *dev);
#else /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
static inline int dmam_declare_coherent_memory(struct device *dev,
				phys_addr_t phys_addr, dma_addr_t device_addr,
				size_t size, int flags)
{
	return 0;
}

static inline void dmam_release_declared_memory(struct device *dev)
{
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

static inline void *dma_alloc_wc(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_addr, gfp,
			       DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_alloc_writecombine
#define dma_alloc_writecombine dma_alloc_wc
#endif

static inline void dma_free_wc(struct device *dev, size_t size,
			       void *cpu_addr, dma_addr_t dma_addr)
{
	dma_free_attrs(dev, size, cpu_addr, dma_addr,
		       DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_free_writecombine
#define dma_free_writecombine dma_free_wc
#endif

static inline int dma_mmap_wc(struct device *dev,
			      struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr,
			      size_t size)
{
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
			      DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_mmap_writecombine
#define dma_mmap_writecombine dma_mmap_wc
#endif
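
/*
 * Example (illustrative sketch with hypothetical names): write-combined
 * allocations are commonly used for frame buffers, where the CPU mostly
 * streams writes and rarely reads back:
 *
 *	fb->screen_base = dma_alloc_wc(dev, fb_size, &fb->dma, GFP_KERNEL);
 *	if (!fb->screen_base)
 *		return -ENOMEM;
 */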

#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)        dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)          __u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)           ((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)  (((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)             ((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)    (((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)           (0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)  do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME)             (0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)    do { } while (0)
#endif
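
/*
 * Example (illustrative sketch; "struct foo_tx_desc" is hypothetical):
 * these macros let a driver store unmap bookkeeping only on platforms
 * that actually need it, costing nothing elsewhere:
 *
 *	struct foo_tx_desc {
 *		DEFINE_DMA_UNMAP_ADDR(addr);
 *		DEFINE_DMA_UNMAP_LEN(len);
 *	};
 *
 *	dma_unmap_addr_set(desc, addr, handle);
 *	dma_unmap_len_set(desc, len, size);
 *	...
 *	dma_unmap_single(dev, dma_unmap_addr(desc, addr),
 *			 dma_unmap_len(desc, len), DMA_TO_DEVICE);
 */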

#endif