#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-debug.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/kmemcheck.h>
#include <linux/bug.h>
/*
 * List of possible attributes associated with a DMA mapping. The semantics
 * of each attribute should be defined in Documentation/DMA-attributes.txt.
 *
 * DMA_ATTR_WRITE_BARRIER: DMA to a memory region with this attribute
 * forces all pending DMA writes to complete.
 */
#define DMA_ATTR_WRITE_BARRIER		(1UL << 0)
/*
 * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
 * may be weakly ordered, that is that reads and writes may pass each other.
 */
#define DMA_ATTR_WEAK_ORDERING		(1UL << 1)
/*
 * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
 * buffered to improve performance.
 */
#define DMA_ATTR_WRITE_COMBINE		(1UL << 2)
/*
 * DMA_ATTR_NON_CONSISTENT: Lets the platform choose to return either
 * consistent or non-consistent memory as it sees fit.
 */
#define DMA_ATTR_NON_CONSISTENT		(1UL << 3)
/*
 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
 * virtual mapping for the allocated buffer.
 */
#define DMA_ATTR_NO_KERNEL_MAPPING	(1UL << 4)
/*
 * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
 * the CPU cache for the given buffer assuming that it has already been
 * transferred to the 'device' domain.
 */
#define DMA_ATTR_SKIP_CPU_SYNC		(1UL << 5)
/*
 * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
 * in physical memory.
 */
#define DMA_ATTR_FORCE_CONTIGUOUS	(1UL << 6)
/*
 * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
 * that it's probably not worth the time to try to allocate memory in a way
 * that gives better TLB efficiency.
 */
#define DMA_ATTR_ALLOC_SINGLE_PAGES	(1UL << 7)
/*
 * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
 * allocation failure reports (similarly to __GFP_NOWARN).
 */
#define DMA_ATTR_NO_WARN	(1UL << 8)
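/*
 * Illustrative sketch (not part of the upstream header): the attribute bits
 * above can be ORed together and passed to the *_attrs variants of the DMA
 * API declared later in this file. The platform device and buffer below are
 * hypothetical.
 *
 *	dma_addr_t dma_handle;
 *	void *buf;
 *
 *	buf = dma_alloc_attrs(&pdev->dev, SZ_64K, &dma_handle, GFP_KERNEL,
 *			      DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	dma_free_attrs(&pdev->dev, SZ_64K, buf, dma_handle,
 *		       DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
 *
 * The same attribute mask should be passed to the matching free/unmap call.
 */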
/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform.
 * It can be given to a device to use as a DMA source or target. A CPU cannot
 * reference a dma_addr_t directly because there may be translation between
 * its physical address space and the bus address space.
 */
struct dma_map_ops {
	void* (*alloc)(struct device *dev, size_t size,
		       dma_addr_t *dma_handle, gfp_t gfp,
		       unsigned long attrs);
	void (*free)(struct device *dev, size_t size,
		     void *vaddr, dma_addr_t dma_handle,
		     unsigned long attrs);
	int (*mmap)(struct device *, struct vm_area_struct *,
		    void *, dma_addr_t, size_t,
		    unsigned long attrs);

	int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
			   dma_addr_t, size_t, unsigned long attrs);

	dma_addr_t (*map_page)(struct device *dev, struct page *page,
			       unsigned long offset, size_t size,
			       enum dma_data_direction dir,
			       unsigned long attrs);
	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction dir,
			   unsigned long attrs);
	/*
	 * map_sg returns 0 on error and a value > 0 on success.
	 * It should never return a value < 0.
	 */
	int (*map_sg)(struct device *dev, struct scatterlist *sg,
		      int nents, enum dma_data_direction dir,
		      unsigned long attrs);
	void (*unmap_sg)(struct device *dev,
			 struct scatterlist *sg, int nents,
			 enum dma_data_direction dir,
			 unsigned long attrs);
	dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
				   size_t size, enum dma_data_direction dir,
				   unsigned long attrs);
	void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs);
	void (*sync_single_for_cpu)(struct device *dev,
				    dma_addr_t dma_handle, size_t size,
				    enum dma_data_direction dir);
	void (*sync_single_for_device)(struct device *dev,
				       dma_addr_t dma_handle, size_t size,
				       enum dma_data_direction dir);
	void (*sync_sg_for_cpu)(struct device *dev,
				struct scatterlist *sg, int nents,
				enum dma_data_direction dir);
	void (*sync_sg_for_device)(struct device *dev,
				   struct scatterlist *sg, int nents,
				   enum dma_data_direction dir);
	int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
	int (*dma_supported)(struct device *dev, u64 mask);
	int (*set_dma_mask)(struct device *dev, u64 mask);
#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
	u64 (*get_required_mask)(struct device *dev);
#endif
};
extern struct dma_map_ops dma_noop_ops;
#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))

#define DMA_MASK_NONE	0x0ULL
static inline int valid_dma_direction(int dma_direction)
{
	return ((dma_direction == DMA_BIDIRECTIONAL) ||
		(dma_direction == DMA_TO_DEVICE) ||
		(dma_direction == DMA_FROM_DEVICE));
}
static inline int is_device_dma_capable(struct device *dev)
{
	return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
}
#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
/*
 * These three functions are only for the DMA allocator.
 * Don't use them in device drivers.
 */
int dma_alloc_from_coherent(struct device *dev, ssize_t size,
			    dma_addr_t *dma_handle, void **ret);
int dma_release_from_coherent(struct device *dev, int order, void *vaddr);

int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
			   void *cpu_addr, size_t size, int *ret);
#else
#define dma_alloc_from_coherent(dev, size, handle, ret) (0)
#define dma_release_from_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_coherent(dev, vma, vaddr, order, ret) (0)
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
#ifdef CONFIG_HAS_DMA
#include <asm/dma-mapping.h>
#else
/*
 * Define the DMA API to allow compilation but not linking of
 * DMA-dependent code. Code that depends on the dma-mapping
 * API needs to set 'depends on HAS_DMA' in its Kconfig.
 */
extern struct dma_map_ops bad_dma_ops;
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
	return &bad_dma_ops;
}
#endif
static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
					      size_t size,
					      enum dma_data_direction dir,
					      unsigned long attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(ptr, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, virt_to_page(ptr),
			     offset_in_page(ptr), size,
			     dir, attrs);
	debug_dma_map_page(dev, virt_to_page(ptr),
			   offset_in_page(ptr), size,
			   dir, addr, true);
	return addr;
}
static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
					  size_t size,
					  enum dma_data_direction dir,
					  unsigned long attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir, true);
}
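/*
 * Illustrative sketch (not part of the upstream header): the usual streaming
 * mapping pattern built on the two helpers above. The device pointer, buffer
 * and length are hypothetical, and every mapping must be checked with
 * dma_mapping_error() before the handle is handed to the hardware.
 *
 *	dma_addr_t dma;
 *
 *	dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	... program the device to DMA from 'dma' ...
 *	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 */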
/*
 * dma_map_sg_attrs returns 0 on error and > 0 on success.
 * It should never return a value < 0.
 */
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir,
				   unsigned long attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	int i, ents;
	struct scatterlist *s;

	for_each_sg(sg, s, nents, i)
		kmemcheck_mark_initialized(sg_virt(s), s->length);
	BUG_ON(!valid_dma_direction(dir));
	ents = ops->map_sg(dev, sg, nents, dir, attrs);
	BUG_ON(ents < 0);
	debug_dma_map_sg(dev, sg, nents, ents, dir);

	return ents;
}
static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
				      int nents, enum dma_data_direction dir,
				      unsigned long attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}
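/*
 * Illustrative sketch (not part of the upstream header): mapping a
 * scatter-gather list. dma_map_sg() may coalesce entries, so the returned
 * count (not the original nents) is what gets programmed into the hardware,
 * while the unmap still takes the original nents. The device, sglist and
 * hw_fill_desc() helper are hypothetical.
 *
 *	struct scatterlist *s;
 *	int i, count;
 *
 *	count = dma_map_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 *	if (count == 0)
 *		return -ENOMEM;
 *	for_each_sg(sglist, s, count, i)
 *		hw_fill_desc(hw, i, sg_dma_address(s), sg_dma_len(s));
 *	...
 *	dma_unmap_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 */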
static inline dma_addr_t dma_map_page_attrs(struct device *dev,
					    struct page *page,
					    size_t offset, size_t size,
					    enum dma_data_direction dir,
					    unsigned long attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(page_address(page) + offset, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, page, offset, size, dir, attrs);
	debug_dma_map_page(dev, page, offset, size, dir, addr, false);

	return addr;
}
static inline void dma_unmap_page_attrs(struct device *dev,
					dma_addr_t addr, size_t size,
					enum dma_data_direction dir,
					unsigned long attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir, false);
}
static inline dma_addr_t dma_map_resource(struct device *dev,
					  phys_addr_t phys_addr,
					  size_t size,
					  enum dma_data_direction dir,
					  unsigned long attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));

	/* Don't allow RAM to be mapped */
	BUG_ON(pfn_valid(PHYS_PFN(phys_addr)));

	addr = phys_addr;
	if (ops->map_resource)
		addr = ops->map_resource(dev, phys_addr, size, dir, attrs);

	debug_dma_map_resource(dev, phys_addr, size, dir, addr);

	return addr;
}
static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
				      size_t size, enum dma_data_direction dir,
				      unsigned long attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_resource)
		ops->unmap_resource(dev, addr, size, dir, attrs);
	debug_dma_unmap_resource(dev, addr, size, dir);
}
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
					   size_t size,
					   enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}
static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t addr, size_t size,
					      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}
static inline void dma_sync_single_range_for_cpu(struct device *dev,
						 dma_addr_t addr,
						 unsigned long offset,
						 size_t size,
						 enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
}
static inline void dma_sync_single_range_for_device(struct device *dev,
						    dma_addr_t addr,
						    unsigned long offset,
						    size_t size,
						    enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
}
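/*
 * Illustrative sketch (not part of the upstream header): if the CPU needs to
 * access a buffer while it stays mapped for streaming DMA, ownership has to
 * be passed back and forth with the sync helpers above. The device, handle,
 * buffer and parse_rx_buffer() helper are hypothetical.
 *
 *	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *	parse_rx_buffer(buf, len);
 *	dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 */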
static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		    int nelems, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}
static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		       int nelems, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}
#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
			   void *cpu_addr, dma_addr_t dma_addr, size_t size);

void *dma_common_contiguous_remap(struct page *page, size_t size,
			unsigned long vm_flags,
			pgprot_t prot, const void *caller);

void *dma_common_pages_remap(struct page **pages, size_t size,
			unsigned long vm_flags, pgprot_t prot,
			const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);
/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @handle: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
 * into user space. The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
static inline int
dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
	       dma_addr_t dma_addr, size_t size, unsigned long attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	BUG_ON(!ops);
	if (ops->mmap)
		return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}
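/*
 * Illustrative sketch (not part of the upstream header): a driver would
 * typically call this from its own mmap file operation. The my_dev structure
 * and its dev/cpu_addr/dma_handle/size fields are hypothetical.
 *
 *	static int my_drv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_dev *mdev = file->private_data;
 *
 *		return dma_mmap_attrs(mdev->dev, vma, mdev->cpu_addr,
 *				      mdev->dma_handle, mdev->size, 0);
 *	}
 */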
#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
int
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		       void *cpu_addr, dma_addr_t dma_addr, size_t size);
static inline int
dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
		      dma_addr_t dma_addr, size_t size,
		      unsigned long attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	BUG_ON(!ops);
	if (ops->get_sgtable)
		return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
					attrs);
	return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
}
#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
#ifndef arch_dma_alloc_attrs
#define arch_dma_alloc_attrs(dev, flag)	(true)
#endif
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t flag,
				    unsigned long attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	BUG_ON(!ops);

	if (dma_alloc_from_coherent(dev, size, dma_handle, &cpu_addr))
		return cpu_addr;

	if (!arch_dma_alloc_attrs(&dev, &flag))
		return NULL;
	if (!ops->alloc)
		return NULL;

	cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
	return cpu_addr;
}
static inline void dma_free_attrs(struct device *dev, size_t size,
				  void *cpu_addr, dma_addr_t dma_handle,
				  unsigned long attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!ops);
	WARN_ON(irqs_disabled());

	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
		return;

	if (!ops->free || !cpu_addr)
		return;

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag)
{
	return dma_alloc_attrs(dev, size, dma_handle, flag, 0);
}
static inline void dma_free_coherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
}
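/*
 * Illustrative sketch (not part of the upstream header): coherent memory
 * stays mapped for the lifetime of the buffer, so no sync calls are needed
 * around device accesses. The ring structure and RING_BYTES constant are
 * hypothetical.
 *
 *	ring->desc = dma_alloc_coherent(dev, RING_BYTES,
 *					&ring->desc_dma, GFP_KERNEL);
 *	if (!ring->desc)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, RING_BYTES, ring->desc, ring->desc_dma);
 */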
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_handle, gfp,
			       DMA_ATTR_NON_CONSISTENT);
}
static inline void dma_free_noncoherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle)
{
	dma_free_attrs(dev, size, cpu_addr, dma_handle,
		       DMA_ATTR_NON_CONSISTENT);
}
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	debug_dma_mapping_error(dev, dma_addr);

	if (get_dma_ops(dev)->mapping_error)
		return get_dma_ops(dev)->mapping_error(dev, dma_addr);

#ifdef DMA_ERROR_CODE
	return dma_addr == DMA_ERROR_CODE;
#else
	return 0;
#endif
}
#ifndef HAVE_ARCH_DMA_SUPPORTED
static inline int dma_supported(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops)
		return 0;
	if (!ops->dma_supported)
		return 1;
	return ops->dma_supported(dev, mask);
}
#endif
#ifndef HAVE_ARCH_DMA_SET_MASK
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	if (ops->set_dma_mask)
		return ops->set_dma_mask(dev, mask);

	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;
	*dev->dma_mask = mask;
	return 0;
}
#endif
static inline u64 dma_get_mask(struct device *dev)
{
	if (dev && dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	return DMA_BIT_MASK(32);
}
#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask);
#else
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	if (!dma_supported(dev, mask))
		return -EIO;
	dev->coherent_dma_mask = mask;
	return 0;
}
#endif
/*
 * Set both the DMA mask and the coherent DMA mask to the same thing.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to
 * the same or smaller than the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
	int rc = dma_set_mask(dev, mask);
	if (rc == 0)
		dma_set_coherent_mask(dev, mask);
	return rc;
}
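/*
 * Illustrative sketch (not part of the upstream header): a probe routine
 * typically sets both masks once, falling back to 32 bits when the wider
 * mask is not supported. The PCI device pointer is hypothetical.
 *
 *	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */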
/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask appropriately setup.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
	dev->dma_mask = &dev->coherent_dma_mask;
	return dma_set_mask_and_coherent(dev, mask);
}
extern u64 dma_get_required_mask(struct device *dev);
#ifndef arch_setup_dma_ops
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
				      u64 size, const struct iommu_ops *iommu,
				      bool coherent) { }
#endif

#ifndef arch_teardown_dma_ops
static inline void arch_teardown_dma_ops(struct device *dev) { }
#endif
static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->max_segment_size)
		return dev->dma_parms->max_segment_size;
	return SZ_64K;
}
static inline unsigned int dma_set_max_seg_size(struct device *dev,
						unsigned int size)
{
	if (dev->dma_parms) {
		dev->dma_parms->max_segment_size = size;
		return 0;
	}
	return -EIO;
}
static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
		return dev->dma_parms->segment_boundary_mask;
	return DMA_BIT_MASK(32);
}
static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
	if (dev->dma_parms) {
		dev->dma_parms->segment_boundary_mask = mask;
		return 0;
	}
	return -EIO;
}
static inline unsigned long dma_max_pfn(struct device *dev)
{
	return *dev->dma_mask >> PAGE_SHIFT;
}
static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, gfp_t flag)
{
	void *ret = dma_alloc_coherent(dev, size, dma_handle,
				       flag | __GFP_ZERO);
	return ret;
}
#ifdef CONFIG_HAS_DMA
static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
	return ARCH_DMA_MINALIGN;
#endif
	return 1;
}
#endif
/* flags for the coherent memory api */
#define	DMA_MEMORY_MAP			0x01
#define DMA_MEMORY_IO			0x02
#define DMA_MEMORY_INCLUDES_CHILDREN	0x04
#define DMA_MEMORY_EXCLUSIVE		0x08
#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				dma_addr_t device_addr, size_t size, int flags);
void dma_release_declared_memory(struct device *dev);
void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size);
#else
static inline int
dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
			    dma_addr_t device_addr, size_t size, int flags)
{
	return 0;
}

static inline void
dma_release_declared_memory(struct device *dev)
{
}

static inline void *
dma_mark_declared_memory_occupied(struct device *dev,
				  dma_addr_t device_addr, size_t size)
{
	return ERR_PTR(-EBUSY);
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
extern void *dmam_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			       dma_addr_t dma_handle);
extern void *dmam_alloc_noncoherent(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr,
				  dma_addr_t dma_handle);
#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
extern int dmam_declare_coherent_memory(struct device *dev,
					phys_addr_t phys_addr,
					dma_addr_t device_addr, size_t size,
					int flags);
extern void dmam_release_declared_memory(struct device *dev);
#else /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
static inline int dmam_declare_coherent_memory(struct device *dev,
				phys_addr_t phys_addr, dma_addr_t device_addr,
				size_t size, gfp_t gfp)
{
	return 0;
}

static inline void dmam_release_declared_memory(struct device *dev)
{
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
static inline void *dma_alloc_wc(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_addr, gfp,
			       DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_alloc_writecombine
#define dma_alloc_writecombine dma_alloc_wc
#endif
static inline void dma_free_wc(struct device *dev, size_t size,
			       void *cpu_addr, dma_addr_t dma_addr)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_addr,
			      DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_free_writecombine
#define dma_free_writecombine dma_free_wc
#endif
static inline int dma_mmap_wc(struct device *dev,
			      struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr,
			      size_t size)
{
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
			      DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_mmap_writecombine
#define dma_mmap_writecombine dma_mmap_wc
#endif
#if defined(CONFIG_NEED_DMA_MAP_STATE) || defined(CONFIG_DMA_API_DEBUG)
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)        dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)          __u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)           ((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)  (((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)             ((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)    (((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)           (0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)  do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME)             (0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)    do { } while (0)
#endif
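/*
 * Illustrative sketch (not part of the upstream header): drivers embed the
 * unmap state in their own structures so the address/length fields only
 * exist when CONFIG_NEED_DMA_MAP_STATE or DMA API debugging is enabled.
 * The my_tx_slot structure and its fields are hypothetical.
 *
 *	struct my_tx_slot {
 *		struct sk_buff *skb;
 *		DEFINE_DMA_UNMAP_ADDR(mapping);
 *		DEFINE_DMA_UNMAP_LEN(len);
 *	};
 *
 *	dma_unmap_addr_set(slot, mapping, dma);
 *	dma_unmap_len_set(slot, len, slot->skb->len);
 *	...
 *	dma_unmap_single(dev, dma_unmap_addr(slot, mapping),
 *			 dma_unmap_len(slot, len), DMA_TO_DEVICE);
 */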