/* include/linux/dma-mapping.h */
#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-debug.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/kmemcheck.h>
#include <linux/bug.h>
/**
 * List of possible attributes associated with a DMA mapping. The semantics
 * of each attribute should be defined in Documentation/DMA-attributes.txt.
 *
 * DMA_ATTR_WRITE_BARRIER: DMA to a memory region with this attribute
 * forces all pending DMA writes to complete.
 */
#define DMA_ATTR_WRITE_BARRIER		(1UL << 0)
/*
 * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
 * may be weakly ordered, that is, reads and writes may pass each other.
 */
#define DMA_ATTR_WEAK_ORDERING		(1UL << 1)
/*
 * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
 * buffered to improve performance.
 */
#define DMA_ATTR_WRITE_COMBINE		(1UL << 2)
/*
 * DMA_ATTR_NON_CONSISTENT: Lets the platform choose to return either
 * consistent or non-consistent memory as it sees fit.
 */
#define DMA_ATTR_NON_CONSISTENT		(1UL << 3)
/*
 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
 * virtual mapping for the allocated buffer.
 */
#define DMA_ATTR_NO_KERNEL_MAPPING	(1UL << 4)
/*
 * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
 * the CPU cache for the given buffer, assuming that it has already been
 * transferred to the 'device' domain.
 */
#define DMA_ATTR_SKIP_CPU_SYNC		(1UL << 5)
/*
 * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
 * in physical memory.
 */
#define DMA_ATTR_FORCE_CONTIGUOUS	(1UL << 6)
/*
 * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
 * that it's probably not worth the time to try to allocate memory in a way
 * that gives better TLB efficiency.
 */
#define DMA_ATTR_ALLOC_SINGLE_PAGES	(1UL << 7)
/*
 * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
 * allocation failure reports (similarly to __GFP_NOWARN).
 */
#define DMA_ATTR_NO_WARN	(1UL << 8)

/*
 * DMA_ATTR_PRIVILEGED: Used to indicate that the buffer is fully
 * accessible at an elevated privilege level (and ideally inaccessible or
 * at least read-only at lesser-privileged levels).
 */
#define DMA_ATTR_PRIVILEGED		(1UL << 9)
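/*
 * Illustrative sketch, not part of the original header: the DMA_ATTR_*
 * values above are single bits, so callers combine them with bitwise OR
 * and pass the result as the 'attrs' argument of the *_attrs API variants
 * declared later in this file. The function and its arguments below are
 * hypothetical.
 */
#if 0	/* example only, not compiled */
static void *example_alloc_quiet(struct device *dev, dma_addr_t *handle)
{
	/* no kernel virtual mapping needed, and stay quiet on failure */
	return dma_alloc_attrs(dev, SZ_1M, handle, GFP_KERNEL,
			       DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
}
#endif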
/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform.
 * It can be given to a device to use as a DMA source or target. A CPU cannot
 * reference a dma_addr_t directly because there may be translation between
 * its physical address space and the bus address space.
 */
struct dma_map_ops {
	void* (*alloc)(struct device *dev, size_t size,
		       dma_addr_t *dma_handle, gfp_t gfp,
		       unsigned long attrs);
	void (*free)(struct device *dev, size_t size,
		     void *vaddr, dma_addr_t dma_handle,
		     unsigned long attrs);
	int (*mmap)(struct device *, struct vm_area_struct *,
		    void *, dma_addr_t, size_t,
		    unsigned long attrs);

	int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
			   dma_addr_t, size_t, unsigned long attrs);

	dma_addr_t (*map_page)(struct device *dev, struct page *page,
			       unsigned long offset, size_t size,
			       enum dma_data_direction dir,
			       unsigned long attrs);
	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction dir,
			   unsigned long attrs);
	/*
	 * map_sg returns 0 on error and a value > 0 on success.
	 * It should never return a value < 0.
	 */
	int (*map_sg)(struct device *dev, struct scatterlist *sg,
		      int nents, enum dma_data_direction dir,
		      unsigned long attrs);
	void (*unmap_sg)(struct device *dev,
			 struct scatterlist *sg, int nents,
			 enum dma_data_direction dir,
			 unsigned long attrs);
	dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
				   size_t size, enum dma_data_direction dir,
				   unsigned long attrs);
	void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs);
	void (*sync_single_for_cpu)(struct device *dev,
				    dma_addr_t dma_handle, size_t size,
				    enum dma_data_direction dir);
	void (*sync_single_for_device)(struct device *dev,
				       dma_addr_t dma_handle, size_t size,
				       enum dma_data_direction dir);
	void (*sync_sg_for_cpu)(struct device *dev,
				struct scatterlist *sg, int nents,
				enum dma_data_direction dir);
	void (*sync_sg_for_device)(struct device *dev,
				   struct scatterlist *sg, int nents,
				   enum dma_data_direction dir);
	int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
	int (*dma_supported)(struct device *dev, u64 mask);
	int (*set_dma_mask)(struct device *dev, u64 mask);
#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
	u64 (*get_required_mask)(struct device *dev);
#endif
	int is_phys;
};
extern const struct dma_map_ops dma_noop_ops;
extern const struct dma_map_ops dma_virt_ops;

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))

#define DMA_MASK_NONE	0x0ULL
static inline int valid_dma_direction(int dma_direction)
{
	return ((dma_direction == DMA_BIDIRECTIONAL) ||
		(dma_direction == DMA_TO_DEVICE) ||
		(dma_direction == DMA_FROM_DEVICE));
}
static inline int is_device_dma_capable(struct device *dev)
{
	return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
}
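/*
 * Illustrative sketch, not part of the original header: DMA_BIT_MASK(n)
 * yields a mask with the low n bits set; n == 64 is special-cased because
 * shifting a 64-bit value by 64 is undefined behaviour. These checks
 * (placed in any function body) would all pass.
 */
#if 0	/* example only, not compiled */
static void example_mask_values(void)
{
	BUILD_BUG_ON(DMA_BIT_MASK(24) != 0x0000000000ffffffULL);
	BUILD_BUG_ON(DMA_BIT_MASK(32) != 0x00000000ffffffffULL);
	BUILD_BUG_ON(DMA_BIT_MASK(64) != ~0ULL);
}
#endif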
#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
/*
 * These three functions are only for the DMA allocator.
 * Don't use them in device drivers.
 */
int dma_alloc_from_coherent(struct device *dev, ssize_t size,
			    dma_addr_t *dma_handle, void **ret);
int dma_release_from_coherent(struct device *dev, int order, void *vaddr);

int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
			   void *cpu_addr, size_t size, int *ret);
#else
#define dma_alloc_from_coherent(dev, size, handle, ret) (0)
#define dma_release_from_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_coherent(dev, vma, vaddr, order, ret) (0)
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
#ifdef CONFIG_HAS_DMA
#include <asm/dma-mapping.h>
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (dev && dev->dma_ops)
		return dev->dma_ops;
	return get_arch_dma_ops(dev ? dev->bus : NULL);
}

static inline void set_dma_ops(struct device *dev,
			       const struct dma_map_ops *dma_ops)
{
	dev->dma_ops = dma_ops;
}
#else
/*
 * Define the dma api to allow compilation but not linking of
 * dma dependent code. Code that depends on the dma-mapping
 * API needs to set 'depends on HAS_DMA' in its Kconfig.
 */
extern const struct dma_map_ops bad_dma_ops;
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	return &bad_dma_ops;
}
#endif
static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
					      size_t size,
					      enum dma_data_direction dir,
					      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(ptr, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, virt_to_page(ptr),
			     offset_in_page(ptr), size,
			     dir, attrs);
	debug_dma_map_page(dev, virt_to_page(ptr),
			   offset_in_page(ptr), size,
			   dir, addr, true);
	return addr;
}
static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
					  size_t size,
					  enum dma_data_direction dir,
					  unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir, true);
}
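/*
 * Illustrative sketch, not part of the original header: the usual
 * streaming-DMA pattern built on the two helpers above, using the
 * attr-less shorthand macros defined later in this file. The function
 * and its arguments are hypothetical; real callers must check
 * dma_mapping_error() (also defined later) before using the address.
 */
#if 0	/* example only, not compiled */
static int example_tx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t addr;

	addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	/* ... hand 'addr' to the hardware and wait for completion ... */

	dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
	return 0;
}
#endif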
/*
 * dma_map_sg_attrs returns 0 on error and > 0 on success.
 * It should never return a value < 0.
 */
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir,
				   unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	int i, ents;
	struct scatterlist *s;

	for_each_sg(sg, s, nents, i)
		kmemcheck_mark_initialized(sg_virt(s), s->length);
	BUG_ON(!valid_dma_direction(dir));
	ents = ops->map_sg(dev, sg, nents, dir, attrs);
	BUG_ON(ents < 0);
	debug_dma_map_sg(dev, sg, nents, ents, dir);

	return ents;
}
static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
				      int nents, enum dma_data_direction dir,
				      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}
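/*
 * Illustrative sketch, not part of the original header: mapping a
 * scatterlist. dma_map_sg() may coalesce entries, so the returned count
 * is what the device should be programmed with, while unmapping still
 * takes the original nents. The function and its arguments are
 * hypothetical.
 */
#if 0	/* example only, not compiled */
static int example_map_sgl(struct device *dev, struct scatterlist *sgl,
			   int nents)
{
	int mapped = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);

	if (mapped == 0)
		return -ENOMEM;

	/* ... program the device with the 'mapped' coalesced segments ... */

	dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);	/* original nents */
	return 0;
}
#endif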
static inline dma_addr_t dma_map_page_attrs(struct device *dev,
					    struct page *page,
					    size_t offset, size_t size,
					    enum dma_data_direction dir,
					    unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(page_address(page) + offset, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, page, offset, size, dir, attrs);
	debug_dma_map_page(dev, page, offset, size, dir, addr, false);

	return addr;
}

static inline void dma_unmap_page_attrs(struct device *dev,
					dma_addr_t addr, size_t size,
					enum dma_data_direction dir,
					unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir, false);
}
static inline dma_addr_t dma_map_resource(struct device *dev,
					  phys_addr_t phys_addr,
					  size_t size,
					  enum dma_data_direction dir,
					  unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));

	/* Don't allow RAM to be mapped */
	BUG_ON(pfn_valid(PHYS_PFN(phys_addr)));

	addr = phys_addr;
	if (ops->map_resource)
		addr = ops->map_resource(dev, phys_addr, size, dir, attrs);

	debug_dma_map_resource(dev, phys_addr, size, dir, addr);

	return addr;
}

static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
				      size_t size, enum dma_data_direction dir,
				      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_resource)
		ops->unmap_resource(dev, addr, size, dir, attrs);
	debug_dma_unmap_resource(dev, addr, size, dir);
}
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
					   size_t size,
					   enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t addr, size_t size,
					      enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}
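/*
 * Illustrative sketch, not part of the original header: a buffer that
 * stays mapped across transfers must be handed to the CPU before the CPU
 * touches it, and back to the device before the next transfer. The
 * function and its arguments are hypothetical.
 */
#if 0	/* example only, not compiled */
static void example_rx_complete(struct device *dev, dma_addr_t addr,
				size_t len)
{
	/* the device has finished writing; give the buffer to the CPU */
	dma_sync_single_for_cpu(dev, addr, len, DMA_FROM_DEVICE);

	/* ... the CPU may now safely read the received data ... */

	/* give the buffer back to the device for the next transfer */
	dma_sync_single_for_device(dev, addr, len, DMA_FROM_DEVICE);
}
#endif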
static inline void dma_sync_single_range_for_cpu(struct device *dev,
						 dma_addr_t addr,
						 unsigned long offset,
						 size_t size,
						 enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
						    dma_addr_t addr,
						    unsigned long offset,
						    size_t size,
						    enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
}
static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		    int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		       int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}
#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
			   void *cpu_addr, dma_addr_t dma_addr, size_t size);

void *dma_common_contiguous_remap(struct page *page, size_t size,
			unsigned long vm_flags,
			pgprot_t prot, const void *caller);

void *dma_common_pages_remap(struct page **pages, size_t size,
			unsigned long vm_flags, pgprot_t prot,
			const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);
/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @dma_addr: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
 * into user space. The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
static inline int
dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
	       dma_addr_t dma_addr, size_t size, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!ops);
	if (ops->mmap)
		return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}
#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
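/*
 * Illustrative sketch, not part of the original header: a driver exporting
 * a coherent buffer to user space from its ->mmap() file operation.
 * 'struct mydev' and its fields are hypothetical.
 */
#if 0	/* example only, not compiled */
static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct mydev *md = file->private_data;

	return dma_mmap_coherent(md->dev, vma, md->cpu_addr,
				 md->dma_handle, md->size);
}
#endif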
int
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		       void *cpu_addr, dma_addr_t dma_addr, size_t size);

static inline int
dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
		      dma_addr_t dma_addr, size_t size,
		      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!ops);
	if (ops->get_sgtable)
		return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
					attrs);
	return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
}

#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
#ifndef arch_dma_alloc_attrs
#define arch_dma_alloc_attrs(dev, flag)	(true)
#endif

static inline void *dma_alloc_attrs(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t flag,
				    unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	BUG_ON(!ops);

	if (dma_alloc_from_coherent(dev, size, dma_handle, &cpu_addr))
		return cpu_addr;

	if (!arch_dma_alloc_attrs(&dev, &flag))
		return NULL;
	if (!ops->alloc)
		return NULL;

	cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
	return cpu_addr;
}
static inline void dma_free_attrs(struct device *dev, size_t size,
				  void *cpu_addr, dma_addr_t dma_handle,
				  unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!ops);
	WARN_ON(irqs_disabled());

	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
		return;

	if (!ops->free || !cpu_addr)
		return;

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	ops->free(dev, size, cpu_addr, dma_handle, attrs);
}

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	return dma_alloc_attrs(dev, size, dma_handle, flag, 0);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *cpu_addr, dma_addr_t dma_handle)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
}
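/*
 * Illustrative sketch, not part of the original header: allocating and
 * freeing a coherent descriptor ring. The names and the SZ_4K size are
 * hypothetical; the same dma_handle must be passed back when freeing.
 */
#if 0	/* example only, not compiled */
static int example_ring(struct device *dev)
{
	dma_addr_t ring_dma;
	void *ring;

	ring = dma_alloc_coherent(dev, SZ_4K, &ring_dma, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	/* ... tell the device the ring lives at 'ring_dma' ... */

	dma_free_coherent(dev, SZ_4K, ring, ring_dma);
	return 0;
}
#endif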
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
					  dma_addr_t *dma_handle, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_handle, gfp,
			       DMA_ATTR_NON_CONSISTENT);
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
					void *cpu_addr, dma_addr_t dma_handle)
{
	dma_free_attrs(dev, size, cpu_addr, dma_handle,
		       DMA_ATTR_NON_CONSISTENT);
}
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	debug_dma_mapping_error(dev, dma_addr);

	if (get_dma_ops(dev)->mapping_error)
		return get_dma_ops(dev)->mapping_error(dev, dma_addr);

#ifdef DMA_ERROR_CODE
	return dma_addr == DMA_ERROR_CODE;
#else
	return 0;
#endif
}
#ifndef HAVE_ARCH_DMA_SUPPORTED
static inline int dma_supported(struct device *dev, u64 mask)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops)
		return 0;
	if (!ops->dma_supported)
		return 1;
	return ops->dma_supported(dev, mask);
}
#endif

#ifndef HAVE_ARCH_DMA_SET_MASK
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (ops->set_dma_mask)
		return ops->set_dma_mask(dev, mask);

	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;
	*dev->dma_mask = mask;
	return 0;
}
#endif
static inline u64 dma_get_mask(struct device *dev)
{
	if (dev && dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	return DMA_BIT_MASK(32);
}
#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask);
#else
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	if (!dma_supported(dev, mask))
		return -EIO;
	dev->coherent_dma_mask = mask;
	return 0;
}
#endif
/*
 * Set both the DMA mask and the coherent DMA mask to the same thing.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to
 * the same or smaller than the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
	int rc = dma_set_mask(dev, mask);

	if (rc == 0)
		dma_set_coherent_mask(dev, mask);
	return rc;
}

/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask appropriately set up.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
	dev->dma_mask = &dev->coherent_dma_mask;
	return dma_set_mask_and_coherent(dev, mask);
}
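/*
 * Illustrative sketch, not part of the original header: typical probe-time
 * mask negotiation, preferring 64-bit addressing and falling back to
 * 32-bit. The fallback pattern is common driver practice, not something
 * this header mandates.
 */
#if 0	/* example only, not compiled */
static int example_set_masks(struct device *dev)
{
	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
		return -EIO;	/* no usable DMA addressing mode */
	return 0;
}
#endif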
extern u64 dma_get_required_mask(struct device *dev);

#ifndef arch_setup_dma_ops
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
				      u64 size, const struct iommu_ops *iommu,
				      bool coherent) { }
#endif

#ifndef arch_teardown_dma_ops
static inline void arch_teardown_dma_ops(struct device *dev) { }
#endif
static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->max_segment_size)
		return dev->dma_parms->max_segment_size;
	return SZ_64K;
}

static inline unsigned int dma_set_max_seg_size(struct device *dev,
						unsigned int size)
{
	if (dev->dma_parms) {
		dev->dma_parms->max_segment_size = size;
		return 0;
	}
	return -EIO;
}
static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
		return dev->dma_parms->segment_boundary_mask;
	return DMA_BIT_MASK(32);
}

static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
	if (dev->dma_parms) {
		dev->dma_parms->segment_boundary_mask = mask;
		return 0;
	}
	return -EIO;
}
#ifndef dma_max_pfn
static inline unsigned long dma_max_pfn(struct device *dev)
{
	return *dev->dma_mask >> PAGE_SHIFT;
}
#endif
static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, gfp_t flag)
{
	void *ret = dma_alloc_coherent(dev, size, dma_handle,
				       flag | __GFP_ZERO);
	return ret;
}

#ifdef CONFIG_HAS_DMA
static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
	return ARCH_DMA_MINALIGN;
#endif
	return 1;
}
#endif /* CONFIG_HAS_DMA */
/* flags for the coherent memory api */
#define DMA_MEMORY_MAP			0x01
#define DMA_MEMORY_IO			0x02
#define DMA_MEMORY_INCLUDES_CHILDREN	0x04
#define DMA_MEMORY_EXCLUSIVE		0x08
#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				dma_addr_t device_addr, size_t size, int flags);
void dma_release_declared_memory(struct device *dev);
void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size);
#else
static inline int
dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
			    dma_addr_t device_addr, size_t size, int flags)
{
	return 0;
}

static inline void
dma_release_declared_memory(struct device *dev)
{
}

static inline void *
dma_mark_declared_memory_occupied(struct device *dev,
				  dma_addr_t device_addr, size_t size)
{
	return ERR_PTR(-EBUSY);
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
#ifdef CONFIG_HAS_DMA
int dma_configure(struct device *dev);
void dma_deconfigure(struct device *dev);
#else
static inline int dma_configure(struct device *dev)
{
	return 0;
}

static inline void dma_deconfigure(struct device *dev) {}
#endif
extern void *dmam_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			       dma_addr_t dma_handle);
extern void *dmam_alloc_noncoherent(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr,
				  dma_addr_t dma_handle);
#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
extern int dmam_declare_coherent_memory(struct device *dev,
					phys_addr_t phys_addr,
					dma_addr_t device_addr, size_t size,
					int flags);
extern void dmam_release_declared_memory(struct device *dev);
#else /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
static inline int dmam_declare_coherent_memory(struct device *dev,
					phys_addr_t phys_addr,
					dma_addr_t device_addr,
					size_t size, gfp_t gfp)
{
	return 0;
}

static inline void dmam_release_declared_memory(struct device *dev)
{
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
static inline void *dma_alloc_wc(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_addr, gfp,
			       DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_alloc_writecombine
#define dma_alloc_writecombine dma_alloc_wc
#endif

static inline void dma_free_wc(struct device *dev, size_t size,
			       void *cpu_addr, dma_addr_t dma_addr)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_addr,
			      DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_free_writecombine
#define dma_free_writecombine dma_free_wc
#endif
static inline int dma_mmap_wc(struct device *dev,
			      struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr,
			      size_t size)
{
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
			      DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_mmap_writecombine
#define dma_mmap_writecombine dma_mmap_wc
#endif
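/*
 * Illustrative sketch, not part of the original header: write-combined
 * buffers suit data the CPU writes and the device reads, such as
 * framebuffers. The function and its arguments are hypothetical; the
 * buffer would typically be exported to user space with dma_mmap_wc()
 * from the driver's ->mmap().
 */
#if 0	/* example only, not compiled */
static void *example_alloc_fb(struct device *dev, size_t fb_size,
			      dma_addr_t *fb_dma)
{
	return dma_alloc_wc(dev, fb_size, fb_dma, GFP_KERNEL);
}
#endif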
#if defined(CONFIG_NEED_DMA_MAP_STATE) || defined(CONFIG_DMA_API_DEBUG)
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)        dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)          __u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)          ((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)            ((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)   (((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)          (0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME)            (0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)   do { } while (0)
#endif
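/*
 * Illustrative sketch, not part of the original header: the macros above
 * let a driver keep unmap bookkeeping that compiles away entirely when
 * neither CONFIG_NEED_DMA_MAP_STATE nor CONFIG_DMA_API_DEBUG is set.
 * 'struct example_tx_slot' and the callback are hypothetical.
 */
#if 0	/* example only, not compiled */
struct example_tx_slot {
	DEFINE_DMA_UNMAP_ADDR(addr);	/* dma_addr_t addr, or nothing */
	DEFINE_DMA_UNMAP_LEN(len);	/* __u32 len, or nothing */
};

static void example_tx_done(struct device *dev, struct example_tx_slot *slot)
{
	dma_unmap_single(dev, dma_unmap_addr(slot, addr),
			 dma_unmap_len(slot, len), DMA_TO_DEVICE);
}
#endif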