#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-debug.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/kmemcheck.h>
#include <linux/bug.h>
/*
 * List of possible attributes associated with a DMA mapping. The semantics
 * of each attribute should be defined in Documentation/DMA-attributes.txt.
 */

/*
 * DMA_ATTR_WRITE_BARRIER: DMA to a memory region with this attribute
 * forces all pending DMA writes to complete.
 */
#define DMA_ATTR_WRITE_BARRIER		(1UL << 0)

/*
 * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
 * may be weakly ordered, that is that reads and writes may pass each other.
 */
#define DMA_ATTR_WEAK_ORDERING		(1UL << 1)

/*
 * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
 * buffered to improve performance.
 */
#define DMA_ATTR_WRITE_COMBINE		(1UL << 2)

/*
 * DMA_ATTR_NON_CONSISTENT: Lets the platform choose to return either
 * consistent or non-consistent memory as it sees fit.
 */
#define DMA_ATTR_NON_CONSISTENT		(1UL << 3)

/*
 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
 * virtual mapping for the allocated buffer.
 */
#define DMA_ATTR_NO_KERNEL_MAPPING	(1UL << 4)

/*
 * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
 * the CPU cache for the given buffer, assuming that it has already been
 * transferred to the 'device' domain.
 */
#define DMA_ATTR_SKIP_CPU_SYNC		(1UL << 5)

/*
 * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
 * in physical memory.
 */
#define DMA_ATTR_FORCE_CONTIGUOUS	(1UL << 6)

/*
 * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
 * that it is probably not worth the time to try to allocate memory in a way
 * that gives better TLB efficiency.
 */
#define DMA_ATTR_ALLOC_SINGLE_PAGES	(1UL << 7)

/*
 * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
 * allocation failure reports (similarly to __GFP_NOWARN).
 */
#define DMA_ATTR_NO_WARN		(1UL << 8)

/*
 * DMA_ATTR_PRIVILEGED: used to indicate that the buffer is fully
 * accessible at an elevated privilege level (and ideally inaccessible or
 * at least read-only at lesser-privileged levels).
 */
#define DMA_ATTR_PRIVILEGED		(1UL << 9)
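/*
 * Illustrative example (not part of the original header): the DMA_ATTR_*
 * values are bit flags, so callers may OR them together when passing the
 * 'attrs' argument of the *_attrs interfaces declared further down, e.g.:
 *
 *	buf = dma_alloc_attrs(dev, size, &dma_handle, GFP_KERNEL,
 *			      DMA_ATTR_WRITE_COMBINE | DMA_ATTR_NO_WARN);
 *
 * Here 'dev', 'size', 'buf' and 'dma_handle' are hypothetical driver-local
 * variables.
 */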
/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform.
 * It can be given to a device to use as a DMA source or target. A CPU cannot
 * reference a dma_addr_t directly because there may be translation between
 * its physical address space and the bus address space.
 */
struct dma_map_ops {
	void *(*alloc)(struct device *dev, size_t size,
		       dma_addr_t *dma_handle, gfp_t gfp,
		       unsigned long attrs);
	void (*free)(struct device *dev, size_t size,
		     void *vaddr, dma_addr_t dma_handle,
		     unsigned long attrs);
	int (*mmap)(struct device *, struct vm_area_struct *,
		    void *, dma_addr_t, size_t,
		    unsigned long attrs);

	int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
			   dma_addr_t, size_t, unsigned long attrs);

	dma_addr_t (*map_page)(struct device *dev, struct page *page,
			       unsigned long offset, size_t size,
			       enum dma_data_direction dir,
			       unsigned long attrs);
	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction dir,
			   unsigned long attrs);
	/*
	 * map_sg returns 0 on error and a value > 0 on success.
	 * It should never return a value < 0.
	 */
	int (*map_sg)(struct device *dev, struct scatterlist *sg,
		      int nents, enum dma_data_direction dir,
		      unsigned long attrs);
	void (*unmap_sg)(struct device *dev,
			 struct scatterlist *sg, int nents,
			 enum dma_data_direction dir,
			 unsigned long attrs);
	dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
				   size_t size, enum dma_data_direction dir,
				   unsigned long attrs);
	void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs);
	void (*sync_single_for_cpu)(struct device *dev,
				    dma_addr_t dma_handle, size_t size,
				    enum dma_data_direction dir);
	void (*sync_single_for_device)(struct device *dev,
				       dma_addr_t dma_handle, size_t size,
				       enum dma_data_direction dir);
	void (*sync_sg_for_cpu)(struct device *dev,
				struct scatterlist *sg, int nents,
				enum dma_data_direction dir);
	void (*sync_sg_for_device)(struct device *dev,
				   struct scatterlist *sg, int nents,
				   enum dma_data_direction dir);
	int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
	int (*dma_supported)(struct device *dev, u64 mask);
#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
	u64 (*get_required_mask)(struct device *dev);
#endif
};
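/*
 * Illustrative sketch (not part of the original header): an architecture or
 * IOMMU layer typically provides a dma_map_ops instance that fills in only
 * the hooks it needs; the table name and callback names below are
 * hypothetical.
 *
 *	static const struct dma_map_ops my_arch_dma_ops = {
 *		.alloc		= my_arch_dma_alloc,
 *		.free		= my_arch_dma_free,
 *		.map_page	= my_arch_dma_map_page,
 *		.unmap_page	= my_arch_dma_unmap_page,
 *		.map_sg		= my_arch_dma_map_sg,
 *		.mapping_error	= my_arch_dma_mapping_error,
 *	};
 */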
extern const struct dma_map_ops dma_noop_ops;
extern const struct dma_map_ops dma_virt_ops;

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))

#define DMA_MASK_NONE	0x0ULL

static inline int valid_dma_direction(int dma_direction)
{
	return ((dma_direction == DMA_BIDIRECTIONAL) ||
		(dma_direction == DMA_TO_DEVICE) ||
		(dma_direction == DMA_FROM_DEVICE));
}

static inline int is_device_dma_capable(struct device *dev)
{
	return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
}
#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
/*
 * These three functions are only for dma allocator.
 * Don't use them in device drivers.
 */
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
				dma_addr_t *dma_handle, void **ret);
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);

int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
			       void *cpu_addr, size_t size, int *ret);

void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle);
int dma_release_from_global_coherent(int order, void *vaddr);
int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
				  size_t size, int *ret);
#else
#define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)

static inline void *dma_alloc_from_global_coherent(ssize_t size,
						   dma_addr_t *dma_handle)
{
	return NULL;
}

static inline int dma_release_from_global_coherent(int order, void *vaddr)
{
	return 0;
}

static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
						void *cpu_addr, size_t size,
						int *ret)
{
	return 0;
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
#ifdef CONFIG_HAS_DMA
#include <asm/dma-mapping.h>
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (dev && dev->dma_ops)
		return dev->dma_ops;
	return get_arch_dma_ops(dev ? dev->bus : NULL);
}

static inline void set_dma_ops(struct device *dev,
			       const struct dma_map_ops *dma_ops)
{
	dev->dma_ops = dma_ops;
}
#else
/*
 * Define the dma api to allow compilation but not linking of
 * dma dependent code.  Code that depends on the dma-mapping
 * API needs to set 'depends on HAS_DMA' in its Kconfig.
 */
extern const struct dma_map_ops bad_dma_ops;
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	return &bad_dma_ops;
}
#endif
static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
					      size_t size,
					      enum dma_data_direction dir,
					      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(ptr, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, virt_to_page(ptr),
			     offset_in_page(ptr), size,
			     dir, attrs);
	debug_dma_map_page(dev, virt_to_page(ptr),
			   offset_in_page(ptr), size,
			   dir, addr, true);
	return addr;
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
					  size_t size,
					  enum dma_data_direction dir,
					  unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir, true);
}
/*
 * dma_map_sg_attrs returns 0 on error and > 0 on success.
 * It should never return a value < 0.
 */
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir,
				   unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	int i, ents;
	struct scatterlist *s;

	for_each_sg(sg, s, nents, i)
		kmemcheck_mark_initialized(sg_virt(s), s->length);
	BUG_ON(!valid_dma_direction(dir));
	ents = ops->map_sg(dev, sg, nents, dir, attrs);
	debug_dma_map_sg(dev, sg, nents, ents, dir);

	return ents;
}
static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
				      int nents, enum dma_data_direction dir,
				      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}
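/*
 * Illustrative usage sketch (not part of the original header): mapping a
 * scatterlist for a device read of memory and programming each mapped
 * segment into hardware.  'sgl', 'nents' and program_hw() are hypothetical.
 *
 *	struct scatterlist *s;
 *	int i, count;
 *
 *	count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *	if (!count)
 *		return -ENOMEM;
 *	for_each_sg(sgl, s, count, i)
 *		program_hw(sg_dma_address(s), sg_dma_len(s));
 *	...
 *	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *
 * Note that dma_unmap_sg() takes the original 'nents', not the value
 * returned by dma_map_sg().
 */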
static inline dma_addr_t dma_map_page_attrs(struct device *dev,
					    struct page *page,
					    size_t offset, size_t size,
					    enum dma_data_direction dir,
					    unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(page_address(page) + offset, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, page, offset, size, dir, attrs);
	debug_dma_map_page(dev, page, offset, size, dir, addr, false);

	return addr;
}

static inline void dma_unmap_page_attrs(struct device *dev,
					dma_addr_t addr, size_t size,
					enum dma_data_direction dir,
					unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir, false);
}
static inline dma_addr_t dma_map_resource(struct device *dev,
					  phys_addr_t phys_addr,
					  size_t size,
					  enum dma_data_direction dir,
					  unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));

	/* Don't allow RAM to be mapped */
	BUG_ON(pfn_valid(PHYS_PFN(phys_addr)));

	addr = phys_addr;
	if (ops->map_resource)
		addr = ops->map_resource(dev, phys_addr, size, dir, attrs);

	debug_dma_map_resource(dev, phys_addr, size, dir, addr);

	return addr;
}

static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
				      size_t size, enum dma_data_direction dir,
				      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_resource)
		ops->unmap_resource(dev, addr, size, dir, attrs);
	debug_dma_unmap_resource(dev, addr, size, dir);
}
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
					   size_t size,
					   enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t addr, size_t size,
					      enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}
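/*
 * Illustrative usage sketch (not part of the original header): a streaming
 * buffer that stays mapped across several transfers must be handed back and
 * forth between CPU and device with the sync calls above.  'priv', its
 * fields and process_rx_data() are hypothetical.
 *
 *	dma_sync_single_for_cpu(dev, priv->rx_dma, priv->rx_len,
 *				DMA_FROM_DEVICE);
 *	process_rx_data(priv->rx_buf, priv->rx_len);
 *	dma_sync_single_for_device(dev, priv->rx_dma, priv->rx_len,
 *				   DMA_FROM_DEVICE);
 */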
static inline void dma_sync_single_range_for_cpu(struct device *dev,
						 dma_addr_t addr,
						 unsigned long offset,
						 size_t size,
						 enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
						    dma_addr_t addr,
						    unsigned long offset,
						    size_t size,
						    enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
}
static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		    int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		       int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}
#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
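/*
 * Illustrative usage sketch (not part of the original header): the common
 * streaming-mapping pattern using the wrappers above.  'priv', its fields
 * and start_device_transfer() are hypothetical; dma_mapping_error() is
 * declared further down in this header.
 *
 *	dma_addr_t dma;
 *
 *	dma = dma_map_single(dev, priv->buf, priv->buf_len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	start_device_transfer(dev, dma, priv->buf_len);
 *	...
 *	dma_unmap_single(dev, dma, priv->buf_len, DMA_TO_DEVICE);
 */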
extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
			   void *cpu_addr, dma_addr_t dma_addr, size_t size);

void *dma_common_contiguous_remap(struct page *page, size_t size,
				  unsigned long vm_flags,
				  pgprot_t prot, const void *caller);

void *dma_common_pages_remap(struct page **pages, size_t size,
			     unsigned long vm_flags, pgprot_t prot,
			     const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);
/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @handle: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
 * into user space.  The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
static inline int
dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
	       dma_addr_t dma_addr, size_t size, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (ops->mmap)
		return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}

#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
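/*
 * Illustrative usage sketch (not part of the original header): a driver's
 * .mmap file operation can hand a previously allocated coherent buffer to
 * user space with the helper above.  'my_drv_mmap', 'struct my_drv_priv'
 * and its fields are hypothetical.
 *
 *	static int my_drv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_drv_priv *priv = file->private_data;
 *
 *		return dma_mmap_coherent(priv->dev, vma, priv->cpu_addr,
 *					 priv->dma_handle, priv->size);
 *	}
 */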
int
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		       void *cpu_addr, dma_addr_t dma_addr, size_t size);

static inline int
dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
		      dma_addr_t dma_addr, size_t size,
		      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (ops->get_sgtable)
		return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
					attrs);
	return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
}

#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
#ifndef arch_dma_alloc_attrs
#define arch_dma_alloc_attrs(dev, flag)	(true)
#endif

static inline void *dma_alloc_attrs(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t flag,
				    unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
		return cpu_addr;

	if (!arch_dma_alloc_attrs(&dev, &flag))
		return NULL;

	cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
	return cpu_addr;
}
static inline void dma_free_attrs(struct device *dev, size_t size,
				  void *cpu_addr, dma_addr_t dma_handle,
				  unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	WARN_ON(irqs_disabled());

	if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
		return;

	if (!ops->free || !cpu_addr)
		return;

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	return dma_alloc_attrs(dev, size, dma_handle, flag, 0);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *cpu_addr, dma_addr_t dma_handle)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
}
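/*
 * Illustrative usage sketch (not part of the original header): allocating
 * and releasing a coherent descriptor ring.  'priv', its fields and
 * RING_BYTES are hypothetical.
 *
 *	priv->ring = dma_alloc_coherent(dev, RING_BYTES, &priv->ring_dma,
 *					GFP_KERNEL);
 *	if (!priv->ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, RING_BYTES, priv->ring, priv->ring_dma);
 */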
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
					  dma_addr_t *dma_handle, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_handle, gfp,
			       DMA_ATTR_NON_CONSISTENT);
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
					void *cpu_addr, dma_addr_t dma_handle)
{
	dma_free_attrs(dev, size, cpu_addr, dma_handle,
		       DMA_ATTR_NON_CONSISTENT);
}
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	debug_dma_mapping_error(dev, dma_addr);

	if (get_dma_ops(dev)->mapping_error)
		return get_dma_ops(dev)->mapping_error(dev, dma_addr);
	return 0;
}
static inline int dma_supported(struct device *dev, u64 mask)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops->dma_supported)
		return 1;
	return ops->dma_supported(dev, mask);
}
#ifndef HAVE_ARCH_DMA_SET_MASK
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;
	*dev->dma_mask = mask;
	return 0;
}
#endif
static inline u64 dma_get_mask(struct device *dev)
{
	if (dev && dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	return DMA_BIT_MASK(32);
}
#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask);
#else
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	if (!dma_supported(dev, mask))
		return -EIO;
	dev->coherent_dma_mask = mask;
	return 0;
}
#endif
/*
 * Set both the DMA mask and the coherent DMA mask to the same thing.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to
 * the same or smaller than the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
	int rc = dma_set_mask(dev, mask);

	if (rc == 0)
		dma_set_coherent_mask(dev, mask);
	return rc;
}
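/*
 * Illustrative usage sketch (not part of the original header): probe-time
 * mask negotiation, falling back from 64-bit to 32-bit addressing.  'pdev'
 * is a hypothetical PCI device.
 *
 *	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */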
/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask appropriately setup.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
	dev->dma_mask = &dev->coherent_dma_mask;
	return dma_set_mask_and_coherent(dev, mask);
}
extern u64 dma_get_required_mask(struct device *dev);

#ifndef arch_setup_dma_ops
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
				      u64 size, const struct iommu_ops *iommu,
				      bool coherent) { }
#endif

#ifndef arch_teardown_dma_ops
static inline void arch_teardown_dma_ops(struct device *dev) { }
#endif
static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->max_segment_size)
		return dev->dma_parms->max_segment_size;
	return SZ_64K;
}
static inline unsigned int dma_set_max_seg_size(struct device *dev,
						unsigned int size)
{
	if (dev->dma_parms) {
		dev->dma_parms->max_segment_size = size;
		return 0;
	}
	return -EIO;
}
static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
		return dev->dma_parms->segment_boundary_mask;
	return DMA_BIT_MASK(32);
}
static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
	if (dev->dma_parms) {
		dev->dma_parms->segment_boundary_mask = mask;
		return 0;
	}
	return -EIO;
}
static inline unsigned long dma_max_pfn(struct device *dev)
{
	return *dev->dma_mask >> PAGE_SHIFT;
}
static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, gfp_t flag)
{
	void *ret = dma_alloc_coherent(dev, size, dma_handle,
				       flag | __GFP_ZERO);
	return ret;
}
#ifdef CONFIG_HAS_DMA
static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
	return ARCH_DMA_MINALIGN;
#endif
	return 1;
}
#endif
/* flags for the coherent memory api */
#define DMA_MEMORY_MAP			0x01
#define DMA_MEMORY_IO			0x02
#define DMA_MEMORY_INCLUDES_CHILDREN	0x04
#define DMA_MEMORY_EXCLUSIVE		0x08
#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				dma_addr_t device_addr, size_t size, int flags);
void dma_release_declared_memory(struct device *dev);
void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size);
#else
static inline int
dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
			    dma_addr_t device_addr, size_t size, int flags)
{
	return 0;
}

static inline void
dma_release_declared_memory(struct device *dev)
{
}

static inline void *
dma_mark_declared_memory_occupied(struct device *dev,
				  dma_addr_t device_addr, size_t size)
{
	return ERR_PTR(-EBUSY);
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
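/*
 * Illustrative usage sketch (not part of the original header): a platform
 * driver declaring a device-local SRAM region for coherent allocations,
 * using the DMA_MEMORY_* flags defined above.  'pdev' and 'res' are
 * hypothetical, the device-visible address is assumed to equal the physical
 * address, and return-value handling is omitted.
 *
 *	rc = dma_declare_coherent_memory(&pdev->dev, res->start, res->start,
 *					 resource_size(res),
 *					 DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE);
 *	...
 *	dma_release_declared_memory(&pdev->dev);
 */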
#ifdef CONFIG_HAS_DMA
int dma_configure(struct device *dev);
void dma_deconfigure(struct device *dev);
#else
static inline int dma_configure(struct device *dev)
{
	return 0;
}

static inline void dma_deconfigure(struct device *dev) {}
#endif
extern void *dmam_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			       dma_addr_t dma_handle);
extern void *dmam_alloc_attrs(struct device *dev, size_t size,
			      dma_addr_t *dma_handle, gfp_t gfp,
			      unsigned long attrs);
#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
extern int dmam_declare_coherent_memory(struct device *dev,
					phys_addr_t phys_addr,
					dma_addr_t device_addr, size_t size,
					int flags);
extern void dmam_release_declared_memory(struct device *dev);
#else /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
static inline int dmam_declare_coherent_memory(struct device *dev,
				phys_addr_t phys_addr, dma_addr_t device_addr,
				size_t size, gfp_t gfp)
{
	return 0;
}

static inline void dmam_release_declared_memory(struct device *dev)
{
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
static inline void *dma_alloc_wc(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_addr, gfp,
			       DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_alloc_writecombine
#define dma_alloc_writecombine dma_alloc_wc
#endif

static inline void dma_free_wc(struct device *dev, size_t size,
			       void *cpu_addr, dma_addr_t dma_addr)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_addr,
			      DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_free_writecombine
#define dma_free_writecombine dma_free_wc
#endif

static inline int dma_mmap_wc(struct device *dev,
			      struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr,
			      size_t size)
{
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
			      DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_mmap_writecombine
#define dma_mmap_writecombine dma_mmap_wc
#endif
#if defined(CONFIG_NEED_DMA_MAP_STATE) || defined(CONFIG_DMA_API_DEBUG)
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)        dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)          __u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)           ((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)  (((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)             ((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)    (((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)           (0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)  do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME)             (0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)    do { } while (0)
#endif
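/*
 * Illustrative usage sketch (not part of the original header): a driver can
 * embed unmap bookkeeping in its own structures so that the fields compile
 * away when unmap information is not needed.  'struct tx_slot', 'slot',
 * 'skb' and 'dma' are hypothetical.
 *
 *	struct tx_slot {
 *		struct sk_buff *skb;
 *		DEFINE_DMA_UNMAP_ADDR(mapping);
 *		DEFINE_DMA_UNMAP_LEN(len);
 *	};
 *
 *	dma_unmap_addr_set(slot, mapping, dma);
 *	dma_unmap_len_set(slot, len, skb->len);
 *	...
 *	dma_unmap_single(dev, dma_unmap_addr(slot, mapping),
 *			 dma_unmap_len(slot, len), DMA_TO_DEVICE);
 */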