#ifndef _ASM_IA64_DMA_MAPPING_H
#define _ASM_IA64_DMA_MAPPING_H

/*
 * Copyright (C) 2003-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <asm/machvec.h>
#include <linux/scatterlist.h>
#include <asm/swiotlb.h>

extern struct dma_map_ops *dma_ops;
extern struct ia64_machine_vector ia64_mv;
extern void set_iommu_machvec(void);

extern void machvec_dma_sync_single(struct device *, dma_addr_t, size_t,
				    enum dma_data_direction);
extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
				enum dma_data_direction);
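
/*
 * Every wrapper below looks up the machine vector's struct dma_map_ops
 * via platform_dma_get_ops(dev) and forwards to the matching hook, so
 * the IOMMU/swiotlb choice is made per platform rather than in this
 * header.  Note that dma_alloc_coherent() unconditionally ORs GFP_DMA
 * into the caller's allocation flags.
 */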
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *daddr, gfp_t gfp)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	return ops->alloc_coherent(dev, size, daddr, gfp | GFP_DMA);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *caddr, dma_addr_t daddr)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	ops->free_coherent(dev, size, caddr, daddr);
}
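
/*
 * Illustrative coherent-allocation usage; "dev" and the error path are
 * hypothetical driver code, not part of this header:
 *
 *	dma_addr_t bus;
 *	void *cpu = dma_alloc_coherent(dev, PAGE_SIZE, &bus, GFP_KERNEL);
 *	if (!cpu)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, PAGE_SIZE, cpu, bus);
 *
 * IA-64 is cache-coherent, so the "noncoherent" macros below are plain
 * aliases for the coherent versions.
 */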
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

static inline dma_addr_t dma_map_single_attrs(struct device *dev,
					      void *caddr, size_t size,
					      enum dma_data_direction dir,
					      struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	return ops->map_page(dev, virt_to_page(caddr),
			     (unsigned long)caddr & ~PAGE_MASK, size,
			     dir, attrs);
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t daddr,
					  size_t size,
					  enum dma_data_direction dir,
					  struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	ops->unmap_page(dev, daddr, size, dir, attrs);
}
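
/*
 * Illustrative streaming-mapping usage; "dev", "buf" and "len" are
 * hypothetical driver variables, not part of this header:
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	... let the device DMA from the buffer ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */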
#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)

static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
				   int nents, enum dma_data_direction dir,
				   struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	return ops->map_sg(dev, sgl, nents, dir, attrs);
}

static inline void dma_unmap_sg_attrs(struct device *dev,
				      struct scatterlist *sgl, int nents,
				      enum dma_data_direction dir,
				      struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	ops->unmap_sg(dev, sgl, nents, dir, attrs);
}
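
/*
 * dma_map_sg() may return fewer DMA segments than the nents passed in
 * (an IOMMU may coalesce adjacent entries); callers must still pass the
 * original nents to dma_unmap_sg().  See Documentation/DMA-API.txt.
 */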
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)
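
/*
 * Ownership rules for the sync helpers below (Documentation/DMA-API.txt):
 * call dma_sync_*_for_cpu() before the CPU touches a streaming buffer
 * owned by the device, and dma_sync_*_for_device() before handing the
 * buffer back to the device.
 */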
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t daddr,
					   size_t size,
					   enum dma_data_direction dir)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	ops->sync_single_for_cpu(dev, daddr, size, dir);
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
				       struct scatterlist *sgl,
				       int nents, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	ops->sync_sg_for_cpu(dev, sgl, nents, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t daddr,
					      size_t size,
					      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	ops->sync_single_for_device(dev, daddr, size, dir);
}

static inline void dma_sync_sg_for_device(struct device *dev,
					  struct scatterlist *sgl,
					  int nents,
					  enum dma_data_direction dir)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	ops->sync_sg_for_device(dev, sgl, nents, dir);
}

static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	return ops->mapping_error(dev, daddr);
}
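
/*
 * Page-based variants.  dma_map_single_attrs() above is itself built on
 * top of ->map_page(), so these are the primitive forms.
 */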
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	return ops->map_page(dev, page, offset, size, dir, NULL);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
				  size_t size, enum dma_data_direction dir)
{
	dma_unmap_single(dev, addr, size, dir);
}

/*
 * Rest of this file is part of the "Advanced DMA API".  Use at your own risk.
 * See Documentation/DMA-API.txt for details.
 */
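
/*
 * Note: these range variants simply forward to the full-buffer sync
 * functions and ignore the offset argument.
 */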
#define dma_sync_single_range_for_cpu(dev, dma_handle, offset, size, dir)	\
	dma_sync_single_for_cpu(dev, dma_handle, size, dir)
#define dma_sync_single_range_for_device(dev, dma_handle, offset, size, dir)	\
	dma_sync_single_for_device(dev, dma_handle, size, dir)

static inline int dma_supported(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	return ops->dma_supported(dev, mask);
}

static inline int
dma_set_mask (struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;
	*dev->dma_mask = mask;
	return 0;
}
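
/*
 * Illustrative mask negotiation; "dev" and the warning are hypothetical
 * driver code, not part of this header:
 *
 *	if (dma_set_mask(dev, DMA_BIT_MASK(32)))
 *		dev_warn(dev, "no suitable DMA mask available\n");
 */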

extern int dma_get_cache_alignment(void);

static inline void
dma_cache_sync (struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction dir)
{
	/*
	 * IA-64 is cache-coherent, so this is mostly a no-op.  However, we do need to
	 * ensure that dma_cache_sync() enforces order, hence the mb().
	 */
	mb();
}

#define dma_is_consistent(d, h)	(1)	/* all we do is coherent memory... */

#endif /* _ASM_IA64_DMA_MAPPING_H */