/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/bug.h>
#include <linux/mem_encrypt.h>

/**
 * List of possible attributes associated with a DMA mapping. The semantics
 * of each attribute should be defined in Documentation/core-api/dma-attributes.rst.
 */

/*
 * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
 * may be weakly ordered, that is that reads and writes may pass each other.
 */
#define DMA_ATTR_WEAK_ORDERING		(1UL << 1)
/*
 * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
 * buffered to improve performance.
 */
#define DMA_ATTR_WRITE_COMBINE		(1UL << 2)
/*
 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
 * virtual mapping for the allocated buffer.
 */
#define DMA_ATTR_NO_KERNEL_MAPPING	(1UL << 4)
/*
 * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
 * the CPU cache for the given buffer, assuming that it has already been
 * transferred to the 'device' domain.
 */
#define DMA_ATTR_SKIP_CPU_SYNC		(1UL << 5)
/*
 * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
 * in physical memory.
 */
#define DMA_ATTR_FORCE_CONTIGUOUS	(1UL << 6)
/*
 * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
 * that it's probably not worth the time to try to allocate memory in a way
 * that gives better TLB efficiency.
 */
#define DMA_ATTR_ALLOC_SINGLE_PAGES	(1UL << 7)
/*
 * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
 * allocation failure reports (similarly to __GFP_NOWARN).
 */
#define DMA_ATTR_NO_WARN		(1UL << 8)

/*
 * DMA_ATTR_PRIVILEGED: used to indicate that the buffer is fully
 * accessible at an elevated privilege level (and ideally inaccessible or
 * at least read-only at lesser-privileged levels).
 */
#define DMA_ATTR_PRIVILEGED		(1UL << 9)

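/*
 * Illustrative note (not part of the original header): the attributes above
 * are plain bits meant to be OR'ed together into the "attrs" argument of the
 * *_attrs() helpers declared below, e.g.
 *
 *	attrs = DMA_ATTR_WRITE_COMBINE | DMA_ATTR_NO_KERNEL_MAPPING;
 *	vaddr = dma_alloc_attrs(dev, size, &dma_handle, GFP_KERNEL, attrs);
 */
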
/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform. It can
 * be given to a device to use as a DMA source or target. It is specific to a
 * given device and there may be a translation between the CPU physical address
 * space and the bus address space.
 *
 * DMA_MAPPING_ERROR is the magic error code if a mapping failed. It should not
 * be used directly in drivers, but checked for using dma_mapping_error()
 * instead.
 */
#define DMA_MAPPING_ERROR		(~(dma_addr_t)0)

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
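
/*
 * Illustrative note (not part of the original header): DMA_BIT_MASK(n) builds
 * a mask with the low n bits set, e.g. DMA_BIT_MASK(32) == 0xffffffff and
 * DMA_BIT_MASK(24) == 0xffffff.  The n == 64 case is special-cased because
 * shifting a 64-bit value by 64 is undefined behaviour in C.
 */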

#ifdef CONFIG_DMA_API_DEBUG
void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
void debug_dma_map_single(struct device *dev, const void *addr,
		unsigned long len);
#else
static inline void debug_dma_mapping_error(struct device *dev,
		dma_addr_t dma_addr)
{
}
static inline void debug_dma_map_single(struct device *dev, const void *addr,
		unsigned long len)
{
}
#endif /* CONFIG_DMA_API_DEBUG */

#ifdef CONFIG_HAS_DMA
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	debug_dma_mapping_error(dev, dma_addr);

	if (dma_addr == DMA_MAPPING_ERROR)
		return -ENOMEM;
	return 0;
}

dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
		size_t offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs);
void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs);
int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir,
		unsigned long attrs);
dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs);
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir);
void dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir);
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		int nelems, enum dma_data_direction dir);
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		int nelems, enum dma_data_direction dir);
void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t flag, unsigned long attrs);
void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs);
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs);
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle);
int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
bool dma_can_mmap(struct device *dev);
int dma_supported(struct device *dev, u64 mask);
int dma_set_mask(struct device *dev, u64 mask);
int dma_set_coherent_mask(struct device *dev, u64 mask);
u64 dma_get_required_mask(struct device *dev);
size_t dma_max_mapping_size(struct device *dev);
bool dma_need_sync(struct device *dev, dma_addr_t dma_addr);
unsigned long dma_get_merge_boundary(struct device *dev);
#else /* CONFIG_HAS_DMA */
static inline dma_addr_t dma_map_page_attrs(struct device *dev,
		struct page *page, size_t offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	return 0;
}
static inline void dma_unmap_sg_attrs(struct device *dev,
		struct scatterlist *sg, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
}
static inline dma_addr_t dma_map_resource(struct device *dev,
		phys_addr_t phys_addr, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
}
static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
}
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return -ENOMEM;
}
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
{
	return NULL;
}
static inline void dma_free_attrs(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
{
}
static inline void *dmam_alloc_attrs(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	return NULL;
}
static inline void dmam_free_coherent(struct device *dev, size_t size,
		void *vaddr, dma_addr_t dma_handle)
{
}
static inline int dma_get_sgtable_attrs(struct device *dev,
		struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr,
		size_t size, unsigned long attrs)
{
	return -ENXIO;
}
static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	return -ENXIO;
}
static inline bool dma_can_mmap(struct device *dev)
{
	return false;
}
static inline int dma_supported(struct device *dev, u64 mask)
{
	return 0;
}
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	return -EIO;
}
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	return -EIO;
}
static inline u64 dma_get_required_mask(struct device *dev)
{
	return 0;
}
static inline size_t dma_max_mapping_size(struct device *dev)
{
	return 0;
}
static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	return false;
}
static inline unsigned long dma_get_merge_boundary(struct device *dev)
{
	return 0;
}
#endif /* CONFIG_HAS_DMA */

struct page *dma_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
void dma_free_pages(struct device *dev, size_t size, struct page *page,
		dma_addr_t dma_handle, enum dma_data_direction dir);
void *dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, enum dma_data_direction dir);

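/*
 * Illustrative sketch (not part of the original header): the intended use of
 * dma_alloc_noncoherent().  The buffer is not coherent, so ownership must be
 * handed to the device with an explicit dma_sync_single_for_device() before
 * the hardware may read it.  The helper name and the "fill, then hand to the
 * device" flow are made up for illustration.
 */
static inline void *dma_noncoherent_alloc_example(struct device *dev,
		size_t size, dma_addr_t *dma_handle, gfp_t gfp)
{
	void *vaddr;

	vaddr = dma_alloc_noncoherent(dev, size, dma_handle,
				      DMA_TO_DEVICE, gfp);
	if (!vaddr)
		return NULL;

	memset(vaddr, 0, size);		/* CPU fills the buffer... */
	dma_sync_single_for_device(dev, *dma_handle, size, DMA_TO_DEVICE);
	/* ...and only now may the device start reading from *dma_handle. */
	return vaddr;
}
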
static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	/* DMA must never operate on areas that might be remapped. */
	if (dev_WARN_ONCE(dev, is_vmalloc_addr(ptr),
			  "rejecting DMA map of vmalloc memory\n"))
		return DMA_MAPPING_ERROR;
	debug_dma_map_single(dev, ptr, size);
	return dma_map_page_attrs(dev, virt_to_page(ptr), offset_in_page(ptr),
			size, dir, attrs);
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return dma_unmap_page_attrs(dev, addr, size, dir, attrs);
}

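/*
 * Illustrative sketch (not part of the original header): the usual streaming
 * mapping pattern built from the helpers above.  @buf must be lowmem (e.g.
 * from kmalloc()), never vmalloc() or stack memory; the function name is made
 * up for illustration.
 */
static inline int dma_map_single_example(struct device *dev, void *buf,
		size_t len)
{
	dma_addr_t addr;

	addr = dma_map_single_attrs(dev, buf, len, DMA_TO_DEVICE, 0);
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	/* ... point the hardware at "addr" and wait for the DMA to finish ... */

	dma_unmap_single_attrs(dev, addr, len, DMA_TO_DEVICE, 0);
	return 0;
}
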
static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t addr, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	return dma_sync_single_for_cpu(dev, addr + offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t addr, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	return dma_sync_single_for_device(dev, addr + offset, size, dir);
}

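/*
 * Illustrative sketch (not part of the original header): the range variants
 * above let a driver give the CPU temporary ownership of just a slice of a
 * long-lived streaming mapping, e.g. one descriptor inside a ring.  All names
 * other than the DMA API calls are made up for illustration.
 */
static inline void dma_sync_range_example(struct device *dev, dma_addr_t ring,
		unsigned long desc_offset, size_t desc_size)
{
	/* Give the CPU ownership of one descriptor and inspect it. */
	dma_sync_single_range_for_cpu(dev, ring, desc_offset, desc_size,
				      DMA_FROM_DEVICE);
	/* ... read the descriptor contents here ... */

	/* Hand the descriptor back to the device for further DMA. */
	dma_sync_single_range_for_device(dev, ring, desc_offset, desc_size,
					 DMA_FROM_DEVICE);
}
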
/**
 * dma_map_sgtable - Map the given buffer for DMA
 * @dev: The device for which to perform the DMA operation
 * @sgt: The sg_table object describing the buffer
 * @dir: DMA direction
 * @attrs: Optional DMA attributes for the map operation
 *
 * Maps a buffer described by a scatterlist stored in the given sg_table
 * object for the @dir DMA operation by the @dev device. After success, the
 * ownership of the buffer is transferred to the DMA domain.  One has to
 * call dma_sync_sgtable_for_cpu() or dma_unmap_sgtable() to move the
 * ownership of the buffer back to the CPU domain before the CPU touches
 * the buffer.
 *
 * Returns 0 on success or -EINVAL if the buffer could not be mapped.
 */
static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs)
{
	int nents;

	nents = dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
	if (nents <= 0)
		return -EINVAL;
	sgt->nents = nents;
	return 0;
}

/**
 * dma_unmap_sgtable - Unmap the given buffer for DMA
 * @dev: The device for which to perform the DMA operation
 * @sgt: The sg_table object describing the buffer
 * @dir: DMA direction
 * @attrs: Optional DMA attributes for the unmap operation
 *
 * Unmaps a buffer described by a scatterlist stored in the given sg_table
 * object for the @dir DMA operation by the @dev device. After this function
 * the ownership of the buffer is transferred back to the CPU domain.
 */
static inline void dma_unmap_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs)
{
	dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
}

/**
 * dma_sync_sgtable_for_cpu - Synchronize the given buffer for CPU access
 * @dev: The device for which to perform the DMA operation
 * @sgt: The sg_table object describing the buffer
 * @dir: DMA direction
 *
 * Performs the needed cache synchronization and moves the ownership of the
 * buffer back to the CPU domain, so it is safe for the CPU to access the
 * buffer. Before doing any further DMA operations, one has to transfer the
 * ownership of the buffer back to the DMA domain by calling
 * dma_sync_sgtable_for_device().
 */
static inline void dma_sync_sgtable_for_cpu(struct device *dev,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents, dir);
}

/**
 * dma_sync_sgtable_for_device - Synchronize the given buffer for DMA
 * @dev: The device for which to perform the DMA operation
 * @sgt: The sg_table object describing the buffer
 * @dir: DMA direction
 *
 * Performs the needed cache synchronization and moves the ownership of the
 * buffer back to the DMA domain, so it is safe to perform the DMA operation.
 * Once finished, one has to call dma_sync_sgtable_for_cpu() or
 * dma_unmap_sgtable().
 */
static inline void dma_sync_sgtable_for_device(struct device *dev,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents, dir);
}

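/*
 * Illustrative sketch (not part of the original header): the full
 * map/sync/unmap life cycle of an sg_table based buffer.  The sg_table is
 * assumed to have been built elsewhere (e.g. with sg_alloc_table()); the
 * function name is made up for illustration.
 */
static inline int dma_sgtable_example(struct device *dev, struct sg_table *sgt)
{
	int ret;

	ret = dma_map_sgtable(dev, sgt, DMA_FROM_DEVICE, 0);
	if (ret)
		return ret;

	/* ... the device writes into the buffer ... */

	dma_sync_sgtable_for_cpu(dev, sgt, DMA_FROM_DEVICE);
	/* ... the CPU reads the data, then hands the buffer back ... */
	dma_sync_sgtable_for_device(dev, sgt, DMA_FROM_DEVICE);

	/* ... more DMA ..., and finally: */
	dma_unmap_sgtable(dev, sgt, DMA_FROM_DEVICE, 0);
	return 0;
}
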
#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_handle, gfp,
			(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
}

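/*
 * Illustrative sketch (not part of the original header): coherent memory is
 * typically used for long-lived structures visible to both CPU and device,
 * such as descriptor rings, and needs no dma_sync_*() calls.  The ring size
 * and function name are made up for illustration.
 */
static inline void *dma_coherent_ring_example(struct device *dev,
		dma_addr_t *ring_dma, gfp_t gfp)
{
	void *ring;

	ring = dma_alloc_coherent(dev, SZ_4K, ring_dma, gfp);
	if (!ring)
		return NULL;

	/* ... tell the hardware about *ring_dma, use "ring" from the CPU ... */
	/* Teardown would be: dma_free_coherent(dev, SZ_4K, ring, *ring_dma); */
	return ring;
}
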
static inline u64 dma_get_mask(struct device *dev)
{
	if (dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	return DMA_BIT_MASK(32);
}

/*
 * Set both the DMA mask and the coherent DMA mask to the same thing.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to
 * the same as, or smaller than, the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
	int rc = dma_set_mask(dev, mask);
	if (rc == 0)
		dma_set_coherent_mask(dev, mask);
	return rc;
}

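/*
 * Illustrative sketch (not part of the original header): the usual probe-time
 * idiom of asking for a wide mask first and falling back to 32 bits.  The
 * function name is made up for illustration.
 */
static inline int dma_set_mask_example(struct device *dev)
{
	int ret;

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret) {
		/* Fall back to a 32-bit mask if 64 bits are rejected. */
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
		if (ret)
			dev_warn(dev, "no suitable DMA mask available\n");
	}
	return ret;
}
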
/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask appropriately set up.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
	dev->dma_mask = &dev->coherent_dma_mask;
	return dma_set_mask_and_coherent(dev, mask);
}

/**
 * dma_addressing_limited - return if the device is addressing limited
 * @dev: device to check
 *
 * Return %true if the device's DMA mask is too small to address all memory in
 * the system, else %false.  Lack of addressing bits is the prime reason for
 * bounce buffering, but might not be the only one.
 */
static inline bool dma_addressing_limited(struct device *dev)
{
	return min_not_zero(dma_get_mask(dev), dev->bus_dma_limit) <
			    dma_get_required_mask(dev);
}

static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->max_segment_size)
		return dev->dma_parms->max_segment_size;
	return SZ_64K;
}

static inline int dma_set_max_seg_size(struct device *dev, unsigned int size)
{
	if (dev->dma_parms) {
		dev->dma_parms->max_segment_size = size;
		return 0;
	}
	return -EIO;
}

static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
		return dev->dma_parms->segment_boundary_mask;
	return ULONG_MAX;
}

/**
 * dma_get_seg_boundary_nr_pages - return the segment boundary in "page" units
 * @dev: device to query the boundary for
 * @page_shift: ilog2() of the IOMMU page size
 *
 * Return the segment boundary in IOMMU page units (which may be different from
 * the CPU page size) for the passed in device.
 *
 * If @dev is NULL a boundary of U32_MAX is assumed; this case is just for
 * non-DMA API callers.
 */
static inline unsigned long dma_get_seg_boundary_nr_pages(struct device *dev,
		unsigned int page_shift)
{
	if (!dev)
		return (U32_MAX >> page_shift) + 1;
	return (dma_get_seg_boundary(dev) >> page_shift) + 1;
}

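/*
 * Illustrative note (not part of the original header): with the default
 * boundary mask of ULONG_MAX on a 64-bit kernel and an IOMMU page shift of 12
 * (4 KiB pages), dma_get_seg_boundary_nr_pages() returns
 * (ULONG_MAX >> 12) + 1 == 2^52 pages; a device that instead sets a 4 GiB - 1
 * boundary mask gets (0xffffffff >> 12) + 1 == 0x100000 pages.
 */
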
static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
	if (dev->dma_parms) {
		dev->dma_parms->segment_boundary_mask = mask;
		return 0;
	}
	return -EIO;
}

static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
	return ARCH_DMA_MINALIGN;
#endif
	return 1;
}

static inline void *dmam_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	return dmam_alloc_attrs(dev, size, dma_handle, gfp,
			(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}

static inline void *dma_alloc_wc(struct device *dev, size_t size,
		dma_addr_t *dma_addr, gfp_t gfp)
{
	unsigned long attrs = DMA_ATTR_WRITE_COMBINE;

	if (gfp & __GFP_NOWARN)
		attrs |= DMA_ATTR_NO_WARN;

	return dma_alloc_attrs(dev, size, dma_addr, gfp, attrs);
}

static inline void dma_free_wc(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_addr)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_addr,
			      DMA_ATTR_WRITE_COMBINE);
}

static inline int dma_mmap_wc(struct device *dev,
		struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr,
		size_t size)
{
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
			      DMA_ATTR_WRITE_COMBINE);
}

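/*
 * Illustrative sketch (not part of the original header): write-combined
 * buffers are a common fit for frame-buffer style memory that the CPU fills
 * and then maps into user space.  The function name and flow are made up for
 * illustration.
 */
static inline int dma_wc_mmap_example(struct device *dev,
		struct vm_area_struct *vma, size_t size)
{
	dma_addr_t dma_addr;
	void *cpu_addr;
	int ret;

	cpu_addr = dma_alloc_wc(dev, size, &dma_addr, GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;

	ret = dma_mmap_wc(dev, vma, cpu_addr, dma_addr, size);
	if (ret)
		dma_free_wc(dev, size, cpu_addr, dma_addr);
	return ret;
}
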
#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)	dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)		__u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)		((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	(((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)		((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	(((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)		(0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME)		(0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	do { } while (0)
#endif

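/*
 * Illustrative sketch (not part of the original header): the DEFINE_DMA_UNMAP_*
 * macros let a driver keep the address/length needed for a later unmap in its
 * own state without wasting space when CONFIG_NEED_DMA_MAP_STATE is off.  The
 * structure and function names are made up for illustration.
 */
struct dma_unmap_state_example {
	void *vaddr;			/* CPU view of the buffer */
	DEFINE_DMA_UNMAP_ADDR(addr);	/* stored only if map state is needed */
	DEFINE_DMA_UNMAP_LEN(len);	/* stored only if map state is needed */
};

static inline void dma_unmap_state_example_save(struct dma_unmap_state_example *st,
		dma_addr_t dma, size_t size)
{
	dma_unmap_addr_set(st, addr, dma);
	dma_unmap_len_set(st, len, size);
}

static inline void dma_unmap_state_example_unmap(struct device *dev,
		struct dma_unmap_state_example *st)
{
	dma_unmap_single(dev, dma_unmap_addr(st, addr),
			 dma_unmap_len(st, len), DMA_TO_DEVICE);
}
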
#endif /* _LINUX_DMA_MAPPING_H */