#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-debug.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/kmemcheck.h>
#include <linux/bug.h>

/**
 * List of possible attributes associated with a DMA mapping. The semantics
 * of each attribute should be defined in Documentation/DMA-attributes.txt.
 *
 * DMA_ATTR_WRITE_BARRIER: DMA to a memory region with this attribute
 * forces all pending DMA writes to complete.
 */
#define DMA_ATTR_WRITE_BARRIER		(1UL << 0)
/*
 * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
 * may be weakly ordered, that is that reads and writes may pass each other.
 */
#define DMA_ATTR_WEAK_ORDERING		(1UL << 1)
/*
 * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
 * buffered to improve performance.
 */
#define DMA_ATTR_WRITE_COMBINE		(1UL << 2)
/*
 * DMA_ATTR_NON_CONSISTENT: Lets the platform choose to return either
 * consistent or non-consistent memory as it sees fit.
 */
#define DMA_ATTR_NON_CONSISTENT		(1UL << 3)
/*
 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
 * virtual mapping for the allocated buffer.
 */
#define DMA_ATTR_NO_KERNEL_MAPPING	(1UL << 4)
/*
 * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
 * the CPU cache for the given buffer, assuming that it has already been
 * transferred to the 'device' domain.
 */
#define DMA_ATTR_SKIP_CPU_SYNC		(1UL << 5)
/*
 * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
 * in physical memory.
 */
#define DMA_ATTR_FORCE_CONTIGUOUS	(1UL << 6)
/*
 * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
 * that it's probably not worth the time to try to allocate memory in a way
 * that gives better TLB efficiency.
 */
#define DMA_ATTR_ALLOC_SINGLE_PAGES	(1UL << 7)
/*
 * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
 * allocation failure reports (similarly to __GFP_NOWARN).
 */
#define DMA_ATTR_NO_WARN	(1UL << 8)

/*
 * DMA_ATTR_PRIVILEGED: used to indicate that the buffer is fully
 * accessible at an elevated privilege level (and ideally inaccessible or
 * at least read-only at lesser-privileged levels).
 */
#define DMA_ATTR_PRIVILEGED		(1UL << 9)
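
/*
 * Usage sketch (illustrative only, not part of this header): attribute
 * flags are OR'd together into the "attrs" argument of the *_attrs API
 * variants below; "my_dev" and "my_handle" are hypothetical driver state.
 * A large, optional allocation that should neither warn on failure nor
 * spend effort on TLB-friendly contiguity could be requested as:
 *
 *	vaddr = dma_alloc_attrs(my_dev, SZ_4M, &my_handle, GFP_KERNEL,
 *				DMA_ATTR_ALLOC_SINGLE_PAGES |
 *				DMA_ATTR_NO_WARN);
 */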

/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform.
 * It can be given to a device to use as a DMA source or target. A CPU cannot
 * reference a dma_addr_t directly because there may be translation between
 * its physical address space and the bus address space.
 */
struct dma_map_ops {
	void* (*alloc)(struct device *dev, size_t size,
		       dma_addr_t *dma_handle, gfp_t gfp,
		       unsigned long attrs);
	void (*free)(struct device *dev, size_t size,
		     void *vaddr, dma_addr_t dma_handle,
		     unsigned long attrs);
	int (*mmap)(struct device *, struct vm_area_struct *,
		    void *, dma_addr_t, size_t,
		    unsigned long attrs);

	int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
			   dma_addr_t, size_t, unsigned long attrs);

	dma_addr_t (*map_page)(struct device *dev, struct page *page,
			       unsigned long offset, size_t size,
			       enum dma_data_direction dir,
			       unsigned long attrs);
	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction dir,
			   unsigned long attrs);
	/*
	 * map_sg returns 0 on error and a value > 0 on success.
	 * It should never return a value < 0.
	 */
	int (*map_sg)(struct device *dev, struct scatterlist *sg,
		      int nents, enum dma_data_direction dir,
		      unsigned long attrs);
	void (*unmap_sg)(struct device *dev,
			 struct scatterlist *sg, int nents,
			 enum dma_data_direction dir,
			 unsigned long attrs);
	dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
				   size_t size, enum dma_data_direction dir,
				   unsigned long attrs);
	void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs);
	void (*sync_single_for_cpu)(struct device *dev,
				    dma_addr_t dma_handle, size_t size,
				    enum dma_data_direction dir);
	void (*sync_single_for_device)(struct device *dev,
				       dma_addr_t dma_handle, size_t size,
				       enum dma_data_direction dir);
	void (*sync_sg_for_cpu)(struct device *dev,
				struct scatterlist *sg, int nents,
				enum dma_data_direction dir);
	void (*sync_sg_for_device)(struct device *dev,
				   struct scatterlist *sg, int nents,
				   enum dma_data_direction dir);
	int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
	int (*dma_supported)(struct device *dev, u64 mask);
	int (*set_dma_mask)(struct device *dev, u64 mask);
#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
	u64 (*get_required_mask)(struct device *dev);
#endif
	int is_phys;
};
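
/*
 * Implementation sketch (illustrative only): a bus or platform layer can
 * supply its own ops table and attach it with set_dma_ops() (declared
 * below under CONFIG_HAS_DMA); the my_plat_* callbacks are hypothetical.
 *
 *	static const struct dma_map_ops my_plat_dma_ops = {
 *		.alloc		= my_plat_alloc,
 *		.free		= my_plat_free,
 *		.map_page	= my_plat_map_page,
 *		.unmap_page	= my_plat_unmap_page,
 *		.map_sg		= my_plat_map_sg,
 *		.unmap_sg	= my_plat_unmap_sg,
 *	};
 *
 *	set_dma_ops(dev, &my_plat_dma_ops);
 */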

extern const struct dma_map_ops dma_noop_ops;
extern const struct dma_map_ops dma_virt_ops;

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))

#define DMA_MASK_NONE	0x0ULL

static inline int valid_dma_direction(int dma_direction)
{
	return ((dma_direction == DMA_BIDIRECTIONAL) ||
		(dma_direction == DMA_TO_DEVICE) ||
		(dma_direction == DMA_FROM_DEVICE));
}

static inline int is_device_dma_capable(struct device *dev)
{
	return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
}

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
/*
 * These three functions are only for the DMA allocator.
 * Don't use them in device drivers.
 */
int dma_alloc_from_coherent(struct device *dev, ssize_t size,
			    dma_addr_t *dma_handle, void **ret);
int dma_release_from_coherent(struct device *dev, int order, void *vaddr);

int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
			   void *cpu_addr, size_t size, int *ret);
#else
#define dma_alloc_from_coherent(dev, size, handle, ret) (0)
#define dma_release_from_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_coherent(dev, vma, vaddr, order, ret) (0)
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

#ifdef CONFIG_HAS_DMA
#include <asm/dma-mapping.h>
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (dev && dev->dma_ops)
		return dev->dma_ops;
	return get_arch_dma_ops(dev ? dev->bus : NULL);
}

static inline void set_dma_ops(struct device *dev,
			       const struct dma_map_ops *dma_ops)
{
	dev->dma_ops = dma_ops;
}
#else
/*
 * Define the DMA API to allow compilation but not linking of
 * DMA-dependent code. Code that depends on the dma-mapping
 * API needs to set 'depends on HAS_DMA' in its Kconfig.
 */
extern const struct dma_map_ops bad_dma_ops;
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	return &bad_dma_ops;
}
#endif

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
					      size_t size,
					      enum dma_data_direction dir,
					      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(ptr, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, virt_to_page(ptr),
			     offset_in_page(ptr), size,
			     dir, attrs);
	debug_dma_map_page(dev, virt_to_page(ptr),
			   offset_in_page(ptr), size,
			   dir, addr, true);
	return addr;
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
					  size_t size,
					  enum dma_data_direction dir,
					  unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir, true);
}
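
/*
 * Usage sketch (illustrative only): the canonical streaming-DMA pattern
 * for a single kernel-virtual buffer; "my_dev", "buf" and "len" are
 * hypothetical driver state.
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(my_dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(my_dev, handle))
 *		return -ENOMEM;
 *	... hand "handle" to the device and wait for completion ...
 *	dma_unmap_single(my_dev, handle, len, DMA_TO_DEVICE);
 */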

/*
 * dma_map_sg_attrs returns 0 on error and > 0 on success.
 * It should never return a value < 0.
 */
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir,
				   unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	int i, ents;
	struct scatterlist *s;

	for_each_sg(sg, s, nents, i)
		kmemcheck_mark_initialized(sg_virt(s), s->length);
	BUG_ON(!valid_dma_direction(dir));
	ents = ops->map_sg(dev, sg, nents, dir, attrs);
	BUG_ON(ents < 0);
	debug_dma_map_sg(dev, sg, nents, ents, dir);

	return ents;
}

static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
				      int nents, enum dma_data_direction dir,
				      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}
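
/*
 * Usage sketch (illustrative only): mapping a scatterlist may coalesce
 * entries, so the device must be programmed with the returned count, while
 * unmap takes the original nents; "my_dev", "sgl" and "nents" are
 * hypothetical.
 *
 *	int count;
 *
 *	count = dma_map_sg(my_dev, sgl, nents, DMA_FROM_DEVICE);
 *	if (!count)
 *		return -ENOMEM;
 *	... program the device with "count" mapped segments ...
 *	dma_unmap_sg(my_dev, sgl, nents, DMA_FROM_DEVICE);
 */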

static inline dma_addr_t dma_map_page_attrs(struct device *dev,
					    struct page *page,
					    size_t offset, size_t size,
					    enum dma_data_direction dir,
					    unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(page_address(page) + offset, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, page, offset, size, dir, attrs);
	debug_dma_map_page(dev, page, offset, size, dir, addr, false);

	return addr;
}

static inline void dma_unmap_page_attrs(struct device *dev,
					dma_addr_t addr, size_t size,
					enum dma_data_direction dir,
					unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir, false);
}
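
/*
 * Usage sketch (illustrative only): the page-based interface is the
 * primitive that dma_map_single() wraps; use it when starting from a
 * struct page rather than a kernel virtual address. "my_dev" and "page"
 * are hypothetical.
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_page(my_dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
 *	if (dma_mapping_error(my_dev, handle))
 *		return -ENOMEM;
 *	...
 *	dma_unmap_page(my_dev, handle, PAGE_SIZE, DMA_TO_DEVICE);
 */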

static inline dma_addr_t dma_map_resource(struct device *dev,
					  phys_addr_t phys_addr,
					  size_t size,
					  enum dma_data_direction dir,
					  unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));

	/* Don't allow RAM to be mapped */
	BUG_ON(pfn_valid(PHYS_PFN(phys_addr)));

	addr = phys_addr;
	if (ops->map_resource)
		addr = ops->map_resource(dev, phys_addr, size, dir, attrs);

	debug_dma_map_resource(dev, phys_addr, size, dir, addr);

	return addr;
}

static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
				      size_t size, enum dma_data_direction dir,
				      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_resource)
		ops->unmap_resource(dev, addr, size, dir, attrs);
	debug_dma_unmap_resource(dev, addr, size, dir);
}
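
/*
 * Usage sketch (illustrative only): dma_map_resource() is for MMIO
 * resources, never RAM, e.g. letting a DMA engine write into another
 * device's FIFO register window; "my_dev" and "fifo_phys" are
 * hypothetical.
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_resource(my_dev, fifo_phys, SZ_4K,
 *				  DMA_TO_DEVICE, 0);
 *	...
 *	dma_unmap_resource(my_dev, handle, SZ_4K, DMA_TO_DEVICE, 0);
 */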

static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
					   size_t size,
					   enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t addr, size_t size,
					      enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
						 dma_addr_t addr,
						 unsigned long offset,
						 size_t size,
						 enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
						    dma_addr_t addr,
						    unsigned long offset,
						    size_t size,
						    enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		    int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		       int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}
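
/*
 * Usage sketch (illustrative only): to let the CPU touch a buffer that
 * stays mapped for the device, bracket the CPU access with the sync
 * calls; "my_dev", "handle" and "len" are hypothetical.
 *
 *	dma_sync_single_for_cpu(my_dev, handle, len, DMA_FROM_DEVICE);
 *	... CPU reads the received data ...
 *	dma_sync_single_for_device(my_dev, handle, len, DMA_FROM_DEVICE);
 */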

#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)

extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
			   void *cpu_addr, dma_addr_t dma_addr, size_t size);

void *dma_common_contiguous_remap(struct page *page, size_t size,
				  unsigned long vm_flags,
				  pgprot_t prot, const void *caller);

void *dma_common_pages_remap(struct page **pages, size_t size,
			     unsigned long vm_flags, pgprot_t prot,
			     const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);

/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @dma_addr: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
 * into user space. The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
static inline int
dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
	       dma_addr_t dma_addr, size_t size, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	BUG_ON(!ops);
	if (ops->mmap)
		return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}

#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
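
/*
 * Usage sketch (illustrative only): a driver's file_operations .mmap
 * handler can hand a coherent buffer to user space; "my_dev", "my_vaddr"
 * and "my_handle" are hypothetical driver state.
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return dma_mmap_coherent(my_dev, vma, my_vaddr, my_handle,
 *					 vma->vm_end - vma->vm_start);
 *	}
 */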

int
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		       void *cpu_addr, dma_addr_t dma_addr, size_t size);

static inline int
dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
		      dma_addr_t dma_addr, size_t size,
		      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	BUG_ON(!ops);
	if (ops->get_sgtable)
		return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
					attrs);
	return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
}

#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)

#ifndef arch_dma_alloc_attrs
#define arch_dma_alloc_attrs(dev, flag)	(true)
#endif

static inline void *dma_alloc_attrs(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t flag,
				    unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	BUG_ON(!ops);

	if (dma_alloc_from_coherent(dev, size, dma_handle, &cpu_addr))
		return cpu_addr;

	if (!arch_dma_alloc_attrs(&dev, &flag))
		return NULL;
	if (!ops->alloc)
		return NULL;

	cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
	return cpu_addr;
}

static inline void dma_free_attrs(struct device *dev, size_t size,
				  void *cpu_addr, dma_addr_t dma_handle,
				  unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!ops);
	WARN_ON(irqs_disabled());

	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
		return;

	if (!ops->free || !cpu_addr)
		return;

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	ops->free(dev, size, cpu_addr, dma_handle, attrs);
}

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	return dma_alloc_attrs(dev, size, dma_handle, flag, 0);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *cpu_addr, dma_addr_t dma_handle)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
}
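
/*
 * Usage sketch (illustrative only): coherent allocations are typically
 * long-lived, e.g. descriptor rings set up at probe time; "my_dev" and
 * "my_handle" are hypothetical driver state.
 *
 *	void *ring;
 *
 *	ring = dma_alloc_coherent(my_dev, SZ_64K, &my_handle, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(my_dev, SZ_64K, ring, my_handle);
 */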

static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
					  dma_addr_t *dma_handle, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_handle, gfp,
			       DMA_ATTR_NON_CONSISTENT);
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
					void *cpu_addr, dma_addr_t dma_handle)
{
	dma_free_attrs(dev, size, cpu_addr, dma_handle,
		       DMA_ATTR_NON_CONSISTENT);
}

static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	debug_dma_mapping_error(dev, dma_addr);

	if (get_dma_ops(dev)->mapping_error)
		return get_dma_ops(dev)->mapping_error(dev, dma_addr);

#ifdef DMA_ERROR_CODE
	return dma_addr == DMA_ERROR_CODE;
#else
	return 0;
#endif
}

#ifndef HAVE_ARCH_DMA_SUPPORTED
static inline int dma_supported(struct device *dev, u64 mask)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops)
		return 0;
	if (!ops->dma_supported)
		return 1;
	return ops->dma_supported(dev, mask);
}
#endif

#ifndef HAVE_ARCH_DMA_SET_MASK
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (ops->set_dma_mask)
		return ops->set_dma_mask(dev, mask);

	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;
	*dev->dma_mask = mask;
	return 0;
}
#endif

static inline u64 dma_get_mask(struct device *dev)
{
	if (dev && dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	return DMA_BIT_MASK(32);
}

#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask);
#else
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	if (!dma_supported(dev, mask))
		return -EIO;
	dev->coherent_dma_mask = mask;
	return 0;
}
#endif

/*
 * Set both the DMA mask and the coherent DMA mask to the same thing.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to
 * the same value as, or a smaller value than, the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
	int rc = dma_set_mask(dev, mask);
	if (rc == 0)
		dma_set_coherent_mask(dev, mask);
	return rc;
}
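
/*
 * Usage sketch (illustrative only): a common probe-time pattern is to try
 * a wide mask and fall back to 32 bits, since a nonzero return means the
 * mask was rejected; "my_dev" is hypothetical.
 *
 *	if (dma_set_mask_and_coherent(my_dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask_and_coherent(my_dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */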

/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask appropriately set up.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
	dev->dma_mask = &dev->coherent_dma_mask;
	return dma_set_mask_and_coherent(dev, mask);
}

extern u64 dma_get_required_mask(struct device *dev);

#ifndef arch_setup_dma_ops
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
				      u64 size, const struct iommu_ops *iommu,
				      bool coherent) { }
#endif

#ifndef arch_teardown_dma_ops
static inline void arch_teardown_dma_ops(struct device *dev) { }
#endif

static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->max_segment_size)
		return dev->dma_parms->max_segment_size;
	return SZ_64K;
}

static inline int dma_set_max_seg_size(struct device *dev,
				       unsigned int size)
{
	if (dev->dma_parms) {
		dev->dma_parms->max_segment_size = size;
		return 0;
	}
	return -EIO;
}

static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
		return dev->dma_parms->segment_boundary_mask;
	return DMA_BIT_MASK(32);
}

static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
	if (dev->dma_parms) {
		dev->dma_parms->segment_boundary_mask = mask;
		return 0;
	}
	return -EIO;
}

#ifndef dma_max_pfn
static inline unsigned long dma_max_pfn(struct device *dev)
{
	return *dev->dma_mask >> PAGE_SHIFT;
}
#endif

static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, gfp_t flag)
{
	return dma_alloc_coherent(dev, size, dma_handle, flag | __GFP_ZERO);
}

#ifdef CONFIG_HAS_DMA
static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
	return ARCH_DMA_MINALIGN;
#endif
	return 1;
}
#endif

/* flags for the coherent memory api */
#define DMA_MEMORY_MAP			0x01
#define DMA_MEMORY_IO			0x02
#define DMA_MEMORY_INCLUDES_CHILDREN	0x04
#define DMA_MEMORY_EXCLUSIVE		0x08

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				dma_addr_t device_addr, size_t size, int flags);
void dma_release_declared_memory(struct device *dev);
void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size);
#else
static inline int
dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
			    dma_addr_t device_addr, size_t size, int flags)
{
	return 0;
}

static inline void
dma_release_declared_memory(struct device *dev)
{
}

static inline void *
dma_mark_declared_memory_occupied(struct device *dev,
				  dma_addr_t device_addr, size_t size)
{
	return ERR_PTR(-EBUSY);
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

#ifdef CONFIG_HAS_DMA
int dma_configure(struct device *dev);
void dma_deconfigure(struct device *dev);
#else
static inline int dma_configure(struct device *dev)
{
	return 0;
}

static inline void dma_deconfigure(struct device *dev) {}
#endif

/*
 * Managed DMA API
 */
extern void *dmam_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			       dma_addr_t dma_handle);
extern void *dmam_alloc_noncoherent(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr,
				  dma_addr_t dma_handle);
#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
extern int dmam_declare_coherent_memory(struct device *dev,
					phys_addr_t phys_addr,
					dma_addr_t device_addr, size_t size,
					int flags);
extern void dmam_release_declared_memory(struct device *dev);
#else /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
static inline int dmam_declare_coherent_memory(struct device *dev,
				phys_addr_t phys_addr, dma_addr_t device_addr,
				size_t size, int flags)
{
	return 0;
}

static inline void dmam_release_declared_memory(struct device *dev)
{
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
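
/*
 * Usage sketch (illustrative only): the managed variants tie the
 * allocation's lifetime to the device, so probe error paths and remove
 * need no explicit free; "pdev", "buf" and "handle" are hypothetical.
 *
 *	buf = dmam_alloc_coherent(&pdev->dev, SZ_4K, &handle, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 */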

static inline void *dma_alloc_wc(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_addr, gfp,
			       DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_alloc_writecombine
#define dma_alloc_writecombine dma_alloc_wc
#endif

static inline void dma_free_wc(struct device *dev, size_t size,
			       void *cpu_addr, dma_addr_t dma_addr)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_addr,
			      DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_free_writecombine
#define dma_free_writecombine dma_free_wc
#endif

static inline int dma_mmap_wc(struct device *dev,
			      struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr,
			      size_t size)
{
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
			      DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_mmap_writecombine
#define dma_mmap_writecombine dma_mmap_wc
#endif

#if defined(CONFIG_NEED_DMA_MAP_STATE) || defined(CONFIG_DMA_API_DEBUG)
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)	dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)		__u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)		((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	(((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)		((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	(((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)		(0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME)		(0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	do { } while (0)
#endif
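
/*
 * Usage sketch (illustrative only): a driver-private descriptor can embed
 * unmap bookkeeping that compiles away when neither
 * CONFIG_NEED_DMA_MAP_STATE nor CONFIG_DMA_API_DEBUG is set;
 * "struct my_desc" and "desc" are hypothetical.
 *
 *	struct my_desc {
 *		DEFINE_DMA_UNMAP_ADDR(addr);
 *		DEFINE_DMA_UNMAP_LEN(len);
 *	};
 *
 *	dma_unmap_addr_set(desc, addr, mapping);
 *	dma_unmap_len_set(desc, len, size);
 *	...
 *	dma_unmap_single(dev, dma_unmap_addr(desc, addr),
 *			 dma_unmap_len(desc, len), DMA_TO_DEVICE);
 */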

#endif