/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DMA_NONCOHERENT_H
#define _LINUX_DMA_NONCOHERENT_H 1

#include <linux/dma-mapping.h>
#include <linux/pgtable.h>

#ifdef CONFIG_ARCH_HAS_DMA_COHERENCE_H
#include <asm/dma-coherence.h>
#elif defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
	defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
	defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
static inline bool dev_is_dma_coherent(struct device *dev)
{
	return dev->dma_coherent;
}
#else
static inline bool dev_is_dma_coherent(struct device *dev)
{
	return true;
}
#endif /* CONFIG_ARCH_HAS_DMA_COHERENCE_H */
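
/*
 * Illustrative sketch (not part of the original header): the common
 * DMA code typically consults dev_is_dma_coherent() to decide whether
 * cache maintenance can be skipped, roughly along these lines:
 *
 *	if (!dev_is_dma_coherent(dev))
 *		arch_sync_dma_for_device(paddr, size, dir);
 *
 * The real call sites live in kernel/dma/direct.c and differ in detail
 * between kernel versions.
 */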

/*
 * Check if an allocation needs to be marked uncached to be coherent.
 */
static __always_inline bool dma_alloc_need_uncached(struct device *dev,
		unsigned long attrs)
{
	if (dev_is_dma_coherent(dev))
		return false;
	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
		return false;
	if (IS_ENABLED(CONFIG_DMA_NONCOHERENT_CACHE_SYNC) &&
	    (attrs & DMA_ATTR_NON_CONSISTENT))
		return false;
	return true;
}
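
/*
 * Hypothetical usage sketch: an allocator backend would gate its
 * uncached-remapping step on the helper above, e.g.:
 *
 *	page = alloc_pages(gfp, get_order(size));
 *	ret = page_address(page);
 *	if (dma_alloc_need_uncached(dev, attrs))
 *		ret = arch_dma_set_uncached(ret, size);
 *
 * This mirrors, but does not reproduce, the logic in kernel/dma/direct.c.
 */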

void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs);
void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs);
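
/*
 * A minimal, hypothetical arch_dma_alloc() backend might look like the
 * sketch below; real architectures add cache maintenance and uncached
 * remapping ("my_flush_dcache_range" is a made-up helper name):
 *
 *	void *arch_dma_alloc(struct device *dev, size_t size,
 *			dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 *	{
 *		void *ret = (void *)__get_free_pages(gfp, get_order(size));
 *
 *		if (!ret)
 *			return NULL;
 *		*dma_handle = phys_to_dma(dev, virt_to_phys(ret));
 *		my_flush_dcache_range(ret, size);
 *		return ret;
 *	}
 */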

#ifdef CONFIG_MMU
/*
 * Page protection so that devices that can't snoop CPU caches can use the
 * memory coherently.  We default to pgprot_noncached which is usually used
 * for ioremap as a safe bet, but architectures can override this with less
 * strict semantics if possible.
 */
#ifndef pgprot_dmacoherent
#define pgprot_dmacoherent(prot)	pgprot_noncached(prot)
#endif

pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs);
#else
static inline pgprot_t dma_pgprot(struct device *dev, pgprot_t prot,
		unsigned long attrs)
{
	return prot;	/* no protection bits supported without page tables */
}
#endif /* CONFIG_MMU */
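
/*
 * Sketch only: dma_pgprot() is what mmap-style helpers use to fix up
 * the vma protection before remapping; simplified from the common code
 * (details vary by kernel version):
 *
 *	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
 *	return remap_pfn_range(vma, vma->vm_start, pfn, size,
 *			vma->vm_page_prot);
 */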

#ifdef CONFIG_DMA_NONCOHERENT_CACHE_SYNC
void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction direction);
#else
static inline void arch_dma_cache_sync(struct device *dev, void *vaddr,
		size_t size, enum dma_data_direction direction)
{
}
#endif /* CONFIG_DMA_NONCOHERENT_CACHE_SYNC */
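
/*
 * Assumed driver-side pairing (the DMA_ATTR_NON_CONSISTENT interface
 * is version-dependent and being phased out): a driver that allocated
 * non-consistent memory brackets CPU accesses with dma_cache_sync(),
 * which ends up in the hook above:
 *
 *	dma_cache_sync(dev, vaddr, size, DMA_TO_DEVICE);
 */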

#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir);
#else
static inline void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
}
#endif /* ARCH_HAS_SYNC_DMA_FOR_DEVICE */
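
/*
 * Hypothetical arch implementation sketch: before the device accesses
 * the buffer, CPU caches are typically written back for DMA_TO_DEVICE
 * and invalidated (or cleaned and invalidated, for the bidirectional
 * case) otherwise; "my_dcache_clean" and "my_dcache_inval" are made-up
 * names:
 *
 *	void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
 *			enum dma_data_direction dir)
 *	{
 *		if (dir == DMA_TO_DEVICE)
 *			my_dcache_clean(paddr, size);
 *		else
 *			my_dcache_inval(paddr, size);
 *	}
 */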

#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir);
#else
static inline void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
}
#endif /* ARCH_HAS_SYNC_DMA_FOR_CPU */
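
/*
 * Mirror-image sketch (hypothetical helper name): after the device has
 * written memory, stale or speculatively fetched cache lines must be
 * discarded before the CPU reads the buffer:
 *
 *	void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
 *			enum dma_data_direction dir)
 *	{
 *		if (dir != DMA_TO_DEVICE)
 *			my_dcache_inval(paddr, size);
 *	}
 */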

#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
void arch_sync_dma_for_cpu_all(void);
#else
static inline void arch_sync_dma_for_cpu_all(void)
{
}
#endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL */

#ifdef CONFIG_ARCH_HAS_DMA_PREP_COHERENT
void arch_dma_prep_coherent(struct page *page, size_t size);
#else
static inline void arch_dma_prep_coherent(struct page *page, size_t size)
{
}
#endif /* CONFIG_ARCH_HAS_DMA_PREP_COHERENT */
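
/*
 * Sketch under assumptions: arch_dma_prep_coherent() writes back and
 * invalidates any cached copies of a page that is about to be handed
 * out through an uncached or remapped mapping, e.g.:
 *
 *	void arch_dma_prep_coherent(struct page *page, size_t size)
 *	{
 *		my_dcache_flush(page_to_phys(page), size);
 *	}
 *
 * where "my_dcache_flush" stands in for the architecture's clean +
 * invalidate primitive.
 */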

void *arch_dma_set_uncached(void *addr, size_t size);
void arch_dma_clear_uncached(void *addr, size_t size);
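
/*
 * Assumed pairing, mirroring the common-code usage in
 * kernel/dma/direct.c: arch_dma_set_uncached() returns an uncached
 * alias (or an ERR_PTR) for an existing cacheable mapping, and
 * arch_dma_clear_uncached() tears that alias down on free:
 *
 *	ret = arch_dma_set_uncached(page_address(page), size);
 *	if (IS_ERR(ret))
 *		goto out_free_pages;
 *	...
 *	arch_dma_clear_uncached(cpu_addr, size);
 */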

#endif /* _LINUX_DMA_NONCOHERENT_H */