]>
Commit | Line | Data |
---|---|---|
b2441318 | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
92a73bd2 CH |
2 | #ifndef _ASM_GENERIC_CACHEFLUSH_H |
3 | #define _ASM_GENERIC_CACHEFLUSH_H | |
5c01b46b | 4 | |
c296d4dc QC |
5 | #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 |
6 | ||
5c01b46b AB |
7 | /* |
8 | * The cache doesn't need to be flushed when TLB entries change when | |
9 | * the cache is mapped to physical memory, not virtual memory | |
10 | */ | |
#ifndef flush_cache_all
/* Default no-op: nothing to do when caches need no explicit maintenance. */
static inline void flush_cache_all(void)
{
}
#endif

#ifndef flush_cache_mm
/* Default no-op for flushing caches of an entire address space. */
static inline void flush_cache_mm(struct mm_struct *mm)
{
}
#endif

#ifndef flush_cache_dup_mm
/* Default no-op used when an address space is duplicated (fork). */
static inline void flush_cache_dup_mm(struct mm_struct *mm)
{
}
#endif

#ifndef flush_cache_range
/* Default no-op for flushing caches over a user virtual address range. */
static inline void flush_cache_range(struct vm_area_struct *vma,
				     unsigned long start,
				     unsigned long end)
{
}
#endif

#ifndef flush_cache_page
/* Default no-op for flushing caches of a single user page. */
static inline void flush_cache_page(struct vm_area_struct *vma,
				    unsigned long vmaddr,
				    unsigned long pfn)
{
}
#endif

#ifndef flush_dcache_page
/*
 * Default no-op; consistent with ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 0
 * defined above.
 */
static inline void flush_dcache_page(struct page *page)
{
}
#endif

#ifndef flush_dcache_mmap_lock
/* Default no-op lock hook around dcache/mmap manipulation. */
static inline void flush_dcache_mmap_lock(struct address_space *mapping)
{
}
#endif

#ifndef flush_dcache_mmap_unlock
/* Default no-op unlock hook paired with flush_dcache_mmap_lock(). */
static inline void flush_dcache_mmap_unlock(struct address_space *mapping)
{
}
#endif

#ifndef flush_icache_range
/* Default no-op for making an address range coherent with the icache. */
static inline void flush_icache_range(unsigned long start, unsigned long end)
{
}
#endif

#ifndef flush_icache_page
/* Default no-op for icache maintenance on a single page. */
static inline void flush_icache_page(struct vm_area_struct *vma,
				     struct page *page)
{
}
#endif

#ifndef flush_icache_user_range
/*
 * Default no-op; invoked by copy_to_user_page() below after new data
 * is written into a page that may be executed.
 */
static inline void flush_icache_user_range(struct vm_area_struct *vma,
					   struct page *page,
					   unsigned long addr, int len)
{
}
#endif

#ifndef flush_cache_vmap
/* Default no-op hook before a vmap/vmalloc range becomes visible. */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
}
#endif

#ifndef flush_cache_vunmap
/* Default no-op hook before a vmap/vmalloc range is torn down. */
static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
}
#endif

#ifndef copy_to_user_page
/*
 * Copy data into a (possibly executable) user page: do the memcpy, then
 * make the icache coherent with the newly written range.
 */
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
	do { \
		memcpy(dst, src, len); \
		flush_icache_user_range(vma, page, vaddr, len); \
	} while (0)
#endif

#ifndef copy_from_user_page
/* Copy data out of a user page: a plain memcpy, no cache maintenance. */
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	memcpy(dst, src, len)
#endif
5c01b46b | 108 | |
92a73bd2 | 109 | #endif /* _ASM_GENERIC_CACHEFLUSH_H */ |