/* arch/powerpc/include/asm/cacheflush.h */
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
4 #ifndef _ASM_POWERPC_CACHEFLUSH_H
5 #define _ASM_POWERPC_CACHEFLUSH_H
8 #include <asm/cputable.h>
9 #include <asm/cpu_has_feature.h>
/*
 * This flag is used to indicate that the page pointed to by a pte is clean
 * and does not require cleaning before returning it to the user.
 */
#define PG_dcache_clean PG_arch_1
#ifdef CONFIG_PPC_BOOK3S_64
/*
 * Book3s has no ptesync after setting a pte, so without this ptesync it's
 * possible for a kernel virtual mapping access to return a spurious fault
 * if it's accessed right after the pte is set. The page fault handler does
 * not expect this type of fault. flush_cache_vmap is not exactly the right
 * place to put this, but it seems to work well enough.
 */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
	/* Order the pte store against subsequent accesses to the new mapping. */
	asm volatile("ptesync" ::: "memory");
}
/* Tell asm-generic/cacheflush.h not to supply its no-op fallback. */
#define flush_cache_vmap flush_cache_vmap
#endif /* CONFIG_PPC_BOOK3S_64 */
32 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
34 * This is called when a page has been modified by the kernel.
35 * It just marks the page as not i-cache clean. We do the i-cache
36 * flush later when the page is given to a user process, if necessary.
38 static inline void flush_dcache_page(struct page
*page
)
40 if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE
))
42 /* avoid an atomic op if possible */
43 if (test_bit(PG_dcache_clean
, &page
->flags
))
44 clear_bit(PG_dcache_clean
, &page
->flags
);
/*
 * Out-of-line cache-maintenance entry points.  The self-#define pattern
 * marks each as arch-provided so <asm-generic/cacheflush.h> (included at
 * the bottom of this header) does not install its generic fallback.
 */

/* Make the range [start, stop) coherent between the D-cache and I-cache. */
void flush_icache_range(unsigned long start, unsigned long stop);
#define flush_icache_range flush_icache_range

/* Flush caches for @len bytes at user address @addr within @page/@vma. */
void flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
			    unsigned long addr, int len);
#define flush_icache_user_page flush_icache_user_page

/* Write back the page's D-cache lines and invalidate its I-cache lines. */
void flush_dcache_icache_page(struct page *page);
57 * flush_dcache_range(): Write any modified data cache blocks out to memory and
58 * invalidate them. Does not invalidate the corresponding instruction cache
61 * @start: the start address
62 * @stop: the stop address (exclusive)
64 static inline void flush_dcache_range(unsigned long start
, unsigned long stop
)
66 unsigned long shift
= l1_dcache_shift();
67 unsigned long bytes
= l1_dcache_bytes();
68 void *addr
= (void *)(start
& ~(bytes
- 1));
69 unsigned long size
= stop
- (unsigned long)addr
+ (bytes
- 1);
72 if (IS_ENABLED(CONFIG_PPC64
))
75 for (i
= 0; i
< size
>> shift
; i
++, addr
+= bytes
)
/*
 * Write any modified data cache blocks out to memory.
 * Does not invalidate the corresponding cache lines (especially for
 * any corresponding instruction cache).
 */
static inline void clean_dcache_range(unsigned long start, unsigned long stop)
{
	unsigned long shift = l1_dcache_shift();
	unsigned long bytes = l1_dcache_bytes();
	/* Align down to a line boundary and round the length up to cover it. */
	void *addr = (void *)(start & ~(bytes - 1));
	unsigned long size = stop - (unsigned long)addr + (bytes - 1);
	unsigned long i;

	for (i = 0; i < size >> shift; i++, addr += bytes)
		dcbst(addr);	/* store (clean) the line; leaves it valid */
	mb();	/* sync: make the writebacks globally visible */
}
/*
 * Like above, but invalidate the D-cache.  This is used by the 8xx
 * to invalidate the cache so the PPC core doesn't get stale data
 * from the CPM (no cache snooping here :-).
 */
static inline void invalidate_dcache_range(unsigned long start,
					   unsigned long stop)
{
	unsigned long shift = l1_dcache_shift();
	unsigned long bytes = l1_dcache_bytes();
	/* Align down to a line boundary and round the length up to cover it. */
	void *addr = (void *)(start & ~(bytes - 1));
	unsigned long size = stop - (unsigned long)addr + (bytes - 1);
	unsigned long i;

	for (i = 0; i < size >> shift; i++, addr += bytes)
		dcbi(addr);	/* invalidate the line without writing it back */
	mb();	/* sync */
}
/*
 * NOTE(review): the preprocessor guard was lost in this copy and has been
 * reconstructed as CONFIG_4xx — confirm against the upstream tree.  On 4xx
 * a single iccci invalidates the whole I-cache, so an inline is enough;
 * other platforms use the out-of-line implementation.
 */
#ifdef CONFIG_4xx
static inline void flush_instruction_cache(void)
{
	iccci((void *)KERNELBASE);
	isync();	/* discard prefetched instructions */
}
#else
void flush_instruction_cache(void);
#endif
128 #include <asm-generic/cacheflush.h>
130 #endif /* _ASM_POWERPC_CACHEFLUSH_H */