/*
 * linux/arch/unicore32/include/asm/tlbflush.h
 *
 * Code specific to PKUnity SoC and UniCore ISA
 *
 * Copyright (C) 2001-2010 GUAN Xue-tao
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __UNICORE_TLBFLUSH_H__
#define __UNICORE_TLBFLUSH_H__

#ifndef __ASSEMBLY__

#include <linux/sched.h>

extern void __cpu_flush_user_tlb_range(unsigned long, unsigned long,
				       struct vm_area_struct *);
extern void __cpu_flush_kern_tlb_range(unsigned long, unsigned long);

/*
 * TLB Management
 * ==============
 *
 * The arch/unicore/mm/tlb-*.S files implement these methods.
 *
 * The TLB specific code is expected to perform whatever tests it
 * needs to determine if it should invalidate the TLB for each
 * call.  Start addresses are inclusive and end addresses are
 * exclusive; it is safe to round these addresses down.
 *
 * flush_tlb_all()
 *
 *	Invalidate the entire TLB.
 *
 * flush_tlb_mm(mm)
 *
 *	Invalidate all TLB entries in a particular address
 *	space.
 *	- mm	- mm_struct describing address space
 *
 * flush_tlb_range(vma,start,end)
 *
 *	Invalidate a range of TLB entries in the specified
 *	address space.
 *	- vma	- vm_area_struct describing address space
 *	- start	- start address (may not be aligned)
 *	- end	- end address (exclusive, may not be aligned)
 *
 * flush_tlb_page(vma,vaddr)
 *
 *	Invalidate the specified page in the specified address range.
 *	- vma	- vm_area_struct describing address range
 *	- vaddr	- virtual address (may not be aligned)
 *
 * flush_tlb_kernel_page(kaddr)
 *
 *	Invalidate the TLB entry for the specified page.  The address
 *	will be in the kernel's virtual memory space.  Current uses
 *	only require the D-TLB to be invalidated.
 *	- kaddr	- kernel virtual memory address
 */
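
/*
 * Illustrative sketch, not part of the original header: a typical
 * caller rewrites a user PTE and then drops the stale entry with the
 * interface documented above.  The helper name and the set_pte()
 * call site are hypothetical; only flush_tlb_page() is this file's
 * API.
 *
 *	static void example_update_user_pte(struct vm_area_struct *vma,
 *					    unsigned long uaddr,
 *					    pte_t *ptep, pte_t newpte)
 *	{
 *		set_pte(ptep, newpte);
 *		flush_tlb_page(vma, uaddr);
 *	}
 */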
65 | ||
66 | static inline void local_flush_tlb_all(void) | |
67 | { | |
68 | const int zero = 0; | |
69 | ||
70 | /* TLB invalidate all */ | |
71 | asm("movc p0.c6, %0, #6; nop; nop; nop; nop; nop; nop; nop; nop" | |
72 | : : "r" (zero) : "cc"); | |
73 | } | |
74 | ||
75 | static inline void local_flush_tlb_mm(struct mm_struct *mm) | |
76 | { | |
77 | const int zero = 0; | |
78 | ||
79 | if (cpumask_test_cpu(get_cpu(), mm_cpumask(mm))) { | |
80 | /* TLB invalidate all */ | |
81 | asm("movc p0.c6, %0, #6; nop; nop; nop; nop; nop; nop; nop; nop" | |
82 | : : "r" (zero) : "cc"); | |
83 | } | |
84 | put_cpu(); | |
85 | } | |
86 | ||
static inline void
local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
#ifndef CONFIG_CPU_TLB_SINGLE_ENTRY_DISABLE
		/* iTLB invalidate page */
		asm("movc p0.c6, %0, #5; nop; nop; nop; nop; nop; nop; nop; nop"
			: : "r" (uaddr & PAGE_MASK) : "cc");
		/* dTLB invalidate page */
		asm("movc p0.c6, %0, #3; nop; nop; nop; nop; nop; nop; nop; nop"
			: : "r" (uaddr & PAGE_MASK) : "cc");
#else
		/* TLB invalidate all */
		asm("movc p0.c6, %0, #6; nop; nop; nop; nop; nop; nop; nop; nop"
			: : "r" (uaddr & PAGE_MASK) : "cc");
#endif
	}
}

static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
{
#ifndef CONFIG_CPU_TLB_SINGLE_ENTRY_DISABLE
	/* iTLB invalidate page */
	asm("movc p0.c6, %0, #5; nop; nop; nop; nop; nop; nop; nop; nop"
		: : "r" (kaddr & PAGE_MASK) : "cc");
	/* dTLB invalidate page */
	asm("movc p0.c6, %0, #3; nop; nop; nop; nop; nop; nop; nop; nop"
		: : "r" (kaddr & PAGE_MASK) : "cc");
#else
	/* TLB invalidate all */
	asm("movc p0.c6, %0, #6; nop; nop; nop; nop; nop; nop; nop; nop"
		: : "r" (kaddr & PAGE_MASK) : "cc");
#endif
}
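
/*
 * Illustrative sketch, not from the original file: after a kernel
 * mapping is changed in place (the helper below is hypothetical),
 * the stale entry for that kernel virtual address must be dropped:
 *
 *	static void example_remap_kernel_page(unsigned long kaddr,
 *					      pte_t *ptep, pte_t newpte)
 *	{
 *		set_pte(ptep, newpte);
 *		local_flush_tlb_kernel_page(kaddr);
 *	}
 */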
121 | ||
122 | /* | |
123 | * flush_pmd_entry | |
124 | * | |
125 | * Flush a PMD entry (word aligned, or double-word aligned) to | |
126 | * RAM if the TLB for the CPU we are running on requires this. | |
127 | * This is typically used when we are creating PMD entries. | |
128 | * | |
129 | * clean_pmd_entry | |
130 | * | |
131 | * Clean (but don't drain the write buffer) if the CPU requires | |
132 | * these operations. This is typically used when we are removing | |
133 | * PMD entries. | |
134 | */ | |
static inline void flush_pmd_entry(pmd_t *pmd)
{
#ifndef CONFIG_CPU_DCACHE_LINE_DISABLE
	/* flush dcache line, see dcacheline_flush in proc-macros.S */
	asm("mov	r1, %0 << #20\n"
		"ldw	r2, =_stext\n"
		"add	r2, r2, r1 >> #20\n"
		"ldw	r1, [r2+], #0x0000\n"
		"ldw	r1, [r2+], #0x1000\n"
		"ldw	r1, [r2+], #0x2000\n"
		"ldw	r1, [r2+], #0x3000\n"
		: : "r" (pmd) : "r1", "r2");
#else
	/* flush dcache all */
	asm("movc p0.c5, %0, #14; nop; nop; nop; nop; nop; nop; nop; nop"
		: : "r" (pmd) : "cc");
#endif
}
153 | ||
154 | static inline void clean_pmd_entry(pmd_t *pmd) | |
155 | { | |
156 | #ifndef CONFIG_CPU_DCACHE_LINE_DISABLE | |
157 | /* clean dcache line */ | |
158 | asm("movc p0.c5, %0, #11; nop; nop; nop; nop; nop; nop; nop; nop" | |
159 | : : "r" (__pa(pmd) & ~(L1_CACHE_BYTES - 1)) : "cc"); | |
160 | #else | |
161 | /* clean dcache all */ | |
162 | asm("movc p0.c5, %0, #10; nop; nop; nop; nop; nop; nop; nop; nop" | |
163 | : : "r" (pmd) : "cc"); | |
164 | #endif | |
165 | } | |
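
/*
 * Illustrative sketch, not from the original file: page-table setup
 * code typically installs a PMD entry and then calls flush_pmd_entry()
 * so the entry reaches RAM where the hardware walker can see it (the
 * helper name is hypothetical):
 *
 *	static void example_set_pmd(pmd_t *pmdp, pmd_t entry)
 *	{
 *		*pmdp = entry;
 *		flush_pmd_entry(pmdp);
 *	}
 */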
166 | ||
167 | /* | |
168 | * Convert calls to our calling convention. | |
169 | */ | |
170 | #define local_flush_tlb_range(vma, start, end) \ | |
171 | __cpu_flush_user_tlb_range(start, end, vma) | |
172 | #define local_flush_tlb_kernel_range(s, e) \ | |
173 | __cpu_flush_kern_tlb_range(s, e) | |
174 | ||
175 | #define flush_tlb_all local_flush_tlb_all | |
176 | #define flush_tlb_mm local_flush_tlb_mm | |
177 | #define flush_tlb_page local_flush_tlb_page | |
178 | #define flush_tlb_kernel_page local_flush_tlb_kernel_page | |
179 | #define flush_tlb_range local_flush_tlb_range | |
180 | #define flush_tlb_kernel_range local_flush_tlb_kernel_range | |
181 | ||
/*
 * if PG_dcache_clean is not set for the page, we need to ensure that any
 * cache entries for the kernel's virtual memory range are written
 * back to the page.
 */
extern void update_mmu_cache(struct vm_area_struct *vma,
	unsigned long addr, pte_t *ptep);
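
/*
 * For orientation only, an assumed sketch of the generic calling
 * context: the fault path installs a PTE via set_pte_at() and then
 * lets the architecture write back any dirty cache lines:
 *
 *	set_pte_at(vma->vm_mm, addr, ptep, entry);
 *	update_mmu_cache(vma, addr, ptep);
 */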
189 | ||
190 | extern void do_bad_area(unsigned long addr, unsigned int fsr, | |
191 | struct pt_regs *regs); | |
192 | ||
193 | #endif | |
194 | ||
195 | #endif |