/*
 *  linux/include/asm-arm/tlbflush.h
 *
 *  Copyright (C) 1999-2003 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_TLBFLUSH_H
#define _ASMARM_TLBFLUSH_H


#ifndef CONFIG_MMU

#define tlb_flush(tlb)	((void) tlb)

#else /* CONFIG_MMU */

#include <asm/glue.h>

#define TLB_V3_PAGE	(1 << 0)
#define TLB_V4_U_PAGE	(1 << 1)
#define TLB_V4_D_PAGE	(1 << 2)
#define TLB_V4_I_PAGE	(1 << 3)
#define TLB_V6_U_PAGE	(1 << 4)
#define TLB_V6_D_PAGE	(1 << 5)
#define TLB_V6_I_PAGE	(1 << 6)

#define TLB_V3_FULL	(1 << 8)
#define TLB_V4_U_FULL	(1 << 9)
#define TLB_V4_D_FULL	(1 << 10)
#define TLB_V4_I_FULL	(1 << 11)
#define TLB_V6_U_FULL	(1 << 12)
#define TLB_V6_D_FULL	(1 << 13)
#define TLB_V6_I_FULL	(1 << 14)

#define TLB_V6_U_ASID	(1 << 16)
#define TLB_V6_D_ASID	(1 << 17)
#define TLB_V6_I_ASID	(1 << 18)

#define TLB_L2CLEAN_FR	(1 << 29)		/* Feroceon */
#define TLB_DCLEAN	(1 << 30)
#define TLB_WB		(1 << 31)

/*
 *	MMU TLB Model
 *	=============
 *
 *	We have the following to choose from:
 *	  v3    - ARMv3
 *	  v4    - ARMv4 without write buffer
 *	  v4wb  - ARMv4 with write buffer without I TLB flush entry instruction
 *	  v4wbi - ARMv4 with write buffer with I TLB flush entry instruction
 *	  fr    - Feroceon (v4wbi with non-outer-cacheable page table walks)
 *	  v6wbi - ARMv6 with write buffer with I TLB flush entry instruction
 */
#undef _TLB
#undef MULTI_TLB

#define v3_tlb_flags	(TLB_V3_FULL | TLB_V3_PAGE)

#ifdef CONFIG_CPU_TLB_V3
# define v3_possible_flags	v3_tlb_flags
# define v3_always_flags	v3_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v3
# endif
#else
# define v3_possible_flags	0
# define v3_always_flags	(-1UL)
#endif
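
/*
 * Illustration (not part of the original header): each selected TLB model
 * repeats the pattern above.  With only CONFIG_CPU_TLB_V3 enabled, _TLB
 * becomes v3 and MULTI_TLB stays undefined.  If a second model is also
 * enabled, its "# ifdef _TLB" branch fires and defines MULTI_TLB, which
 * switches the calling-method glue below to indirect calls through cpu_tlb.
 */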

#define v4_tlb_flags	(TLB_V4_U_FULL | TLB_V4_U_PAGE)

#ifdef CONFIG_CPU_TLB_V4WT
# define v4_possible_flags	v4_tlb_flags
# define v4_always_flags	v4_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v4
# endif
#else
# define v4_possible_flags	0
# define v4_always_flags	(-1UL)
#endif

#define v4wbi_tlb_flags	(TLB_WB | TLB_DCLEAN | \
			 TLB_V4_I_FULL | TLB_V4_D_FULL | \
			 TLB_V4_I_PAGE | TLB_V4_D_PAGE)

#ifdef CONFIG_CPU_TLB_V4WBI
# define v4wbi_possible_flags	v4wbi_tlb_flags
# define v4wbi_always_flags	v4wbi_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v4wbi
# endif
#else
# define v4wbi_possible_flags	0
# define v4wbi_always_flags	(-1UL)
#endif

#define fr_tlb_flags	(TLB_WB | TLB_DCLEAN | TLB_L2CLEAN_FR | \
			 TLB_V4_I_FULL | TLB_V4_D_FULL | \
			 TLB_V4_I_PAGE | TLB_V4_D_PAGE)

#ifdef CONFIG_CPU_TLB_FEROCEON
# define fr_possible_flags	fr_tlb_flags
# define fr_always_flags	fr_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v4wbi
# endif
#else
# define fr_possible_flags	0
# define fr_always_flags	(-1UL)
#endif

#define v4wb_tlb_flags	(TLB_WB | TLB_DCLEAN | \
			 TLB_V4_I_FULL | TLB_V4_D_FULL | \
			 TLB_V4_D_PAGE)

#ifdef CONFIG_CPU_TLB_V4WB
# define v4wb_possible_flags	v4wb_tlb_flags
# define v4wb_always_flags	v4wb_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v4wb
# endif
#else
# define v4wb_possible_flags	0
# define v4wb_always_flags	(-1UL)
#endif

#define v6wbi_tlb_flags	(TLB_WB | TLB_DCLEAN | \
			 TLB_V6_I_FULL | TLB_V6_D_FULL | \
			 TLB_V6_I_PAGE | TLB_V6_D_PAGE | \
			 TLB_V6_I_ASID | TLB_V6_D_ASID)

#ifdef CONFIG_CPU_TLB_V6
# define v6wbi_possible_flags	v6wbi_tlb_flags
# define v6wbi_always_flags	v6wbi_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v6wbi
# endif
#else
# define v6wbi_possible_flags	0
# define v6wbi_always_flags	(-1UL)
#endif

#ifdef CONFIG_CPU_TLB_V7
# define v7wbi_possible_flags	v6wbi_tlb_flags
# define v7wbi_always_flags	v6wbi_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v7wbi
# endif
#else
# define v7wbi_possible_flags	0
# define v7wbi_always_flags	(-1UL)
#endif

#ifndef _TLB
#error Unknown TLB model
#endif

#ifndef __ASSEMBLY__

#include <linux/sched.h>

struct cpu_tlb_fns {
	void (*flush_user_range)(unsigned long, unsigned long, struct vm_area_struct *);
	void (*flush_kern_range)(unsigned long, unsigned long);
	unsigned long tlb_flags;
};

/*
 * Select the calling method
 */
#ifdef MULTI_TLB

#define __cpu_flush_user_tlb_range	cpu_tlb.flush_user_range
#define __cpu_flush_kern_tlb_range	cpu_tlb.flush_kern_range

#else

#define __cpu_flush_user_tlb_range	__glue(_TLB,_flush_user_tlb_range)
#define __cpu_flush_kern_tlb_range	__glue(_TLB,_flush_kern_tlb_range)

extern void __cpu_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);
extern void __cpu_flush_kern_tlb_range(unsigned long, unsigned long);

#endif
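
/*
 * Example of the glue expansion (illustrative): on a kernel built for a
 * single v6wbi TLB model, __glue(_TLB,_flush_user_tlb_range) pastes the
 * tokens together, so a call such as
 *
 *	__cpu_flush_user_tlb_range(start, end, vma);
 *
 * resolves at compile time to v6wbi_flush_user_tlb_range(), implemented
 * in the arch/arm/mm/tlb-*.S files, with no pointer indirection.  Only
 * in the MULTI_TLB case does the call go through the cpu_tlb function
 * table chosen at boot.
 */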

extern struct cpu_tlb_fns cpu_tlb;

#define __cpu_tlb_flags		cpu_tlb.tlb_flags

/*
 *	TLB Management
 *	==============
 *
 *	The arch/arm/mm/tlb-*.S files implement these methods.
 *
 *	The TLB specific code is expected to perform whatever tests it
 *	needs to determine if it should invalidate the TLB for each
 *	call.  Start addresses are inclusive and end addresses are
 *	exclusive; it is safe to round these addresses down.
 *
 *	flush_tlb_all()
 *
 *		Invalidate the entire TLB.
 *
 *	flush_tlb_mm(mm)
 *
 *		Invalidate all TLB entries in a particular address
 *		space.
 *		- mm	- mm_struct describing address space
 *
 *	flush_tlb_range(mm,start,end)
 *
 *		Invalidate a range of TLB entries in the specified
 *		address space.
 *		- mm	- mm_struct describing address space
 *		- start	- start address (may not be aligned)
 *		- end	- end address (exclusive, may not be aligned)
 *
 *	flush_tlb_page(vaddr,vma)
 *
 *		Invalidate the specified page in the specified address range.
 *		- vaddr	- virtual address (may not be aligned)
 *		- vma	- vma_struct describing address range
 *
 *	flush_kern_tlb_page(kaddr)
 *
 *		Invalidate the TLB entry for the specified page.  The address
 *		will be in the kernel's virtual memory space.  Current uses
 *		only require the D-TLB to be invalidated.
 *		- kaddr	- Kernel virtual memory address
 */
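
/*
 * Typical usage (a sketch, assuming a vma and mm already in hand): after
 * modifying a single user PTE a caller would issue
 *
 *	flush_tlb_page(vma, addr);
 *
 * and after tearing down an entire address space,
 *
 *	flush_tlb_mm(mm);
 *
 * On UP kernels these names map straight onto the local_* inlines below;
 * on SMP they are out-of-line functions that also reach other CPUs.
 */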

/*
 * We optimise the code below by:
 *  - building a set of TLB flags that might be set in __cpu_tlb_flags
 *  - building a set of TLB flags that will always be set in __cpu_tlb_flags
 *  - if we're going to need __cpu_tlb_flags, access it once and only once
 *
 * This allows us to build optimal assembly for the single-CPU type case,
 * and as close to optimal given the compiler constraints for multi-CPU
 * case.  We could do better for the multi-CPU case if the compiler
 * implemented the "%?" method, but this has been discontinued due to too
 * many people getting it wrong.
 */
#define possible_tlb_flags	(v3_possible_flags | \
				 v4_possible_flags | \
				 v4wbi_possible_flags | \
				 fr_possible_flags | \
				 v4wb_possible_flags | \
				 v6wbi_possible_flags | \
				 v7wbi_possible_flags)

#define always_tlb_flags	(v3_always_flags & \
				 v4_always_flags & \
				 v4wbi_always_flags & \
				 fr_always_flags & \
				 v4wb_always_flags & \
				 v6wbi_always_flags & \
				 v7wbi_always_flags)

#define tlb_flag(f)	((always_tlb_flags & (f)) || (__tlb_flag & possible_tlb_flags & (f)))
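
/*
 * Example of the constant folding this enables (illustrative): on a
 * single-model v6wbi kernel, always_tlb_flags reduces to v6wbi_tlb_flags,
 * which contains TLB_WB, so tlb_flag(TLB_WB) is a compile-time constant
 * and the dsb() below is emitted unconditionally.  TLB_V3_FULL appears in
 * neither always_tlb_flags nor possible_tlb_flags, so tlb_flag(TLB_V3_FULL)
 * is constant zero and that branch vanishes.  Only with MULTI_TLB does the
 * __tlb_flag local, loaded once per function from __cpu_tlb_flags, get
 * tested at run time.
 */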

static inline void local_flush_tlb_all(void)
{
	const int zero = 0;
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	if (tlb_flag(TLB_WB))
		dsb();

	if (tlb_flag(TLB_V3_FULL))
		asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (zero) : "cc");
	if (tlb_flag(TLB_V4_U_FULL | TLB_V6_U_FULL))
		asm("mcr p15, 0, %0, c8, c7, 0" : : "r" (zero) : "cc");
	if (tlb_flag(TLB_V4_D_FULL | TLB_V6_D_FULL))
		asm("mcr p15, 0, %0, c8, c6, 0" : : "r" (zero) : "cc");
	if (tlb_flag(TLB_V4_I_FULL | TLB_V6_I_FULL))
		asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc");

	if (tlb_flag(TLB_V6_I_FULL | TLB_V6_D_FULL |
		     TLB_V6_I_PAGE | TLB_V6_D_PAGE |
		     TLB_V6_I_ASID | TLB_V6_D_ASID)) {
		/* flush the branch target cache */
		asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc");
		dsb();
		isb();
	}
}

static inline void local_flush_tlb_mm(struct mm_struct *mm)
{
	const int zero = 0;
	const int asid = ASID(mm);
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	if (tlb_flag(TLB_WB))
		dsb();

	if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask)) {
		if (tlb_flag(TLB_V3_FULL))
			asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (zero) : "cc");
		if (tlb_flag(TLB_V4_U_FULL))
			asm("mcr p15, 0, %0, c8, c7, 0" : : "r" (zero) : "cc");
		if (tlb_flag(TLB_V4_D_FULL))
			asm("mcr p15, 0, %0, c8, c6, 0" : : "r" (zero) : "cc");
		if (tlb_flag(TLB_V4_I_FULL))
			asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc");
	}

	if (tlb_flag(TLB_V6_U_ASID))
		asm("mcr p15, 0, %0, c8, c7, 2" : : "r" (asid) : "cc");
	if (tlb_flag(TLB_V6_D_ASID))
		asm("mcr p15, 0, %0, c8, c6, 2" : : "r" (asid) : "cc");
	if (tlb_flag(TLB_V6_I_ASID))
		asm("mcr p15, 0, %0, c8, c5, 2" : : "r" (asid) : "cc");

	if (tlb_flag(TLB_V6_I_FULL | TLB_V6_D_FULL |
		     TLB_V6_I_PAGE | TLB_V6_D_PAGE |
		     TLB_V6_I_ASID | TLB_V6_D_ASID)) {
		/* flush the branch target cache */
		asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc");
		dsb();
	}
}

static inline void
local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	const int zero = 0;
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm);

	if (tlb_flag(TLB_WB))
		dsb();

	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
		if (tlb_flag(TLB_V3_PAGE))
			asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (uaddr) : "cc");
		if (tlb_flag(TLB_V4_U_PAGE))
			asm("mcr p15, 0, %0, c8, c7, 1" : : "r" (uaddr) : "cc");
		if (tlb_flag(TLB_V4_D_PAGE))
			asm("mcr p15, 0, %0, c8, c6, 1" : : "r" (uaddr) : "cc");
		if (tlb_flag(TLB_V4_I_PAGE))
			asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (uaddr) : "cc");
		if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL))
			asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc");
	}

	if (tlb_flag(TLB_V6_U_PAGE))
		asm("mcr p15, 0, %0, c8, c7, 1" : : "r" (uaddr) : "cc");
	if (tlb_flag(TLB_V6_D_PAGE))
		asm("mcr p15, 0, %0, c8, c6, 1" : : "r" (uaddr) : "cc");
	if (tlb_flag(TLB_V6_I_PAGE))
		asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (uaddr) : "cc");

	if (tlb_flag(TLB_V6_I_FULL | TLB_V6_D_FULL |
		     TLB_V6_I_PAGE | TLB_V6_D_PAGE |
		     TLB_V6_I_ASID | TLB_V6_D_ASID)) {
		/* flush the branch target cache */
		asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc");
		dsb();
	}
}

static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
{
	const int zero = 0;
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	kaddr &= PAGE_MASK;

	if (tlb_flag(TLB_WB))
		dsb();

	if (tlb_flag(TLB_V3_PAGE))
		asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (kaddr) : "cc");
	if (tlb_flag(TLB_V4_U_PAGE))
		asm("mcr p15, 0, %0, c8, c7, 1" : : "r" (kaddr) : "cc");
	if (tlb_flag(TLB_V4_D_PAGE))
		asm("mcr p15, 0, %0, c8, c6, 1" : : "r" (kaddr) : "cc");
	if (tlb_flag(TLB_V4_I_PAGE))
		asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (kaddr) : "cc");
	if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL))
		asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc");

	if (tlb_flag(TLB_V6_U_PAGE))
		asm("mcr p15, 0, %0, c8, c7, 1" : : "r" (kaddr) : "cc");
	if (tlb_flag(TLB_V6_D_PAGE))
		asm("mcr p15, 0, %0, c8, c6, 1" : : "r" (kaddr) : "cc");
	if (tlb_flag(TLB_V6_I_PAGE))
		asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (kaddr) : "cc");

	if (tlb_flag(TLB_V6_I_FULL | TLB_V6_D_FULL |
		     TLB_V6_I_PAGE | TLB_V6_D_PAGE |
		     TLB_V6_I_ASID | TLB_V6_D_ASID)) {
		/* flush the branch target cache */
		asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc");
		dsb();
		isb();
	}
}

/*
 *	flush_pmd_entry
 *
 *	Flush a PMD entry (word aligned, or double-word aligned) to
 *	RAM if the TLB for the CPU we are running on requires this.
 *	This is typically used when we are creating PMD entries.
 *
 *	clean_pmd_entry
 *
 *	Clean (but don't drain the write buffer) if the CPU requires
 *	these operations.  This is typically used when we are removing
 *	PMD entries.
 */
static inline void flush_pmd_entry(pmd_t *pmd)
{
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	if (tlb_flag(TLB_DCLEAN))
		asm("mcr p15, 0, %0, c7, c10, 1	@ flush_pmd"
			: : "r" (pmd) : "cc");

	if (tlb_flag(TLB_L2CLEAN_FR))
		asm("mcr p15, 1, %0, c15, c9, 1  @ L2 flush_pmd"
			: : "r" (pmd) : "cc");

	if (tlb_flag(TLB_WB))
		dsb();
}

static inline void clean_pmd_entry(pmd_t *pmd)
{
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	if (tlb_flag(TLB_DCLEAN))
		asm("mcr p15, 0, %0, c7, c10, 1	@ flush_pmd"
			: : "r" (pmd) : "cc");

	if (tlb_flag(TLB_L2CLEAN_FR))
		asm("mcr p15, 1, %0, c15, c9, 1  @ L2 flush_pmd"
			: : "r" (pmd) : "cc");
}
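
/*
 * Illustration (not from the original header; values hypothetical): code
 * that installs a section mapping typically stores the entry and then
 * pushes it out for the hardware page-table walker:
 *
 *	*pmd = __pmd(phys | PMD_TYPE_SECT);
 *	flush_pmd_entry(pmd);
 *
 * The c7, c10, 1 mcr above cleans the D-cache line holding the entry; on
 * Feroceon the extra L2 clean makes it visible to the non-outer-cacheable
 * page table walks noted in the MMU TLB model list above.
 */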

#undef tlb_flag
#undef always_tlb_flags
#undef possible_tlb_flags

/*
 * Convert calls to our calling convention.
 */
#define local_flush_tlb_range(vma,start,end)	__cpu_flush_user_tlb_range(start,end,vma)
#define local_flush_tlb_kernel_range(s,e)	__cpu_flush_kern_tlb_range(s,e)

#ifndef CONFIG_SMP
#define flush_tlb_all		local_flush_tlb_all
#define flush_tlb_mm		local_flush_tlb_mm
#define flush_tlb_page		local_flush_tlb_page
#define flush_tlb_kernel_page	local_flush_tlb_kernel_page
#define flush_tlb_range		local_flush_tlb_range
#define flush_tlb_kernel_range	local_flush_tlb_kernel_range
#else
extern void flush_tlb_all(void);
extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr);
extern void flush_tlb_kernel_page(unsigned long kaddr);
extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
#endif
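
/*
 * On UP builds the flush_tlb_* names are therefore literally the local
 * inlines above, with no call overhead.  On SMP the extern versions are
 * provided out of line (in this kernel's era, in the ARM SMP support code)
 * and broadcast the flush to the other CPUs, typically via IPIs, as well
 * as performing it locally.
 */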

/*
 * if PG_dcache_dirty is set for the page, we need to ensure that any
 * cache entries for the kernel's virtual memory range are written
 * back to the page.
 */
extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte);

#endif /* __ASSEMBLY__ */

#endif /* CONFIG_MMU */

#endif