#ifndef _ASM_X86_TLBFLUSH_H
#define _ASM_X86_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>
#include <asm/smp.h>

static inline void __invpcid(unsigned long pcid, unsigned long addr,
			     unsigned long type)
{
	struct { u64 d[2]; } desc = { { pcid, addr } };

	/*
	 * The memory clobber is because the whole point is to invalidate
	 * stale TLB entries and, especially if we're flushing global
	 * mappings, we don't want the compiler to reorder any subsequent
	 * memory accesses before the TLB flush.
	 *
	 * The hex opcode is invpcid (%ecx), %eax in 32-bit mode and
	 * invpcid (%rcx), %rax in long mode.
	 */
	asm volatile (".byte 0x66, 0x0f, 0x38, 0x82, 0x01"
		      : : "m" (desc), "a" (type), "c" (&desc) : "memory");
}
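
/*
 * For reference: the descriptor above matches the INVPCID memory
 * operand format from the SDM -- d[0] holds the PCID in bits 11:0
 * (the upper bits must be zero) and d[1] holds the linear address,
 * which is only consulted by the individual-address flush type.
 */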

#define INVPCID_TYPE_INDIV_ADDR		0
#define INVPCID_TYPE_SINGLE_CTXT	1
#define INVPCID_TYPE_ALL_INCL_GLOBAL	2
#define INVPCID_TYPE_ALL_NON_GLOBAL	3

/* Flush one address's mapping in the given PCID, not including globals. */
static inline void invpcid_flush_one(unsigned long pcid,
				     unsigned long addr)
{
	__invpcid(pcid, addr, INVPCID_TYPE_INDIV_ADDR);
}

/* Flush all mappings for a given PCID, not including globals. */
static inline void invpcid_flush_single_context(unsigned long pcid)
{
	__invpcid(pcid, 0, INVPCID_TYPE_SINGLE_CTXT);
}

/* Flush all mappings, including globals, for all PCIDs. */
static inline void invpcid_flush_all(void)
{
	__invpcid(0, 0, INVPCID_TYPE_ALL_INCL_GLOBAL);
}

/* Flush all mappings for all PCIDs except globals. */
static inline void invpcid_flush_all_nonglobals(void)
{
	__invpcid(0, 0, INVPCID_TYPE_ALL_NON_GLOBAL);
}
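
/*
 * Illustrative usage (a sketch, not part of this header): callers are
 * expected to check for INVPCID support before using these wrappers,
 * e.g.:
 *
 *	if (static_cpu_has(X86_FEATURE_INVPCID))
 *		invpcid_flush_all();
 *	else
 *		... fall back to a CR4.PGE toggle ...
 *
 * __native_flush_tlb_global() below follows exactly this pattern.
 */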

static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
{
	u64 new_tlb_gen;

	/*
	 * Bump the generation count.  This also serves as a full barrier
	 * that synchronizes with switch_mm(): callers are required to order
	 * their read of mm_cpumask after their writes to the paging
	 * structures.
	 */
	smp_mb__before_atomic();
	new_tlb_gen = atomic64_inc_return(&mm->context.tlb_gen);
	smp_mb__after_atomic();

	return new_tlb_gen;
}
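
/*
 * Illustrative caller contract (a sketch, not part of this header): a
 * flush initiator is expected to do roughly
 *
 *	... update the page tables ...
 *	new_tlb_gen = inc_mm_tlb_gen(mm);
 *	... read mm_cpumask(mm) and send flush IPIs ...
 *
 * relying on the barriers above so that any CPU that observes the new
 * tlb_gen also observes the page-table updates.
 */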

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __flush_tlb() __native_flush_tlb()
#define __flush_tlb_global() __native_flush_tlb_global()
#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
#endif

static inline bool tlb_defer_switch_to_init_mm(void)
{
	/*
	 * If we have PCID, then switching to init_mm is reasonably
	 * fast.  If we don't have PCID, then switching to init_mm is
	 * quite slow, so we try to defer it in the hopes that we can
	 * avoid it entirely.  The latter approach runs the risk of
	 * receiving otherwise unnecessary IPIs.
	 *
	 * This choice is just a heuristic.  The tlb code can handle this
	 * function returning true or false regardless of whether we have
	 * PCID.
	 */
	return !static_cpu_has(X86_FEATURE_PCID);
}

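/*
 * Illustrative use (a sketch, not part of this header): the lazy-TLB
 * code is expected to consult this helper when a CPU stops using a
 * real mm, roughly:
 *
 *	if (tlb_defer_switch_to_init_mm())
 *		... stay on the old mm and mark this CPU lazy ...
 *	else
 *		... switch to init_mm eagerly ...
 *
 * with the actual bookkeeping living in arch/x86/mm/tlb.c.
 */
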
/*
 * 6 because 6 should be plenty and struct tlb_state will fit in
 * two cache lines.
 */
#define TLB_NR_DYN_ASIDS	6

struct tlb_context {
	u64 ctx_id;	/* identifies which mm these entries came from */
	u64 tlb_gen;	/* the mm's tlb_gen that this context has caught up to */
};

struct tlb_state {
	/*
	 * cpu_tlbstate.loaded_mm should match CR3 whenever interrupts
	 * are on.  This means that it may not match current->active_mm,
	 * which will contain the previous user mm when we're in lazy TLB
	 * mode even if we've already switched back to swapper_pg_dir.
	 */
	struct mm_struct *loaded_mm;
	u16 loaded_mm_asid;
	u16 next_asid;

	/*
	 * We can be in one of several states:
	 *
	 *  - Actively using an mm.  Our CPU's bit will be set in
	 *    mm_cpumask(loaded_mm) and is_lazy == false.
	 *
	 *  - Not using a real mm.  loaded_mm == &init_mm.  Our CPU's bit
	 *    will not be set in mm_cpumask(&init_mm) and is_lazy == false.
	 *
	 *  - Lazily using a real mm.  loaded_mm != &init_mm, our bit
	 *    is set in mm_cpumask(loaded_mm), but is_lazy == true.
	 *    We're heuristically guessing that the CR3 load we
	 *    skipped more than makes up for the overhead added by
	 *    lazy mode.
	 */
	bool is_lazy;

	/*
	 * Access to this CR4 shadow and to H/W CR4 is protected by
	 * disabling interrupts when modifying either one.
	 */
	unsigned long cr4;

	/*
	 * This is a list of all contexts that might exist in the TLB.
	 * There is one per ASID that we use, and the ASID (what the
	 * CPU calls PCID) is the index into ctxs.
	 *
	 * For each context, ctx_id indicates which mm the TLB's user
	 * entries came from.  As an invariant, the TLB will never
	 * contain entries that are out-of-date with respect to the
	 * tlb_gen recorded here for that mm.
	 *
	 * To be clear, this means that it's legal for the TLB code to
	 * flush the TLB without updating tlb_gen.  This can happen
	 * (for now, at least) due to paravirt remote flushes.
	 *
	 * NB: context 0 is a bit special, since it's also used by
	 * various bits of init code.  This is fine -- code that
	 * isn't aware of PCID will end up harmlessly flushing
	 * context 0.
	 */
	struct tlb_context ctxs[TLB_NR_DYN_ASIDS];
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);

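/*
 * Illustrative access pattern (a sketch, not part of this header):
 * cpu_tlbstate is per-CPU data, so readers use the this_cpu accessors
 * with preemption under control, e.g.:
 *
 *	struct mm_struct *mm = this_cpu_read(cpu_tlbstate.loaded_mm);
 *	u16 asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
 */
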
/* Initialize cr4 shadow for this CPU. */
static inline void cr4_init_shadow(void)
{
	this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
}

/* Set in this cpu's CR4. */
static inline void cr4_set_bits(unsigned long mask)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	if ((cr4 | mask) != cr4) {
		cr4 |= mask;
		this_cpu_write(cpu_tlbstate.cr4, cr4);
		__write_cr4(cr4);
	}
}

/* Clear in this cpu's CR4. */
static inline void cr4_clear_bits(unsigned long mask)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	if ((cr4 & ~mask) != cr4) {
		cr4 &= ~mask;
		this_cpu_write(cpu_tlbstate.cr4, cr4);
		__write_cr4(cr4);
	}
}
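
/*
 * Illustrative usage (a sketch, not part of this header): feature setup
 * code updates CR4 through these helpers so the shadow stays coherent
 * with the hardware register, e.g. enabling SMEP during CPU init:
 *
 *	if (boot_cpu_has(X86_FEATURE_SMEP))
 *		cr4_set_bits(X86_CR4_SMEP);
 */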

/*
 * Toggle the given bits in this cpu's CR4.  This is a plain
 * read-modify-write of the CR4 shadow, so the caller must ensure it
 * cannot be preempted or reentered while it runs.
 */
static inline void cr4_toggle_bits(unsigned long mask)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	cr4 ^= mask;
	this_cpu_write(cpu_tlbstate.cr4, cr4);
	__write_cr4(cr4);
}

/* Read the CR4 shadow. */
static inline unsigned long cr4_read_shadow(void)
{
	return this_cpu_read(cpu_tlbstate.cr4);
}

/*
 * Save some of the cr4 feature set we're using (e.g. the Pentium 4MB
 * enable and the PPro Global page enable), so that any CPUs that boot
 * up after us can get the correct flags.  This should only be used
 * during boot on the boot cpu.
 */
extern unsigned long mmu_cr4_features;
extern u32 *trampoline_cr4_features;

static inline void cr4_set_bits_and_update_boot(unsigned long mask)
{
	mmu_cr4_features |= mask;
	if (trampoline_cr4_features)
		*trampoline_cr4_features = mmu_cr4_features;
	cr4_set_bits(mask);
}

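/*
 * Illustrative boot-time usage (a sketch, not part of this header):
 * early init code that turns on a paging feature records it here so
 * that CPUs brought up later inherit it, e.g.:
 *
 *	if (boot_cpu_has(X86_FEATURE_PSE))
 *		cr4_set_bits_and_update_boot(X86_CR4_PSE);
 */
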
extern void initialize_tlbstate_and_flush(void);

static inline void __native_flush_tlb(void)
{
	/*
	 * If current->mm == NULL then we borrow an mm which may change
	 * during a task switch and therefore we must not be preempted
	 * while we write CR3 back:
	 */
	preempt_disable();
	native_write_cr3(__native_read_cr3());
	preempt_enable();
}

static inline void __native_flush_tlb_global_irq_disabled(void)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	/* clear PGE */
	native_write_cr4(cr4 & ~X86_CR4_PGE);
	/* write old PGE again and flush TLBs */
	native_write_cr4(cr4);
}

static inline void __native_flush_tlb_global(void)
{
	unsigned long flags;

	if (static_cpu_has(X86_FEATURE_INVPCID)) {
		/*
		 * Using INVPCID is considerably faster than a pair of writes
		 * to CR4 sandwiched inside an IRQ flag save/restore.
		 */
		invpcid_flush_all();
		return;
	}

	/*
	 * Read-modify-write to CR4 - protect it from preemption and
	 * from interrupts.  (Use the raw variant because this code can
	 * be called from deep inside debugging code.)
	 */
	raw_local_irq_save(flags);

	__native_flush_tlb_global_irq_disabled();

	raw_local_irq_restore(flags);
}

/*
 * Flush the TLB entries for one page at @addr using invlpg.  Note that
 * invlpg, unlike the non-global INVPCID types, also flushes global
 * entries mapping @addr.
 */
static inline void __native_flush_tlb_single(unsigned long addr)
{
	asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
}

static inline void __flush_tlb_all(void)
{
	if (boot_cpu_has(X86_FEATURE_PGE))
		__flush_tlb_global();
	else
		__flush_tlb();

	/*
	 * Note: if we somehow had PCID but not PGE, then this wouldn't work --
	 * we'd end up flushing kernel translations for the current ASID but
	 * we might fail to flush kernel translations for other cached ASIDs.
	 *
	 * To avoid this issue, we force PCID off if PGE is off.
	 */
}

static inline void __flush_tlb_one(unsigned long addr)
{
	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
	__flush_tlb_single(addr);
}

#define TLB_FLUSH_ALL	-1UL

/*
 * TLB flushing:
 *
 *  - flush_tlb_all() flushes all processes' TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context's TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_others(cpumask, info) flushes TLBs on other cpus
 *
 * ..but the i386 has somewhat limited tlb flushing capabilities,
 * and page-granular flushes are available only on i486 and up.
 */
struct flush_tlb_info {
	/*
	 * We support several kinds of flushes.
	 *
	 * - Fully flush a single mm.  .mm will be set, .end will be
	 *   TLB_FLUSH_ALL, and .new_tlb_gen will be the tlb_gen to
	 *   which the IPI sender is trying to catch us up.
	 *
	 * - Partially flush a single mm.  .mm will be set, .start and
	 *   .end will indicate the range, and .new_tlb_gen will be set
	 *   such that the changes between generation .new_tlb_gen-1 and
	 *   .new_tlb_gen are entirely contained in the indicated range.
	 *
	 * - Fully flush all mms whose tlb_gens have been updated.  .mm
	 *   will be NULL, .end will be TLB_FLUSH_ALL, and .new_tlb_gen
	 *   will be zero.
	 */
	struct mm_struct *mm;
	unsigned long start;
	unsigned long end;
	u64 new_tlb_gen;
};

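/*
 * Illustrative setup (a sketch, not part of this header): a sender
 * performing a partial flush of one mm fills the structure roughly as
 * flush_tlb_mm_range() does:
 *
 *	struct flush_tlb_info info = {
 *		.mm = mm,
 *		.start = start,
 *		.end = end,
 *		.new_tlb_gen = inc_mm_tlb_gen(mm),
 *	};
 *	flush_tlb_others(mm_cpumask(mm), &info);
 */
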
#define local_flush_tlb() __flush_tlb()

#define flush_tlb_mm(mm)	flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)

#define flush_tlb_range(vma, start, end)	\
		flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)

extern void flush_tlb_all(void);
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned long vmflag);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
{
	flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, VM_NONE);
}

void native_flush_tlb_others(const struct cpumask *cpumask,
			     const struct flush_tlb_info *info);

static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
					struct mm_struct *mm)
{
	inc_mm_tlb_gen(mm);
	cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
}

extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);

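/*
 * Illustrative batching flow (a sketch, not part of this header): the
 * reclaim path accumulates target CPUs while unmapping pages and then
 * flushes once, e.g.:
 *
 *	arch_tlbbatch_add_mm(&batch, mm);	// per mm being unmapped
 *	...
 *	arch_tlbbatch_flush(&batch);		// one flush round for everything
 */
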
#ifndef CONFIG_PARAVIRT
#define flush_tlb_others(mask, info)	\
	native_flush_tlb_others(mask, info)
#endif

#endif /* _ASM_X86_TLBFLUSH_H */