/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_TLBFLUSH_H
#define _ASM_X86_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>
#include <asm/smp.h>

static inline void __invpcid(unsigned long pcid, unsigned long addr,
			     unsigned long type)
{
	struct { u64 d[2]; } desc = { { pcid, addr } };

	/*
	 * The memory clobber is because the whole point is to invalidate
	 * stale TLB entries and, especially if we're flushing global
	 * mappings, we don't want the compiler to reorder any subsequent
	 * memory accesses before the TLB flush.
	 *
	 * The hex opcode is invpcid (%ecx), %eax in 32-bit mode and
	 * invpcid (%rcx), %rax in long mode.
	 */
	asm volatile (".byte 0x66, 0x0f, 0x38, 0x82, 0x01"
		      : : "m" (desc), "a" (type), "c" (&desc) : "memory");
}
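
/*
 * Note on the descriptor above (per the architectural description of
 * INVPCID, added here for clarity): desc.d[0] carries the PCID in its low
 * 12 bits with the remaining bits zero, and desc.d[1] carries the linear
 * address, which is only consulted for the individual-address type.
 */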

#define INVPCID_TYPE_INDIV_ADDR		0
#define INVPCID_TYPE_SINGLE_CTXT	1
#define INVPCID_TYPE_ALL_INCL_GLOBAL	2
#define INVPCID_TYPE_ALL_NON_GLOBAL	3

/* Flush all mappings for a given pcid and addr, not including globals. */
static inline void invpcid_flush_one(unsigned long pcid,
				     unsigned long addr)
{
	__invpcid(pcid, addr, INVPCID_TYPE_INDIV_ADDR);
}

/* Flush all mappings for a given PCID, not including globals. */
static inline void invpcid_flush_single_context(unsigned long pcid)
{
	__invpcid(pcid, 0, INVPCID_TYPE_SINGLE_CTXT);
}

/* Flush all mappings, including globals, for all PCIDs. */
static inline void invpcid_flush_all(void)
{
	__invpcid(0, 0, INVPCID_TYPE_ALL_INCL_GLOBAL);
}

/* Flush all mappings for all PCIDs except globals. */
static inline void invpcid_flush_all_nonglobals(void)
{
	__invpcid(0, 0, INVPCID_TYPE_ALL_NON_GLOBAL);
}
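
/*
 * Illustrative usage only (the real callers live elsewhere in the mm code):
 * a caller that knows INVPCID is available and wants to drop one user
 * translation for a given PCID could do something like
 *
 *	if (static_cpu_has(X86_FEATURE_INVPCID))
 *		invpcid_flush_one(pcid, addr);
 *	else
 *		... fall back to a CR3 write or invlpg ...
 *
 * where 'pcid' and 'addr' are placeholders, not values defined in this file.
 */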

static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
{
	u64 new_tlb_gen;

	/*
	 * Bump the generation count.  This also serves as a full barrier
	 * that synchronizes with switch_mm(): callers are required to order
	 * their read of mm_cpumask after their writes to the paging
	 * structures.
	 */
	smp_mb__before_atomic();
	new_tlb_gen = atomic64_inc_return(&mm->context.tlb_gen);
	smp_mb__after_atomic();

	return new_tlb_gen;
}
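
/*
 * Sketch of the ordering contract described above (illustrative, not an
 * actual caller):
 *
 *	... modify the paging structures ...
 *	new_tlb_gen = inc_mm_tlb_gen(mm);	// acts as a full barrier
 *	... only now read mm_cpumask(mm) and notify other CPUs ...
 */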

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __flush_tlb() __native_flush_tlb()
#define __flush_tlb_global() __native_flush_tlb_global()
#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
#endif

static inline bool tlb_defer_switch_to_init_mm(void)
{
	/*
	 * If we have PCID, then switching to init_mm is reasonably
	 * fast.  If we don't have PCID, then switching to init_mm is
	 * quite slow, so we try to defer it in the hopes that we can
	 * avoid it entirely.  The latter approach runs the risk of
	 * receiving otherwise unnecessary IPIs.
	 *
	 * This choice is just a heuristic.  The tlb code can handle this
	 * function returning true or false regardless of whether we have
	 * PCID.
	 */
	return !static_cpu_has(X86_FEATURE_PCID);
}

/*
 * 6 because 6 should be plenty and struct tlb_state will fit in
 * two cache lines.
 */
#define TLB_NR_DYN_ASIDS	6
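
/*
 * Rough sanity check of the "two cache lines" claim (assuming 64-byte
 * lines): each struct tlb_context below is two u64s, i.e. 16 bytes, so
 * six of them take 96 bytes, and together with the scalar fields of
 * struct tlb_state the whole structure stays within 128 bytes.
 */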

struct tlb_context {
	u64 ctx_id;
	u64 tlb_gen;
};

struct tlb_state {
	/*
	 * cpu_tlbstate.loaded_mm should match CR3 whenever interrupts
	 * are on.  This means that it may not match current->active_mm,
	 * which will contain the previous user mm when we're in lazy TLB
	 * mode even if we've already switched back to swapper_pg_dir.
	 */
	struct mm_struct *loaded_mm;
	u16 loaded_mm_asid;
	u16 next_asid;

	/*
	 * We can be in one of several states:
	 *
	 *  - Actively using an mm.  Our CPU's bit will be set in
	 *    mm_cpumask(loaded_mm) and is_lazy == false.
	 *
	 *  - Not using a real mm.  loaded_mm == &init_mm.  Our CPU's bit
	 *    will not be set in mm_cpumask(&init_mm) and is_lazy == false.
	 *
	 *  - Lazily using a real mm.  loaded_mm != &init_mm, our bit
	 *    is set in mm_cpumask(loaded_mm), but is_lazy == true.
	 *    We're heuristically guessing that the CR3 load we
	 *    skipped more than makes up for the overhead added by
	 *    lazy mode.
	 */
	bool is_lazy;

	/*
	 * Access to this CR4 shadow and to H/W CR4 is protected by
	 * disabling interrupts when modifying either one.
	 */
	unsigned long cr4;

	/*
	 * This is a list of all contexts that might exist in the TLB.
	 * There is one per ASID that we use, and the ASID (what the
	 * CPU calls PCID) is the index into ctxs.
	 *
	 * For each context, ctx_id indicates which mm the TLB's user
	 * entries came from.  As an invariant, the TLB will never
	 * contain entries that are out-of-date with respect to the
	 * tlb_gen recorded here.
	 *
	 * To be clear, this means that it's legal for the TLB code to
	 * flush the TLB without updating tlb_gen.  This can happen
	 * (for now, at least) due to paravirt remote flushes.
	 *
	 * NB: context 0 is a bit special, since it's also used by
	 * various bits of init code.  This is fine -- code that
	 * isn't aware of PCID will end up harmlessly flushing
	 * context 0.
	 */
	struct tlb_context ctxs[TLB_NR_DYN_ASIDS];
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
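
/*
 * cpu_tlbstate is per-CPU state; it is normally touched through the
 * this_cpu_*() accessors, as the cr4 helpers below do.  For example
 * (illustrative only):
 *
 *	struct mm_struct *mm = this_cpu_read(cpu_tlbstate.loaded_mm);
 *	unsigned long cr4 = this_cpu_read(cpu_tlbstate.cr4);
 */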

/* Initialize cr4 shadow for this CPU. */
static inline void cr4_init_shadow(void)
{
	this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
}

static inline void __cr4_set(unsigned long cr4)
{
	lockdep_assert_irqs_disabled();
	this_cpu_write(cpu_tlbstate.cr4, cr4);
	__write_cr4(cr4);
}

/* Set in this cpu's CR4. */
static inline void cr4_set_bits(unsigned long mask)
{
	unsigned long cr4, flags;

	local_irq_save(flags);
	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	if ((cr4 | mask) != cr4)
		__cr4_set(cr4 | mask);
	local_irq_restore(flags);
}

/* Clear in this cpu's CR4. */
static inline void cr4_clear_bits(unsigned long mask)
{
	unsigned long cr4, flags;

	local_irq_save(flags);
	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	if ((cr4 & ~mask) != cr4)
		__cr4_set(cr4 & ~mask);
	local_irq_restore(flags);
}

static inline void cr4_toggle_bits_irqsoff(unsigned long mask)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	__cr4_set(cr4 ^ mask);
}

/* Read the CR4 shadow. */
static inline unsigned long cr4_read_shadow(void)
{
	return this_cpu_read(cpu_tlbstate.cr4);
}
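
/*
 * Usage sketch (illustrative): a CR4 feature bit is flipped through these
 * helpers rather than by writing CR4 directly, so the per-CPU shadow stays
 * coherent with the hardware register, e.g.
 *
 *	cr4_set_bits(X86_CR4_PGE);
 *	...
 *	cr4_clear_bits(X86_CR4_PGE);
 */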

/*
 * Save some of the cr4 feature set we're using (e.g. Pentium 4MB
 * enable and PPro Global page enable), so that any CPUs that boot
 * up after us can get the correct flags.  This should only be used
 * during boot on the boot cpu.
 */
extern unsigned long mmu_cr4_features;
extern u32 *trampoline_cr4_features;

static inline void cr4_set_bits_and_update_boot(unsigned long mask)
{
	mmu_cr4_features |= mask;
	if (trampoline_cr4_features)
		*trampoline_cr4_features = mmu_cr4_features;
	cr4_set_bits(mask);
}

extern void initialize_tlbstate_and_flush(void);

static inline void __native_flush_tlb(void)
{
	/*
	 * If current->mm == NULL then we borrow a mm which may change
	 * during a task switch and therefore we must not be preempted
	 * while we write CR3 back:
	 */
	preempt_disable();
	native_write_cr3(__native_read_cr3());
	preempt_enable();
}
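
/*
 * Note (added for clarity): when CR4.PGE is set, rewriting CR3 like this
 * flushes only non-global TLB entries; global kernel mappings survive.
 * That is why __flush_tlb_all() below prefers the global variant when
 * PGE is available.
 */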

static inline void __native_flush_tlb_global_irq_disabled(void)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	/* clear PGE */
	native_write_cr4(cr4 & ~X86_CR4_PGE);
	/* write old PGE again and flush TLBs */
	native_write_cr4(cr4);
}

static inline void __native_flush_tlb_global(void)
{
	unsigned long flags;

	if (static_cpu_has(X86_FEATURE_INVPCID)) {
		/*
		 * Using INVPCID is considerably faster than a pair of writes
		 * to CR4 sandwiched inside an IRQ flag save/restore.
		 */
		invpcid_flush_all();
		return;
	}

	/*
	 * Read-modify-write to CR4 - protect it from preemption and
	 * from interrupts.  (Use the raw variant because this code can
	 * be called from deep inside debugging code.)
	 */
	raw_local_irq_save(flags);

	__native_flush_tlb_global_irq_disabled();

	raw_local_irq_restore(flags);
}

static inline void __native_flush_tlb_single(unsigned long addr)
{
	asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
}
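
/*
 * Note (added for clarity): unlike a CR3 reload, invlpg also drops a global
 * mapping of 'addr' if one is cached, so this works for kernel addresses
 * even with PGE enabled.
 */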

static inline void __flush_tlb_all(void)
{
	if (boot_cpu_has(X86_FEATURE_PGE))
		__flush_tlb_global();
	else
		__flush_tlb();

	/*
	 * Note: if we somehow had PCID but not PGE, then this wouldn't work --
	 * we'd end up flushing kernel translations for the current ASID but
	 * we might fail to flush kernel translations for other cached ASIDs.
	 *
	 * To avoid this issue, we force PCID off if PGE is off.
	 */
}

static inline void __flush_tlb_one(unsigned long addr)
{
	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
	__flush_tlb_single(addr);
}

#define TLB_FLUSH_ALL	-1UL

/*
 * TLB flushing:
 *
 *  - flush_tlb_all() flushes all processes' TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context's TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_others(cpumask, info) flushes TLBs on other cpus
 *
 * ...but the i386 has somewhat limited tlb flushing capabilities,
 * and page-granular flushes are available only on i486 and up.
 */
struct flush_tlb_info {
	/*
	 * We support several kinds of flushes.
	 *
	 * - Fully flush a single mm.  .mm will be set, .end will be
	 *   TLB_FLUSH_ALL, and .new_tlb_gen will be the tlb_gen to
	 *   which the IPI sender is trying to catch us up.
	 *
	 * - Partially flush a single mm.  .mm will be set, .start and
	 *   .end will indicate the range, and .new_tlb_gen will be set
	 *   such that the changes between generation .new_tlb_gen-1 and
	 *   .new_tlb_gen are entirely contained in the indicated range.
	 *
	 * - Fully flush all mms whose tlb_gens have been updated.  .mm
	 *   will be NULL, .end will be TLB_FLUSH_ALL, and .new_tlb_gen
	 *   will be zero.
	 */
	struct mm_struct *mm;
	unsigned long start;
	unsigned long end;
	u64 new_tlb_gen;
};
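
/*
 * Illustrative encoding of the first case above (a full flush of one mm),
 * roughly what the flush_tlb_*() implementations in arch/x86/mm/tlb.c build
 * before notifying other CPUs:
 *
 *	struct flush_tlb_info info = {
 *		.mm		= mm,
 *		.start		= 0,
 *		.end		= TLB_FLUSH_ALL,
 *		.new_tlb_gen	= inc_mm_tlb_gen(mm),
 *	};
 *	flush_tlb_others(mm_cpumask(mm), &info);
 */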

#define local_flush_tlb() __flush_tlb()

#define flush_tlb_mm(mm)	flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)

#define flush_tlb_range(vma, start, end)	\
		flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)

extern void flush_tlb_all(void);
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned long vmflag);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
{
	flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, VM_NONE);
}

void native_flush_tlb_others(const struct cpumask *cpumask,
			     const struct flush_tlb_info *info);

static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
					struct mm_struct *mm)
{
	inc_mm_tlb_gen(mm);
	cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
}
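
/*
 * Sketch of how the batching API above is meant to be used by the generic
 * reclaim/unmap code (illustrative; the real callers are in mm/):
 *
 *	arch_tlbbatch_add_mm(&batch, mm);	// while unmapping pages
 *	...
 *	arch_tlbbatch_flush(&batch);		// one flush round at the end
 */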

extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);

#ifndef CONFIG_PARAVIRT
#define flush_tlb_others(mask, info)	\
	native_flush_tlb_others(mask, info)
#endif

#endif /* _ASM_X86_TLBFLUSH_H */