/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_TLBFLUSH_H
#define _ASM_X86_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>
#include <asm/smp.h>

static inline void __invpcid(unsigned long pcid, unsigned long addr,
                             unsigned long type)
{
        struct { u64 d[2]; } desc = { { pcid, addr } };

        /*
         * The memory clobber is because the whole point is to invalidate
         * stale TLB entries and, especially if we're flushing global
         * mappings, we don't want the compiler to reorder any subsequent
         * memory accesses before the TLB flush.
         *
         * The hex opcode is invpcid (%ecx), %eax in 32-bit mode and
         * invpcid (%rcx), %rax in long mode.
         */
        asm volatile (".byte 0x66, 0x0f, 0x38, 0x82, 0x01"
                      : : "m" (desc), "a" (type), "c" (&desc) : "memory");
}

#define INVPCID_TYPE_INDIV_ADDR         0
#define INVPCID_TYPE_SINGLE_CTXT        1
#define INVPCID_TYPE_ALL_INCL_GLOBAL    2
#define INVPCID_TYPE_ALL_NON_GLOBAL     3

/* Flush all mappings for a given pcid and addr, not including globals. */
static inline void invpcid_flush_one(unsigned long pcid,
                                     unsigned long addr)
{
        __invpcid(pcid, addr, INVPCID_TYPE_INDIV_ADDR);
}

/* Flush all mappings for a given PCID, not including globals. */
static inline void invpcid_flush_single_context(unsigned long pcid)
{
        __invpcid(pcid, 0, INVPCID_TYPE_SINGLE_CTXT);
}

/* Flush all mappings, including globals, for all PCIDs. */
static inline void invpcid_flush_all(void)
{
        __invpcid(0, 0, INVPCID_TYPE_ALL_INCL_GLOBAL);
}

/* Flush all mappings for all PCIDs except globals. */
static inline void invpcid_flush_all_nonglobals(void)
{
        __invpcid(0, 0, INVPCID_TYPE_ALL_NON_GLOBAL);
}

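/*
 * Illustrative sketch (not part of the original header): the helpers above
 * are only usable when the CPU advertises INVPCID, so callers gate them on
 * the feature bit and otherwise fall back to CR3/CR4 based flushing.
 * example_flush_current_context() is a hypothetical name for exposition;
 * the real decisions are made by the flush routines later in this file and
 * in arch/x86/mm/tlb.c.
 */
static inline void example_flush_current_context(unsigned long pcid)
{
        if (static_cpu_has(X86_FEATURE_INVPCID))
                invpcid_flush_single_context(pcid);
        else
                /* A CR3 reload flushes only the currently loaded PCID's non-global entries. */
                native_write_cr3(__native_read_cr3());
}
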
static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
{
        /*
         * Bump the generation count. This also serves as a full barrier
         * that synchronizes with switch_mm(): callers are required to order
         * their read of mm_cpumask after their writes to the paging
         * structures.
         */
        return atomic64_inc_return(&mm->context.tlb_gen);
}

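/*
 * Illustrative sketch (not part of the original header): how the barrier
 * above is relied upon.  A hypothetical caller that just modified page
 * tables bumps the generation first; only then does it read mm_cpumask()
 * to decide whether other CPUs must be told to flush.  The real sequence,
 * including shipping the new generation to remote CPUs, lives in
 * flush_tlb_mm_range() in arch/x86/mm/tlb.c.
 */
static inline bool example_mm_needs_remote_flush(struct mm_struct *mm)
{
        /*
         * Full barrier: the caller's page-table writes are ordered before
         * the mm_cpumask() read below.  The real code also passes the
         * returned generation along in struct flush_tlb_info.
         */
        inc_mm_tlb_gen(mm);

        /* Any other CPU still in the mask may be caching stale translations. */
        return cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids;
}
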
/* There are 12 bits of space for ASIDs in CR3 */
#define CR3_HW_ASID_BITS                12
/*
 * When enabled, PAGE_TABLE_ISOLATION consumes a single bit for
 * user/kernel switches
 */
#define PTI_CONSUMED_ASID_BITS          0

#define CR3_AVAIL_ASID_BITS (CR3_HW_ASID_BITS - PTI_CONSUMED_ASID_BITS)
/*
 * ASIDs are zero-based: 0..MAX_ASID_AVAILABLE are valid.  -1 below to
 * account for them being zero-based.  Another -1 is because ASID 0 is
 * reserved for use by non-PCID-aware users.
 */
#define MAX_ASID_AVAILABLE ((1 << CR3_AVAIL_ASID_BITS) - 2)

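/*
 * Worked example (not in the original header), using the values above:
 * CR3_AVAIL_ASID_BITS = 12 - 0 = 12, so the hardware provides PCIDs
 * 0..4095.  PCID 0 is reserved for non-PCID-aware code and an ASID is
 * stored in CR3 as ASID + 1, which leaves
 * MAX_ASID_AVAILABLE = (1 << 12) - 2 = 4094 as the largest usable ASID.
 */
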
/*
 * If PCID is on, ASID-aware code paths put the ASID+1 into the PCID bits.
 * This serves two purposes.  It prevents a nasty situation in which
 * PCID-unaware code saves CR3, loads some other value (with PCID == 0),
 * and then restores CR3, thus corrupting the TLB for ASID 0 if the saved
 * ASID was nonzero.  It also means that any bugs involving loading a
 * PCID-enabled CR3 with CR4.PCIDE off will trigger deterministically.
 */
struct pgd_t;
static inline unsigned long build_cr3(pgd_t *pgd, u16 asid)
{
        if (static_cpu_has(X86_FEATURE_PCID)) {
                VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);
                return __sme_pa(pgd) | (asid + 1);
        } else {
                VM_WARN_ON_ONCE(asid != 0);
                return __sme_pa(pgd);
        }
}

static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid)
{
        VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);
        return __sme_pa(pgd) | (asid + 1) | CR3_NOFLUSH;
}

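/*
 * Worked example (not in the original header), ignoring the SME
 * encryption bit that __sme_pa() may add: for a pgd at physical address
 * 0x1234000 and asid 5, build_cr3() returns 0x1234006 -- the page-table
 * base with PCID 6 (asid + 1) in the low twelve bits.  build_cr3_noflush()
 * additionally sets the CR3 "no flush" bit (bit 63), telling the CPU to
 * keep whatever it has cached for that PCID across the CR3 write.
 */
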
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __flush_tlb() __native_flush_tlb()
#define __flush_tlb_global() __native_flush_tlb_global()
#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
#endif

static inline bool tlb_defer_switch_to_init_mm(void)
{
        /*
         * If we have PCID, then switching to init_mm is reasonably
         * fast.  If we don't have PCID, then switching to init_mm is
         * quite slow, so we try to defer it in the hopes that we can
         * avoid it entirely.  The latter approach runs the risk of
         * receiving otherwise unnecessary IPIs.
         *
         * This choice is just a heuristic.  The tlb code can handle this
         * function returning true or false regardless of whether we have
         * PCID.
         */
        return !static_cpu_has(X86_FEATURE_PCID);
}

/*
 * 6 because 6 should be plenty and struct tlb_state will fit in
 * two cache lines.
 */
#define TLB_NR_DYN_ASIDS        6

struct tlb_context {
        u64 ctx_id;
        u64 tlb_gen;
};

struct tlb_state {
        /*
         * cpu_tlbstate.loaded_mm should match CR3 whenever interrupts
         * are on.  This means that it may not match current->active_mm,
         * which will contain the previous user mm when we're in lazy TLB
         * mode even if we've already switched back to swapper_pg_dir.
         */
        struct mm_struct *loaded_mm;
        u16 loaded_mm_asid;
        u16 next_asid;

        /*
         * We can be in one of several states:
         *
         *  - Actively using an mm.  Our CPU's bit will be set in
         *    mm_cpumask(loaded_mm) and is_lazy == false.
         *
         *  - Not using a real mm.  loaded_mm == &init_mm.  Our CPU's bit
         *    will not be set in mm_cpumask(&init_mm) and is_lazy == false.
         *
         *  - Lazily using a real mm.  loaded_mm != &init_mm, our bit
         *    is set in mm_cpumask(loaded_mm), but is_lazy == true.
         *    We're heuristically guessing that the CR3 load we
         *    skipped more than makes up for the overhead added by
         *    lazy mode.
         */
        bool is_lazy;

        /*
         * Access to this CR4 shadow and to H/W CR4 is protected by
         * disabling interrupts when modifying either one.
         */
        unsigned long cr4;

        /*
         * This is a list of all contexts that might exist in the TLB.
         * There is one per ASID that we use, and the ASID (what the
         * CPU calls PCID) is the index into ctxs.
         *
         * For each context, ctx_id indicates which mm the TLB's user
         * entries came from.  As an invariant, the TLB will never
         * contain entries that are out-of-date as of when that mm
         * reached the tlb_gen in the list.
         *
         * To be clear, this means that it's legal for the TLB code to
         * flush the TLB without updating tlb_gen.  This can happen
         * (for now, at least) due to paravirt remote flushes.
         *
         * NB: context 0 is a bit special, since it's also used by
         * various bits of init code.  This is fine -- code that
         * isn't aware of PCID will end up harmlessly flushing
         * context 0.
         */
        struct tlb_context ctxs[TLB_NR_DYN_ASIDS];
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);

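/*
 * Illustrative sketch (not part of the original header): how the per-ASID
 * bookkeeping above is consulted.  The user entries cached under 'asid'
 * can be reused without a flush only if that slot still belongs to 'mm'
 * (ctx_id matches) and its recorded generation has caught up with the
 * mm's current tlb_gen.  example_asid_is_up_to_date() is a hypothetical
 * helper; the real checks live in arch/x86/mm/tlb.c.
 */
static inline bool example_asid_is_up_to_date(struct mm_struct *mm, u16 asid)
{
        /* 'asid' is assumed to be < TLB_NR_DYN_ASIDS. */
        return this_cpu_read(cpu_tlbstate.ctxs[asid].ctx_id) ==
                        mm->context.ctx_id &&
               this_cpu_read(cpu_tlbstate.ctxs[asid].tlb_gen) ==
                        atomic64_read(&mm->context.tlb_gen);
}
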
/* Initialize cr4 shadow for this CPU. */
static inline void cr4_init_shadow(void)
{
        this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
}

/* Set bits in this CPU's CR4 and in its shadow. */
static inline void cr4_set_bits(unsigned long mask)
{
        unsigned long cr4;

        cr4 = this_cpu_read(cpu_tlbstate.cr4);
        if ((cr4 | mask) != cr4) {
                cr4 |= mask;
                this_cpu_write(cpu_tlbstate.cr4, cr4);
                __write_cr4(cr4);
        }
}

/* Clear bits in this CPU's CR4 and in its shadow. */
static inline void cr4_clear_bits(unsigned long mask)
{
        unsigned long cr4;

        cr4 = this_cpu_read(cpu_tlbstate.cr4);
        if ((cr4 & ~mask) != cr4) {
                cr4 &= ~mask;
                this_cpu_write(cpu_tlbstate.cr4, cr4);
                __write_cr4(cr4);
        }
}

/* Toggle bits in this CPU's CR4 and in its shadow. */
static inline void cr4_toggle_bits(unsigned long mask)
{
        unsigned long cr4;

        cr4 = this_cpu_read(cpu_tlbstate.cr4);
        cr4 ^= mask;
        this_cpu_write(cpu_tlbstate.cr4, cr4);
        __write_cr4(cr4);
}

/* Read the CR4 shadow. */
static inline unsigned long cr4_read_shadow(void)
{
        return this_cpu_read(cpu_tlbstate.cr4);
}

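/*
 * Illustrative sketch (not part of the original header): typical use of
 * the shadow helpers above.  Per the comment in struct tlb_state, callers
 * are expected to run with interrupts disabled when modifying CR4.  The
 * kernel toggles X86_CR4_TSD like this on context switch when one task may
 * use RDTSC and the other may not; example_update_tsd() itself is a
 * hypothetical helper.
 */
static inline void example_update_tsd(bool disable_rdtsc)
{
        if (disable_rdtsc != !!(cr4_read_shadow() & X86_CR4_TSD))
                cr4_toggle_bits(X86_CR4_TSD);
}
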
/*
 * Save some of the CR4 feature set we're using (e.g. Pentium 4MB
 * enable and PPro Global page enable), so that any CPUs that boot
 * up after us can get the correct flags.  This should only be used
 * during boot on the boot CPU.
 */
extern unsigned long mmu_cr4_features;
extern u32 *trampoline_cr4_features;

static inline void cr4_set_bits_and_update_boot(unsigned long mask)
{
        mmu_cr4_features |= mask;
        if (trampoline_cr4_features)
                *trampoline_cr4_features = mmu_cr4_features;
        cr4_set_bits(mask);
}

extern void initialize_tlbstate_and_flush(void);

/*
 * flush the entire current user mapping
 */
static inline void __native_flush_tlb(void)
{
        /*
         * If current->mm == NULL then we borrow a mm which may change during a
         * task switch and therefore we must not be preempted while we write CR3
         * back:
         */
        preempt_disable();
        native_write_cr3(__native_read_cr3());
        preempt_enable();
}

/*
 * flush everything
 */
static inline void __native_flush_tlb_global(void)
{
        unsigned long cr4, flags;

        if (static_cpu_has(X86_FEATURE_INVPCID)) {
                /*
                 * Using INVPCID is considerably faster than a pair of writes
                 * to CR4 sandwiched inside an IRQ flag save/restore.
                 */
                invpcid_flush_all();
                return;
        }

        /*
         * Read-modify-write to CR4 - protect it from preemption and
         * from interrupts.  (Use the raw variant because this code can
         * be called from deep inside debugging code.)
         */
        raw_local_irq_save(flags);

        cr4 = this_cpu_read(cpu_tlbstate.cr4);
        /* toggle PGE */
        native_write_cr4(cr4 ^ X86_CR4_PGE);
        /* write old PGE again and flush TLBs */
        native_write_cr4(cr4);

        raw_local_irq_restore(flags);
}

/*
 * flush one page in the user mapping
 */
static inline void __native_flush_tlb_single(unsigned long addr)
{
        asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
}

/*
 * flush everything
 */
static inline void __flush_tlb_all(void)
{
        if (boot_cpu_has(X86_FEATURE_PGE)) {
                __flush_tlb_global();
        } else {
                /*
                 * !PGE -> !PCID (setup_pcid()), thus every flush is total.
                 */
                __flush_tlb();
        }

        /*
         * Note: if we somehow had PCID but not PGE, then this wouldn't work --
         * we'd end up flushing kernel translations for the current ASID but
         * we might fail to flush kernel translations for other cached ASIDs.
         *
         * To avoid this issue, we force PCID off if PGE is off.
         */
}

/*
 * flush one page in the kernel mapping
 */
static inline void __flush_tlb_one(unsigned long addr)
{
        count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
        __flush_tlb_single(addr);
}

#define TLB_FLUSH_ALL   -1UL

/*
 * TLB flushing:
 *
 *  - flush_tlb_all() flushes all processes' TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context's TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_others(cpumask, info) flushes TLBs on other cpus
 *
 * ..but the i386 has somewhat limited tlb flushing capabilities,
 * and page-granular flushes are available only on i486 and up.
 */
struct flush_tlb_info {
        /*
         * We support several kinds of flushes.
         *
         * - Fully flush a single mm.  .mm will be set, .end will be
         *   TLB_FLUSH_ALL, and .new_tlb_gen will be the tlb_gen to
         *   which the IPI sender is trying to catch us up.
         *
         * - Partially flush a single mm.  .mm will be set, .start and
         *   .end will indicate the range, and .new_tlb_gen will be set
         *   such that the changes between generation .new_tlb_gen-1 and
         *   .new_tlb_gen are entirely contained in the indicated range.
         *
         * - Fully flush all mms whose tlb_gens have been updated.  .mm
         *   will be NULL, .end will be TLB_FLUSH_ALL, and .new_tlb_gen
         *   will be zero.
         */
        struct mm_struct *mm;
        unsigned long start;
        unsigned long end;
        u64 new_tlb_gen;
};

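/*
 * Illustrative sketch (not part of the original header): how the second
 * flavor above -- a partial flush of one mm -- is described.  The real
 * construction, and the choice between flushing locally and sending IPIs,
 * is done by flush_tlb_mm_range() in arch/x86/mm/tlb.c;
 * example_build_partial_flush() is a hypothetical helper showing only how
 * the fields relate.
 */
static inline void example_build_partial_flush(struct flush_tlb_info *info,
                                               struct mm_struct *mm,
                                               unsigned long start,
                                               unsigned long end)
{
        info->mm = mm;
        info->start = start;
        info->end = end;
        /* All changes between new_tlb_gen - 1 and new_tlb_gen lie in [start, end). */
        info->new_tlb_gen = inc_mm_tlb_gen(mm);
}
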
#define local_flush_tlb() __flush_tlb()

#define flush_tlb_mm(mm) flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)

#define flush_tlb_range(vma, start, end)        \
                flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)

extern void flush_tlb_all(void);
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
                               unsigned long end, unsigned long vmflag);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
{
        flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, VM_NONE);
}

void native_flush_tlb_others(const struct cpumask *cpumask,
                             const struct flush_tlb_info *info);

static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
                                        struct mm_struct *mm)
{
        inc_mm_tlb_gen(mm);
        cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
}

extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);

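/*
 * Illustrative sketch (not part of the original header): the batching API
 * above exists so that memory reclaim can pay for one TLB shootdown per
 * batch of unmapped pages instead of one per page.  Roughly, the unmap
 * path in mm/rmap.c does:
 *
 *      clear the PTE for each page being unmapped;
 *      arch_tlbbatch_add_mm(&batch, mm);       // bump tlb_gen, record mm's CPUs
 *      ...
 *      arch_tlbbatch_flush(&batch);            // one flush covering every CPU recorded
 */
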
#ifndef CONFIG_PARAVIRT
#define flush_tlb_others(mask, info)    \
        native_flush_tlb_others(mask, info)
#endif

#endif /* _ASM_X86_TLBFLUSH_H */