#ifndef _ASM_X86_TLBFLUSH_H
#define _ASM_X86_TLBFLUSH_H

#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>
#include <asm/invpcid.h>
#include <asm/processor-flags.h>
/*
 * The x86 feature is called PCID (Process Context IDentifier). It is similar
 * to what is traditionally called ASID on the RISC processors.
 *
 * We don't use the traditional ASID implementation, where each process/mm gets
 * its own ASID and flush/restart when we run out of ASID space.
 *
 * Instead we have a small per-cpu array of ASIDs and cache the last few mm's
 * that came by on this CPU, allowing cheaper switch_mm between processes on
 * this CPU.
 *
 * We end up with different spaces for different things. To avoid confusion we
 * use different names for each of them:
 *
 * ASID  - [0, TLB_NR_DYN_ASIDS-1]
 *         the canonical identifier for an mm
 *
 * kPCID - [1, TLB_NR_DYN_ASIDS]
 *         the value we write into the PCID part of CR3; corresponds to the
 *         ASID+1, because PCID 0 is special.
 *
 * uPCID - [2048 + 1, 2048 + TLB_NR_DYN_ASIDS]
 *         for KPTI each mm has two address spaces and thus needs two
 *         PCID values, but we can still do with a single ASID denomination
 *         for each mm. Corresponds to kPCID + 2048.
 */
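/*
 * Worked example (illustrative only, not used by the code): assuming the
 * user/kernel switch bit is bit 11 of the PCID field (which is what gives
 * the 2048 offset above), the mm cached in ASID slot 0 is addressed with
 * kPCID 0 + 1 = 1 in the kernel page tables and with
 * uPCID 1 + 2048 = 2049 in the user (PTI) page tables.
 */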
/* There are 12 bits of space for ASIDS in CR3 */
#define CR3_HW_ASID_BITS	12

/*
 * When enabled, PAGE_TABLE_ISOLATION consumes a single bit for
 * user/kernel switches
 */
#ifdef CONFIG_PAGE_TABLE_ISOLATION
# define PTI_CONSUMED_PCID_BITS	1
#else
# define PTI_CONSUMED_PCID_BITS	0
#endif

#define CR3_AVAIL_PCID_BITS (X86_CR3_PCID_BITS - PTI_CONSUMED_PCID_BITS)

/*
 * ASIDs are zero-based: 0->MAX_AVAIL_ASID are valid.  -1 below to account
 * for them being zero-based.  Another -1 is because PCID 0 is reserved for
 * use by non-PCID-aware users.
 */
#define MAX_ASID_AVAILABLE ((1 << CR3_AVAIL_PCID_BITS) - 2)
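/*
 * Worked example (illustrative only): with the 12 hardware PCID bits and
 * PAGE_TABLE_ISOLATION enabled, PTI consumes one bit, so
 * CR3_AVAIL_PCID_BITS = 12 - 1 = 11 and
 * MAX_ASID_AVAILABLE = (1 << 11) - 2 = 2046.
 */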
/*
 * 6 because 6 should be plenty and struct tlb_state will fit in two cache
 * lines.
 */
#define TLB_NR_DYN_ASIDS	6
/*
 * Given @asid, compute kPCID
 */
static inline u16 kern_pcid(u16 asid)
{
	VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);

#ifdef CONFIG_PAGE_TABLE_ISOLATION
	/*
	 * Make sure that the dynamic ASID space does not conflict with the
	 * bit we are using to switch between user and kernel ASIDs.
	 */
	BUILD_BUG_ON(TLB_NR_DYN_ASIDS >= (1 << X86_CR3_PTI_PCID_USER_BIT));

	/*
	 * The ASID being passed in here should have respected the
	 * MAX_ASID_AVAILABLE and thus never have the switch bit set.
	 */
	VM_WARN_ON_ONCE(asid & (1 << X86_CR3_PTI_PCID_USER_BIT));
#endif
	/*
	 * The dynamically-assigned ASIDs that get passed in are small
	 * (<TLB_NR_DYN_ASIDS).  They never have the high switch bit set,
	 * so do not bother to clear it.
	 *
	 * If PCID is on, ASID-aware code paths put the ASID+1 into the
	 * PCID bits.  This serves two purposes.  It prevents a nasty
	 * situation in which PCID-unaware code saves CR3, loads some other
	 * value (with PCID == 0), and then restores CR3, thus corrupting
	 * the TLB for ASID 0 if the saved ASID was nonzero.  It also means
	 * that any bugs involving loading a PCID-enabled CR3 with
	 * CR4.PCIDE off will trigger deterministically.
	 */
	return asid + 1;
}
/*
 * Given @asid, compute uPCID
 */
static inline u16 user_pcid(u16 asid)
{
	u16 ret = kern_pcid(asid);
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	ret |= 1 << X86_CR3_PTI_PCID_USER_BIT;
#endif
	return ret;
}
static inline unsigned long build_cr3(pgd_t *pgd, u16 asid)
{
	if (static_cpu_has(X86_FEATURE_PCID)) {
		return __pa(pgd) | kern_pcid(asid);
	} else {
		VM_WARN_ON_ONCE(asid != 0);
		return __pa(pgd);
	}
}
static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid)
{
	VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);
	VM_WARN_ON_ONCE(!this_cpu_has(X86_FEATURE_PCID));
	return __pa(pgd) | kern_pcid(asid) | CR3_NOFLUSH;
}
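/*
 * Layout sketch of the CR3 value built above (illustrative only):
 *
 *	bit  63       : CR3_NOFLUSH ("do not flush this PCID" hint,
 *	                build_cr3_noflush() only)
 *	bits 62 .. 12 : page-aligned physical address of the PGD
 *	bits 11 ..  0 : kPCID, i.e. kern_pcid(asid) == asid + 1
 */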
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __flush_tlb() __native_flush_tlb()
#define __flush_tlb_global() __native_flush_tlb_global()
#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
#endif
/*
 * If tlb_use_lazy_mode is true, then we try to avoid switching CR3 to point
 * to init_mm when we switch to a kernel thread (e.g. the idle thread).  If
 * it's false, then we immediately switch CR3 when entering a kernel thread.
 */
DECLARE_STATIC_KEY_TRUE(tlb_use_lazy_mode);
struct tlb_context {
	u64 ctx_id;
	u64 tlb_gen;
};

struct tlb_state {
	/*
	 * cpu_tlbstate.loaded_mm should match CR3 whenever interrupts
	 * are on.  This means that it may not match current->active_mm,
	 * which will contain the previous user mm when we're in lazy TLB
	 * mode even if we've already switched back to swapper_pg_dir.
	 */
	struct mm_struct *loaded_mm;
	u16 loaded_mm_asid;
	u16 next_asid;
	/* last user mm's ctx id */
	u64 last_ctx_id;

	/*
	 * We can be in one of several states:
	 *
	 *  - Actively using an mm.  Our CPU's bit will be set in
	 *    mm_cpumask(loaded_mm) and is_lazy == false;
	 *
	 *  - Not using a real mm.  loaded_mm == &init_mm.  Our CPU's bit
	 *    will not be set in mm_cpumask(&init_mm) and is_lazy == false.
	 *
	 *  - Lazily using a real mm.  loaded_mm != &init_mm, our bit
	 *    is set in mm_cpumask(loaded_mm), but is_lazy == true.
	 *    We're heuristically guessing that the CR3 load we
	 *    skipped more than makes up for the overhead added by
	 *    lazy mode.
	 */
	bool is_lazy;

	/*
	 * If set we changed the page tables in such a way that we
	 * needed an invalidation of all contexts (aka. PCIDs / ASIDs).
	 * This tells us to go invalidate all the non-loaded ctxs[]
	 * on the next context switch.
	 *
	 * The current ctx was kept up-to-date as it ran and does not
	 * need to be invalidated.
	 */
	bool invalidate_other;

	/*
	 * Mask that contains TLB_NR_DYN_ASIDS+1 bits to indicate
	 * the corresponding user PCID needs a flush next time we
	 * switch to it; see SWITCH_TO_USER_CR3.
	 */
	unsigned short user_pcid_flush_mask;

	/*
	 * Access to this CR4 shadow and to H/W CR4 is protected by
	 * disabling interrupts when modifying either one.
	 */
	unsigned long cr4;

	/*
	 * This is a list of all contexts that might exist in the TLB.
	 * There is one per ASID that we use, and the ASID (what the
	 * CPU calls PCID) is the index into ctxs.
	 *
	 * For each context, ctx_id indicates which mm the TLB's user
	 * entries came from.  As an invariant, the TLB will never
	 * contain entries that are out-of-date as when that mm reached
	 * the tlb_gen in the list.
	 *
	 * To be clear, this means that it's legal for the TLB code to
	 * flush the TLB without updating tlb_gen.  This can happen
	 * (for now, at least) due to paravirt remote flushes.
	 *
	 * NB: context 0 is a bit special, since it's also used by
	 * various bits of init code.  This is fine -- code that
	 * isn't aware of PCID will end up harmlessly flushing
	 * context 0.
	 */
	struct tlb_context ctxs[TLB_NR_DYN_ASIDS];
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
/* Initialize cr4 shadow for this CPU. */
static inline void cr4_init_shadow(void)
{
	this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
}
/* Set in this cpu's CR4. */
static inline void cr4_set_bits(unsigned long mask)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	if ((cr4 | mask) != cr4) {
		cr4 |= mask;
		this_cpu_write(cpu_tlbstate.cr4, cr4);
		__write_cr4(cr4);
	}
}
/* Clear in this cpu's CR4. */
static inline void cr4_clear_bits(unsigned long mask)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	if ((cr4 & ~mask) != cr4) {
		cr4 &= ~mask;
		this_cpu_write(cpu_tlbstate.cr4, cr4);
		__write_cr4(cr4);
	}
}
static inline void cr4_toggle_bits(unsigned long mask)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	cr4 ^= mask;
	this_cpu_write(cpu_tlbstate.cr4, cr4);
	__write_cr4(cr4);
}
/* Read the CR4 shadow. */
static inline unsigned long cr4_read_shadow(void)
{
	return this_cpu_read(cpu_tlbstate.cr4);
}
/*
 * Mark all other ASIDs as invalid, preserves the current.
 */
static inline void invalidate_other_asid(void)
{
	this_cpu_write(cpu_tlbstate.invalidate_other, true);
}
/*
 * Save some of the cr4 feature set we're using (e.g. Pentium 4MB
 * enable and PPro Global page enable), so that any CPUs that boot
 * up after us can get the correct flags.  This should only be used
 * during boot on the boot cpu.
 */
extern unsigned long mmu_cr4_features;
extern u32 *trampoline_cr4_features;
static inline void cr4_set_bits_and_update_boot(unsigned long mask)
{
	mmu_cr4_features |= mask;
	if (trampoline_cr4_features)
		*trampoline_cr4_features = mmu_cr4_features;
	cr4_set_bits(mask);
}
/*
 * Given an ASID, flush the corresponding user ASID.  We can delay this
 * until the next time we switch to it.
 *
 * See SWITCH_TO_USER_CR3.
 */
static inline void invalidate_user_asid(u16 asid)
{
	/* There is no user ASID if address space separation is off */
	if (!IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION))
		return;

	/*
	 * We only have a single ASID if PCID is off and the CR3
	 * write will have flushed it.
	 */
	if (!cpu_feature_enabled(X86_FEATURE_PCID))
		return;

	if (!static_cpu_has(X86_FEATURE_PTI))
		return;

	__set_bit(kern_pcid(asid),
		  (unsigned long *)this_cpu_ptr(&cpu_tlbstate.user_pcid_flush_mask));
}
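/*
 * Roughly speaking (a sketch of the mechanism; the entry code is the
 * authoritative version): setting bit kern_pcid(asid), i.e. asid + 1, in
 * user_pcid_flush_mask defers the flush.  The next SWITCH_TO_USER_CR3 for
 * that ASID notices the bit, clears it, and performs a flushing CR3 write
 * instead of setting the NOFLUSH bit.
 */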
extern void initialize_tlbstate_and_flush(void);
/*
 * flush the entire current user mapping
 */
static inline void __native_flush_tlb(void)
{
	/*
	 * Preemption or interrupts must be disabled to protect the access
	 * to the per CPU variable and to prevent being preempted between
	 * read_cr3() and write_cr3().
	 */
	WARN_ON_ONCE(preemptible());

	invalidate_user_asid(this_cpu_read(cpu_tlbstate.loaded_mm_asid));

	/* If current->mm == NULL then the read_cr3() "borrows" an mm */
	native_write_cr3(__native_read_cr3());
}
/*
 * flush everything
 */
static inline void __native_flush_tlb_global(void)
{
	unsigned long cr4, flags;

	if (static_cpu_has(X86_FEATURE_INVPCID)) {
		/*
		 * Using INVPCID is considerably faster than a pair of writes
		 * to CR4 sandwiched inside an IRQ flag save/restore.
		 *
		 * Note, this works with CR4.PCIDE=0 or 1.
		 */
		invpcid_flush_all();
		return;
	}

	/*
	 * Read-modify-write to CR4 - protect it from preemption and
	 * from interrupts. (Use the raw variant because this code can
	 * be called from deep inside debugging code.)
	 */
	raw_local_irq_save(flags);

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	/* toggle PGE */
	native_write_cr4(cr4 ^ X86_CR4_PGE);
	/* write old PGE again and flush TLBs */
	native_write_cr4(cr4);

	raw_local_irq_restore(flags);
}
/*
 * flush one page in the user mapping
 */
static inline void __native_flush_tlb_single(unsigned long addr)
{
	u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);

	asm volatile("invlpg (%0)" ::"r" (addr) : "memory");

	if (!static_cpu_has(X86_FEATURE_PTI))
		return;

	/*
	 * Some platforms #GP if we call invpcid(type=1/2) before CR4.PCIDE=1.
	 * Just use invalidate_user_asid() in case we are called early.
	 */
	if (!this_cpu_has(X86_FEATURE_INVPCID_SINGLE))
		invalidate_user_asid(loaded_mm_asid);
	else
		invpcid_flush_one(user_pcid(loaded_mm_asid), addr);
}
static inline void __flush_tlb_all(void)
{
	if (boot_cpu_has(X86_FEATURE_PGE)) {
		__flush_tlb_global();
	} else {
		/* !PGE -> !PCID (setup_pcid()), thus every flush is total. */
		__flush_tlb();
	}
}
/*
 * flush one page in the kernel mapping
 */
static inline void __flush_tlb_one(unsigned long addr)
{
	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
	__flush_tlb_single(addr);

	if (!static_cpu_has(X86_FEATURE_PTI))
		return;

	/*
	 * __flush_tlb_single() will have cleared the TLB entry for this ASID,
	 * but since kernel space is replicated across all, we must also
	 * invalidate all others.
	 */
	invalidate_other_asid();
}
#define TLB_FLUSH_ALL	-1UL

/*
 * TLB flushing:
 *
 *  - flush_tlb_all() flushes all processes TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_others(cpumask, info) flushes TLBs on other cpus
 *
 * ..but the i386 has somewhat limited tlb flushing capabilities,
 * and page-granular flushes are available only on i486 and up.
 */
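/*
 * Usage sketch (illustrative only; "vma" and "addr" stand for a caller's
 * own variables): after changing the PTE that maps the user page at addr,
 * a caller would typically do
 *
 *	flush_tlb_page(vma, addr);
 *
 * which boils down to flush_tlb_mm_range(vma->vm_mm, addr,
 * addr + PAGE_SIZE, VM_NONE), as defined later in this header.
 */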
struct flush_tlb_info {
	/*
	 * We support several kinds of flushes.
	 *
	 * - Fully flush a single mm.  .mm will be set, .end will be
	 *   TLB_FLUSH_ALL, and .new_tlb_gen will be the tlb_gen to
	 *   which the IPI sender is trying to catch us up.
	 *
	 * - Partially flush a single mm.  .mm will be set, .start and
	 *   .end will indicate the range, and .new_tlb_gen will be set
	 *   such that the changes between generation .new_tlb_gen-1 and
	 *   .new_tlb_gen are entirely contained in the indicated range.
	 *
	 * - Fully flush all mms whose tlb_gens have been updated.  .mm
	 *   will be NULL, .end will be TLB_FLUSH_ALL, and .new_tlb_gen
	 *   will be zero.
	 */
	struct mm_struct	*mm;
	unsigned long		start;
	unsigned long		end;
	u64			new_tlb_gen;
};
#define local_flush_tlb() __flush_tlb()

#define flush_tlb_mm(mm)	flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)

#define flush_tlb_range(vma, start, end)	\
		flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)
extern void flush_tlb_all(void);
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned long vmflag);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
{
	flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, VM_NONE);
}
void native_flush_tlb_others(const struct cpumask *cpumask,
			     const struct flush_tlb_info *info);
static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
{
	/*
	 * Bump the generation count.  This also serves as a full barrier
	 * that synchronizes with switch_mm(): callers are required to order
	 * their read of mm_cpumask after their writes to the paging
	 * structures.
	 */
	return atomic64_inc_return(&mm->context.tlb_gen);
}
static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
					struct mm_struct *mm)
{
	inc_mm_tlb_gen(mm);
	cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
}
extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
#ifndef CONFIG_PARAVIRT
#define flush_tlb_others(mask, info)	\
	native_flush_tlb_others(mask, info)
#endif

#endif /* _ASM_X86_TLBFLUSH_H */