#ifndef _ASM_X86_TLBFLUSH_H
#define _ASM_X86_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>
#include <asm/smp.h>

#include <asm/invpcid.h>

static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
{
	/*
	 * Bump the generation count.  This also serves as a full barrier
	 * that synchronizes with switch_mm(): callers are required to order
	 * their read of mm_cpumask after their writes to the paging
	 * structures.
	 */
	return atomic64_inc_return(&mm->context.tlb_gen);
}

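/*
 * Usage sketch (illustrative, not part of the original header): a remote
 * flusher bumps the generation *before* sampling mm_cpumask(), relying on
 * the full barrier in atomic64_inc_return() for the required ordering:
 *
 *	u64 new_tlb_gen = inc_mm_tlb_gen(mm);
 *	// ... now read mm_cpumask(mm) and send flush IPIs ...
 */
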
/* There are 12 bits of space for ASIDS in CR3 */
#define CR3_HW_ASID_BITS		12

/*
 * When enabled, PAGE_TABLE_ISOLATION consumes a single bit for
 * user/kernel switches.
 */
#define PTI_CONSUMED_ASID_BITS		0

#define CR3_AVAIL_ASID_BITS (CR3_HW_ASID_BITS - PTI_CONSUMED_ASID_BITS)

/*
 * ASIDs are zero-based: 0->MAX_AVAIL_ASID are valid.  -1 below to account
 * for them being zero-based.  Another -1 is because ASID 0 is reserved for
 * use by non-PCID-aware users.
 */
#define MAX_ASID_AVAILABLE ((1 << CR3_AVAIL_ASID_BITS) - 2)

static inline u16 kern_pcid(u16 asid)
{
	VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);
	/*
	 * If PCID is on, ASID-aware code paths put the ASID+1 into the
	 * PCID bits.  This serves two purposes.  It prevents a nasty
	 * situation in which PCID-unaware code saves CR3, loads some other
	 * value (with PCID == 0), and then restores CR3, thus corrupting
	 * the TLB for ASID 0 if the saved ASID was nonzero.  It also means
	 * that any bugs involving loading a PCID-enabled CR3 with
	 * CR4.PCIDE off will trigger deterministically.
	 */
	return asid + 1;
}

static inline unsigned long build_cr3(pgd_t *pgd, u16 asid)
{
	if (static_cpu_has(X86_FEATURE_PCID)) {
		return __pa(pgd) | kern_pcid(asid);
	} else {
		VM_WARN_ON_ONCE(asid != 0);
		return __pa(pgd);
	}
}

static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid)
{
	VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);
	VM_WARN_ON_ONCE(!this_cpu_has(X86_FEATURE_PCID));
	return __pa(pgd) | kern_pcid(asid) | CR3_NOFLUSH;
}

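/*
 * Worked example (illustrative, with a hypothetical pgd at physical
 * address 0x1000): on a PCID-capable CPU the low 12 bits of CR3 carry
 * the PCID, and CR3_NOFLUSH is bit 63, which asks the CPU to preserve
 * cached translations tagged with that PCID:
 *
 *	build_cr3(pgd, 0)         == 0x1000 | 0x001	// kern_pcid(0) == 1
 *	build_cr3(pgd, 5)         == 0x1000 | 0x006	// kern_pcid(5) == 6
 *	build_cr3_noflush(pgd, 5) == 0x1000 | 0x006 | CR3_NOFLUSH
 */
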
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __flush_tlb() __native_flush_tlb()
#define __flush_tlb_global() __native_flush_tlb_global()
#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
#endif

/*
 * If tlb_use_lazy_mode is true, then we try to avoid switching CR3 to point
 * to init_mm when we switch to a kernel thread (e.g. the idle thread).  If
 * it's false, then we immediately switch CR3 when entering a kernel thread.
 */
DECLARE_STATIC_KEY_TRUE(tlb_use_lazy_mode);

/*
 * 6 because 6 should be plenty and struct tlb_state will fit in
 * two cache lines.
 */
#define TLB_NR_DYN_ASIDS	6

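/*
 * Sizing note (illustrative arithmetic, not from the original header):
 * each struct tlb_context below is two u64s, i.e. 16 bytes, so ctxs[]
 * occupies 6 * 16 == 96 bytes; together with the other members this
 * keeps struct tlb_state within roughly two 64-byte cache lines.
 */
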
struct tlb_context {
	u64 ctx_id;
	u64 tlb_gen;
};

struct tlb_state {
	/*
	 * cpu_tlbstate.loaded_mm should match CR3 whenever interrupts
	 * are on.  This means that it may not match current->active_mm,
	 * which will contain the previous user mm when we're in lazy TLB
	 * mode even if we've already switched back to swapper_pg_dir.
	 */
	struct mm_struct *loaded_mm;
	u16 loaded_mm_asid;
	u16 next_asid;

	/*
	 * We can be in one of several states:
	 *
	 *  - Actively using an mm.  Our CPU's bit will be set in
	 *    mm_cpumask(loaded_mm) and is_lazy == false;
	 *
	 *  - Not using a real mm.  loaded_mm == &init_mm.  Our CPU's bit
	 *    will not be set in mm_cpumask(&init_mm) and is_lazy == false.
	 *
	 *  - Lazily using a real mm.  loaded_mm != &init_mm, our bit
	 *    is set in mm_cpumask(loaded_mm), but is_lazy == true.
	 *    We're heuristically guessing that the CR3 load we
	 *    skipped more than makes up for the overhead added by
	 *    lazy mode.
	 */
	bool is_lazy;

	/*
	 * If set we changed the page tables in such a way that we
	 * needed an invalidation of all contexts (aka. PCIDs / ASIDs).
	 * This tells us to go invalidate all the non-loaded ctxs[]
	 * on the next context switch.
	 *
	 * The current ctx was kept up-to-date as it ran and does not
	 * need to be invalidated.
	 */
	bool invalidate_other;

	/*
	 * Access to this CR4 shadow and to H/W CR4 is protected by
	 * disabling interrupts when modifying either one.
	 */
	unsigned long cr4;

	/*
	 * This is a list of all contexts that might exist in the TLB.
	 * There is one per ASID that we use, and the ASID (what the
	 * CPU calls PCID) is the index into ctxts.
	 *
	 * For each context, ctx_id indicates which mm the TLB's user
	 * entries came from.  As an invariant, the TLB will never
	 * contain entries that are out-of-date with respect to the
	 * tlb_gen recorded in the list for that mm.
	 *
	 * To be clear, this means that it's legal for the TLB code to
	 * flush the TLB without updating tlb_gen.  This can happen
	 * (for now, at least) due to paravirt remote flushes.
	 *
	 * NB: context 0 is a bit special, since it's also used by
	 * various bits of init code.  This is fine -- code that
	 * isn't aware of PCID will end up harmlessly flushing
	 * context 0.
	 */
	struct tlb_context ctxs[TLB_NR_DYN_ASIDS];
};

DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);

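/*
 * Access sketch (illustrative): cpu_tlbstate is a per-CPU variable, so
 * it is read with the this_cpu_*() accessors, typically with preemption
 * or interrupts off so the CPU cannot change underneath us:
 *
 *	struct mm_struct *mm = this_cpu_read(cpu_tlbstate.loaded_mm);
 */
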
/* Initialize cr4 shadow for this CPU. */
static inline void cr4_init_shadow(void)
{
	this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
}

/* Set in this cpu's CR4. */
static inline void cr4_set_bits(unsigned long mask)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	if ((cr4 | mask) != cr4) {
		cr4 |= mask;
		this_cpu_write(cpu_tlbstate.cr4, cr4);
		__write_cr4(cr4);
	}
}

/* Clear in this cpu's CR4. */
static inline void cr4_clear_bits(unsigned long mask)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	if ((cr4 & ~mask) != cr4) {
		cr4 &= ~mask;
		this_cpu_write(cpu_tlbstate.cr4, cr4);
		__write_cr4(cr4);
	}
}

static inline void cr4_toggle_bits(unsigned long mask)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	cr4 ^= mask;
	this_cpu_write(cpu_tlbstate.cr4, cr4);
	__write_cr4(cr4);
}

/* Read the CR4 shadow. */
static inline unsigned long cr4_read_shadow(void)
{
	return this_cpu_read(cpu_tlbstate.cr4);
}

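/*
 * Example (illustrative): callers are expected to go through these
 * helpers rather than writing CR4 directly, so the shadow stays in sync
 * with the hardware.  The PCID setup path, for instance, effectively
 * does:
 *
 *	cr4_set_bits(X86_CR4_PCIDE);
 */
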
/*
 * Mark all other ASIDs as invalid; the current ASID is preserved.
 */
static inline void invalidate_other_asid(void)
{
	this_cpu_write(cpu_tlbstate.invalidate_other, true);
}

/*
 * Save some of the cr4 feature set we're using (e.g. Pentium 4MB
 * enable and PPro Global page enable), so that any CPUs that boot
 * up after us can get the correct flags.  This should only be used
 * during boot on the boot cpu.
 */
extern unsigned long mmu_cr4_features;
extern u32 *trampoline_cr4_features;

static inline void cr4_set_bits_and_update_boot(unsigned long mask)
{
	mmu_cr4_features |= mask;
	if (trampoline_cr4_features)
		*trampoline_cr4_features = mmu_cr4_features;
	cr4_set_bits(mask);
}

/*
 * flush the entire current user mapping
 */
static inline void __native_flush_tlb(void)
{
	/*
	 * If current->mm == NULL then we borrow a mm which may change during a
	 * task switch and therefore we must not be preempted while we write CR3
	 * back:
	 */
	preempt_disable();
	native_write_cr3(__native_read_cr3());
	preempt_enable();
}

/*
 * flush everything
 */
static inline void __native_flush_tlb_global(void)
{
	unsigned long cr4, flags;

	if (static_cpu_has(X86_FEATURE_INVPCID)) {
		/*
		 * Using INVPCID is considerably faster than a pair of writes
		 * to CR4 sandwiched inside an IRQ flag save/restore.
		 */
		invpcid_flush_all();
		return;
	}

	/*
	 * Read-modify-write to CR4 - protect it from preemption and
	 * from interrupts. (Use the raw variant because this code can
	 * be called from deep inside debugging code.)
	 */
	raw_local_irq_save(flags);

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	/* toggle PGE */
	native_write_cr4(cr4 ^ X86_CR4_PGE);
	/* write old PGE again and flush TLBs */
	native_write_cr4(cr4);

	raw_local_irq_restore(flags);
}

/*
 * flush one page in the user mapping
 */
static inline void __native_flush_tlb_single(unsigned long addr)
{
	asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
}

/*
 * flush everything
 */
static inline void __flush_tlb_all(void)
{
	if (boot_cpu_has(X86_FEATURE_PGE)) {
		__flush_tlb_global();
	} else {
		/*
		 * !PGE -> !PCID (setup_pcid()), thus every flush is total.
		 */
		__flush_tlb();
	}
}

/*
 * flush one page in the kernel mapping
 */
static inline void __flush_tlb_one(unsigned long addr)
{
	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
	__flush_tlb_single(addr);

	if (!static_cpu_has(X86_FEATURE_PTI))
		return;

	/*
	 * __flush_tlb_single() will have cleared the TLB entry for this ASID,
	 * but since kernel space is replicated across all ASIDs, we must also
	 * invalidate all the others.
	 */
	invalidate_other_asid();
}

#define TLB_FLUSH_ALL	-1UL

/*
 * TLB flushing:
 *
 *  - flush_tlb_all() flushes all processes' TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_others(cpumask, info) flushes TLBs on other cpus
 *
 * ..but the i386 has somewhat limited tlb flushing capabilities,
 * and page-granular flushes are available only on i486 and up.
 */

struct flush_tlb_info {
	/*
	 * We support several kinds of flushes.
	 *
	 * - Fully flush a single mm.  .mm will be set, .end will be
	 *   TLB_FLUSH_ALL, and .new_tlb_gen will be the tlb_gen to
	 *   which the IPI sender is trying to catch us up.
	 *
	 * - Partially flush a single mm.  .mm will be set, .start and
	 *   .end will indicate the range, and .new_tlb_gen will be set
	 *   such that the changes between generation .new_tlb_gen-1 and
	 *   .new_tlb_gen are entirely contained in the indicated range.
	 *
	 * - Fully flush all mms whose tlb_gens have been updated.  .mm
	 *   will be NULL, .end will be TLB_FLUSH_ALL, and .new_tlb_gen
	 *   will be zero.
	 */
	struct mm_struct	*mm;
	unsigned long		start;
	unsigned long		end;
	u64			new_tlb_gen;
};

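/*
 * Example (illustrative): a partial flush of one mm could be described
 * to remote CPUs with something like:
 *
 *	struct flush_tlb_info info = {
 *		.mm		= mm,
 *		.start		= start,
 *		.end		= end,
 *		.new_tlb_gen	= inc_mm_tlb_gen(mm),
 *	};
 *	flush_tlb_others(mm_cpumask(mm), &info);
 */
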
#define local_flush_tlb() __flush_tlb()

#define flush_tlb_mm(mm)	flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)

#define flush_tlb_range(vma, start, end)	\
		flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)

extern void flush_tlb_all(void);
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned long vmflag);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
{
	flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, VM_NONE);
}

void native_flush_tlb_others(const struct cpumask *cpumask,
			     const struct flush_tlb_info *info);

static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
					struct mm_struct *mm)
{
	inc_mm_tlb_gen(mm);
	cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
}

extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);

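/*
 * Batching sketch (illustrative): reclaim-style callers accumulate the
 * CPU set across many pages and pay for a single deferred flush instead
 * of one IPI per page:
 *
 *	arch_tlbbatch_add_mm(&batch, mm);	// per page/mm unmapped
 *	// ... unmap more pages ...
 *	arch_tlbbatch_flush(&batch);		// one flush at the end
 */
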
#ifndef CONFIG_PARAVIRT
#define flush_tlb_others(mask, info)	\
	native_flush_tlb_others(mask, info)
#endif

#endif /* _ASM_X86_TLBFLUSH_H */