/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_TLBFLUSH_H
#define _ASM_X86_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>
#include <asm/invpcid.h>

static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
{
        /*
         * Bump the generation count.  This also serves as a full barrier
         * that synchronizes with switch_mm(): callers are required to order
         * their read of mm_cpumask after their writes to the paging
         * structures.
         */
        return atomic64_inc_return(&mm->context.tlb_gen);
}

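/*
 * Illustrative sketch (not a definitive call sequence): a flusher is
 * expected to bump the generation first and only then look at
 * mm_cpumask(), relying on the full barrier in inc_mm_tlb_gen() for
 * the ordering described above:
 *
 *	u64 new_tlb_gen = inc_mm_tlb_gen(mm);
 *	... now it is safe to read mm_cpumask(mm) and send flush
 *	    requests tagged with new_tlb_gen ...
 */
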
/* There are 12 bits of space for ASIDs in CR3 */
#define CR3_HW_ASID_BITS	12
/*
 * When enabled, PAGE_TABLE_ISOLATION consumes a single bit for
 * user/kernel switches.
 */
#define PTI_CONSUMED_ASID_BITS	0

#define CR3_AVAIL_ASID_BITS (CR3_HW_ASID_BITS - PTI_CONSUMED_ASID_BITS)
/*
 * ASIDs are zero-based: 0->MAX_AVAIL_ASID are valid.  -1 below to account
 * for them being zero-based.  Another -1 is because ASID 0 is reserved for
 * use by non-PCID-aware users.
 */
#define MAX_ASID_AVAILABLE ((1 << CR3_AVAIL_ASID_BITS) - 2)

static inline u16 kern_pcid(u16 asid)
{
        VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);
        /*
         * If PCID is on, ASID-aware code paths put the ASID+1 into the
         * PCID bits.  This serves two purposes.  It prevents a nasty
         * situation in which PCID-unaware code saves CR3, loads some other
         * value (with PCID == 0), and then restores CR3, thus corrupting
         * the TLB for ASID 0 if the saved ASID was nonzero.  It also means
         * that any bugs involving loading a PCID-enabled CR3 with
         * CR4.PCIDE off will trigger deterministically.
         */
        return asid + 1;
}

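/*
 * Worked example (values shown for illustration only): with the ASID+1
 * scheme above, dynamic ASID 0 is loaded into CR3 as hardware PCID 1,
 * ASID 1 as PCID 2, and so on:
 *
 *	kern_pcid(0) == 1
 *	kern_pcid(1) == 2
 *
 * so hardware PCID 0 is only ever touched by PCID-unaware code.
 */
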
static inline unsigned long build_cr3(pgd_t *pgd, u16 asid)
{
        if (static_cpu_has(X86_FEATURE_PCID)) {
                return __sme_pa(pgd) | kern_pcid(asid);
        } else {
                VM_WARN_ON_ONCE(asid != 0);
                return __sme_pa(pgd);
        }
}

static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid)
{
        VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);
        VM_WARN_ON_ONCE(!this_cpu_has(X86_FEATURE_PCID));
        return __sme_pa(pgd) | kern_pcid(asid) | CR3_NOFLUSH;
}

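/*
 * Illustrative sketch (assuming write_cr3() as the eventual CR3 load):
 * a context switch that needs the TLB flushed for the new ASID uses
 * build_cr3(), while a switch back to a still-valid context keeps the
 * TLB contents by setting the no-flush bit:
 *
 *	write_cr3(build_cr3(next->pgd, new_asid));	     flushing load
 *	write_cr3(build_cr3_noflush(next->pgd, new_asid));  non-flushing load
 */
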
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __flush_tlb() __native_flush_tlb()
#define __flush_tlb_global() __native_flush_tlb_global()
#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
#endif

static inline bool tlb_defer_switch_to_init_mm(void)
{
        /*
         * If we have PCID, then switching to init_mm is reasonably
         * fast.  If we don't have PCID, then switching to init_mm is
         * quite slow, so we try to defer it in the hopes that we can
         * avoid it entirely.  The latter approach runs the risk of
         * receiving otherwise unnecessary IPIs.
         *
         * This choice is just a heuristic.  The tlb code can handle this
         * function returning true or false regardless of whether we have
         * PCID.
         */
        return !static_cpu_has(X86_FEATURE_PCID);
}

/*
 * 6 because 6 should be plenty and struct tlb_state will fit in
 * two cache lines.
 */
#define TLB_NR_DYN_ASIDS	6

struct tlb_context {
        u64 ctx_id;
        u64 tlb_gen;
};

struct tlb_state {
        /*
         * cpu_tlbstate.loaded_mm should match CR3 whenever interrupts
         * are on.  This means that it may not match current->active_mm,
         * which will contain the previous user mm when we're in lazy TLB
         * mode even if we've already switched back to swapper_pg_dir.
         */
        struct mm_struct *loaded_mm;

        /*
         * We can be in one of several states:
         *
         *  - Actively using an mm.  Our CPU's bit will be set in
         *    mm_cpumask(loaded_mm) and is_lazy == false;
         *
         *  - Not using a real mm.  loaded_mm == &init_mm.  Our CPU's bit
         *    will not be set in mm_cpumask(&init_mm) and is_lazy == false.
         *
         *  - Lazily using a real mm.  loaded_mm != &init_mm, our bit
         *    is set in mm_cpumask(loaded_mm), but is_lazy == true.
         *    We're heuristically guessing that the CR3 load we
         *    skipped more than makes up for the overhead added by
         *    lazy mode.
         */
        bool is_lazy;

        /*
         * If set we changed the page tables in such a way that we
         * needed an invalidation of all contexts (aka. PCIDs / ASIDs).
         * This tells us to go invalidate all the non-loaded ctxs[]
         * on the next context switch.
         *
         * The current ctx was kept up-to-date as it ran and does not
         * need to be invalidated.
         */
        bool invalidate_other;

        /*
         * Access to this CR4 shadow and to H/W CR4 is protected by
         * disabling interrupts when modifying either one.
         */
        unsigned long cr4;

        /*
         * This is a list of all contexts that might exist in the TLB.
         * There is one per ASID that we use, and the ASID (what the
         * CPU calls PCID) is the index into ctxs[].
         *
         * For each context, ctx_id indicates which mm the TLB's user
         * entries came from.  As an invariant, the TLB will never
         * contain entries that are out-of-date with respect to the
         * tlb_gen recorded for that mm in the list.
         *
         * To be clear, this means that it's legal for the TLB code to
         * flush the TLB without updating tlb_gen.  This can happen
         * (for now, at least) due to paravirt remote flushes.
         *
         * NB: context 0 is a bit special, since it's also used by
         * various bits of init code.  This is fine -- code that
         * isn't aware of PCID will end up harmlessly flushing
         * context 0.
         */
        struct tlb_context ctxs[TLB_NR_DYN_ASIDS];
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);

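/*
 * Illustrative sketch: cpu_tlbstate is per-CPU, so its fields are
 * accessed with the this_cpu accessors, e.g.:
 *
 *	struct mm_struct *mm = this_cpu_read(cpu_tlbstate.loaded_mm);
 *	this_cpu_write(cpu_tlbstate.invalidate_other, true);
 */
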
/* Initialize cr4 shadow for this CPU. */
static inline void cr4_init_shadow(void)
{
        this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
}

/* Set in this cpu's CR4. */
static inline void cr4_set_bits(unsigned long mask)
{
        unsigned long cr4;

        cr4 = this_cpu_read(cpu_tlbstate.cr4);
        if ((cr4 | mask) != cr4) {
                cr4 |= mask;
                this_cpu_write(cpu_tlbstate.cr4, cr4);
                __write_cr4(cr4);
        }
}

/* Clear in this cpu's CR4. */
static inline void cr4_clear_bits(unsigned long mask)
{
        unsigned long cr4;

        cr4 = this_cpu_read(cpu_tlbstate.cr4);
        if ((cr4 & ~mask) != cr4) {
                cr4 &= ~mask;
                this_cpu_write(cpu_tlbstate.cr4, cr4);
                __write_cr4(cr4);
        }
}

static inline void cr4_toggle_bits(unsigned long mask)
{
        unsigned long cr4;

        cr4 = this_cpu_read(cpu_tlbstate.cr4);
        cr4 ^= mask;
        this_cpu_write(cpu_tlbstate.cr4, cr4);
        __write_cr4(cr4);
}

/* Read the CR4 shadow. */
static inline unsigned long cr4_read_shadow(void)
{
        return this_cpu_read(cpu_tlbstate.cr4);
}

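/*
 * Illustrative usage sketch (X86_CR4_TSD chosen only as an example
 * flag): callers change CR4 exclusively through these helpers so that
 * the shadow stays coherent with the hardware register:
 *
 *	cr4_set_bits(X86_CR4_TSD);
 *	...
 *	cr4_clear_bits(X86_CR4_TSD);
 */
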
/*
 * Mark all other ASIDs as invalid; the currently loaded ASID is preserved.
 */
static inline void invalidate_other_asid(void)
{
        this_cpu_write(cpu_tlbstate.invalidate_other, true);
}

/*
 * Save some of the cr4 feature set we're using (e.g. Pentium 4MB
 * enable and PPro Global page enable), so that any CPUs that boot
 * up after us can get the correct flags.  This should only be used
 * during boot on the boot cpu.
 */
extern unsigned long mmu_cr4_features;
extern u32 *trampoline_cr4_features;

static inline void cr4_set_bits_and_update_boot(unsigned long mask)
{
        mmu_cr4_features |= mask;
        if (trampoline_cr4_features)
                *trampoline_cr4_features = mmu_cr4_features;
        cr4_set_bits(mask);
}

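/*
 * Illustrative sketch (X86_CR4_PGE chosen only as an example flag):
 * boot code that enables a CR4 feature system-wide uses the
 * _and_update_boot variant so that CPUs brought up later through the
 * trampoline inherit the same flags:
 *
 *	cr4_set_bits_and_update_boot(X86_CR4_PGE);
 */
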
extern void initialize_tlbstate_and_flush(void);

/*
 * flush the entire current user mapping
 */
static inline void __native_flush_tlb(void)
{
        /*
         * If current->mm == NULL then we borrow a mm which may change during a
         * task switch and therefore we must not be preempted while we write CR3
         * back:
         */
        preempt_disable();
        native_write_cr3(__native_read_cr3());
        preempt_enable();
}

/*
 * flush everything
 */
static inline void __native_flush_tlb_global(void)
{
        unsigned long cr4, flags;

        if (static_cpu_has(X86_FEATURE_INVPCID)) {
                /*
                 * Using INVPCID is considerably faster than a pair of writes
                 * to CR4 sandwiched inside an IRQ flag save/restore.
                 */
                invpcid_flush_all();
                return;
        }

        /*
         * Read-modify-write to CR4 - protect it from preemption and
         * from interrupts.  (Use the raw variant because this code can
         * be called from deep inside debugging code.)
         */
        raw_local_irq_save(flags);

        cr4 = this_cpu_read(cpu_tlbstate.cr4);
        /* toggle PGE to flush everything, including global entries */
        native_write_cr4(cr4 ^ X86_CR4_PGE);
        /* write old PGE again and flush TLBs */
        native_write_cr4(cr4);

        raw_local_irq_restore(flags);
}

/*
 * flush one page in the user mapping
 */
static inline void __native_flush_tlb_single(unsigned long addr)
{
        asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
}

static inline void __flush_tlb_all(void)
{
        if (boot_cpu_has(X86_FEATURE_PGE)) {
                __flush_tlb_global();
        } else {
                /*
                 * !PGE -> !PCID (setup_pcid()), thus every flush is total.
                 */
                __flush_tlb();
        }
}

/*
 * flush one page in the kernel mapping
 */
static inline void __flush_tlb_one(unsigned long addr)
{
        count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
        __flush_tlb_single(addr);

        if (!static_cpu_has(X86_FEATURE_PTI))
                return;

        /*
         * __flush_tlb_single() will have cleared the TLB entry for this ASID,
         * but since kernel space is replicated across all ASIDs, we must also
         * invalidate all the other ASIDs.
         */
        invalidate_other_asid();
}

#define TLB_FLUSH_ALL	-1UL

/*
 * TLB flushing:
 *
 *  - flush_tlb_all() flushes all processes' TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context's TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_others(cpumask, info) flushes TLBs on other cpus
 *
 * ..but the i386 has somewhat limited tlb flushing capabilities,
 * and page-granular flushes are available only on i486 and up.
 */

struct flush_tlb_info {
        /*
         * We support several kinds of flushes.
         *
         * - Fully flush a single mm.  .mm will be set, .end will be
         *   TLB_FLUSH_ALL, and .new_tlb_gen will be the tlb_gen to
         *   which the IPI sender is trying to catch us up.
         *
         * - Partially flush a single mm.  .mm will be set, .start and
         *   .end will indicate the range, and .new_tlb_gen will be set
         *   such that the changes between generation .new_tlb_gen-1 and
         *   .new_tlb_gen are entirely contained in the indicated range.
         *
         * - Fully flush all mms whose tlb_gens have been updated.  .mm
         *   will be NULL, .end will be TLB_FLUSH_ALL, and .new_tlb_gen
         *   will be zero.
         */
        struct mm_struct        *mm;
        unsigned long           start;
        unsigned long           end;
        u64                     new_tlb_gen;
};

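/*
 * Illustrative sketch of the second case above (a partial flush of one
 * mm after its page tables changed in [start, end)):
 *
 *	struct flush_tlb_info info = {
 *		.mm		= mm,
 *		.start		= start,
 *		.end		= end,
 *		.new_tlb_gen	= inc_mm_tlb_gen(mm),
 *	};
 *	flush_tlb_others(mm_cpumask(mm), &info);
 */
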
#define local_flush_tlb() __flush_tlb()

#define flush_tlb_mm(mm)	flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)

#define flush_tlb_range(vma, start, end) \
        flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)

extern void flush_tlb_all(void);
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
                               unsigned long end, unsigned long vmflag);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
{
        flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, VM_NONE);
}

void native_flush_tlb_others(const struct cpumask *cpumask,
                             const struct flush_tlb_info *info);

static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
                                        struct mm_struct *mm)
{
        inc_mm_tlb_gen(mm);
        cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
}

extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);

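/*
 * Illustrative sketch of the batched flow: an unmapper queues every mm
 * it touched and issues one flush at the end rather than one per page:
 *
 *	arch_tlbbatch_add_mm(&batch, mm);
 *	... unmap more pages, possibly from other mms ...
 *	arch_tlbbatch_flush(&batch);
 */
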
#ifndef CONFIG_PARAVIRT
#define flush_tlb_others(mask, info) \
        native_flush_tlb_others(mask, info)
#endif

#endif /* _ASM_X86_TLBFLUSH_H */