#ifndef _ASM_X86_TLBFLUSH_H
#define _ASM_X86_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>
static inline void __invpcid(unsigned long pcid, unsigned long addr,
                             unsigned long type)
{
        struct { u64 d[2]; } desc = { { pcid, addr } };

        /*
         * The memory clobber is because the whole point is to invalidate
         * stale TLB entries and, especially if we're flushing global
         * mappings, we don't want the compiler to reorder any subsequent
         * memory accesses before the TLB flush.
         *
         * The hex opcode is invpcid (%ecx), %eax in 32-bit mode and
         * invpcid (%rcx), %rax in long mode.
         */
        asm volatile (".byte 0x66, 0x0f, 0x38, 0x82, 0x01"
                      : : "m" (desc), "a" (type), "c" (&desc) : "memory");
}
#define INVPCID_TYPE_INDIV_ADDR      0
#define INVPCID_TYPE_SINGLE_CTXT     1
#define INVPCID_TYPE_ALL_INCL_GLOBAL 2
#define INVPCID_TYPE_ALL_NON_GLOBAL  3
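/*
 * These are the "type" values handed to __invpcid() above; per the asm
 * constraints there, the type travels in EAX/RAX while the { pcid, addr }
 * descriptor is referenced through ECX/RCX.
 */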
/* Flush all mappings for a given pcid and addr, not including globals. */
static inline void invpcid_flush_one(unsigned long pcid,
                                     unsigned long addr)
{
        __invpcid(pcid, addr, INVPCID_TYPE_INDIV_ADDR);
}
/* Flush all mappings for a given PCID, not including globals. */
static inline void invpcid_flush_single_context(unsigned long pcid)
{
        __invpcid(pcid, 0, INVPCID_TYPE_SINGLE_CTXT);
}
/* Flush all mappings, including globals, for all PCIDs. */
static inline void invpcid_flush_all(void)
{
        __invpcid(0, 0, INVPCID_TYPE_ALL_INCL_GLOBAL);
}
/* Flush all mappings for all PCIDs except globals. */
static inline void invpcid_flush_all_nonglobals(void)
{
        __invpcid(0, 0, INVPCID_TYPE_ALL_NON_GLOBAL);
}
static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
{
        u64 new_tlb_gen;

        /*
         * Bump the generation count. This also serves as a full barrier
         * that synchronizes with switch_mm(): callers are required to order
         * their read of mm_cpumask after their writes to the paging
         * structures.
         */
        smp_mb__before_atomic();
        new_tlb_gen = atomic64_inc_return(&mm->context.tlb_gen);
        smp_mb__after_atomic();

        return new_tlb_gen;
}
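/*
 * The generation returned here is typically what ends up in
 * flush_tlb_info::new_tlb_gen (see the struct below): it tells remote
 * CPUs which tlb_gen the IPI sender is trying to catch them up to.
 */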
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __flush_tlb() __native_flush_tlb()
#define __flush_tlb_global() __native_flush_tlb_global()
#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
#endif
static inline bool tlb_defer_switch_to_init_mm(void)
{
        /*
         * If we have PCID, then switching to init_mm is reasonably
         * fast. If we don't have PCID, then switching to init_mm is
         * quite slow, so we try to defer it in the hopes that we can
         * avoid it entirely. The latter approach runs the risk of
         * receiving otherwise unnecessary IPIs.
         *
         * This choice is just a heuristic. The tlb code can handle this
         * function returning true or false regardless of whether we have
         * PCID.
         */
        return !static_cpu_has(X86_FEATURE_PCID);
}
/*
 * 6 because 6 should be plenty and struct tlb_state will fit in
 * two cache lines.
 */
#define TLB_NR_DYN_ASIDS 6
struct tlb_context {
        u64 ctx_id;
        u64 tlb_gen;
};

struct tlb_state {
        /*
         * cpu_tlbstate.loaded_mm should match CR3 whenever interrupts
         * are on. This means that it may not match current->active_mm,
         * which will contain the previous user mm when we're in lazy TLB
         * mode even if we've already switched back to swapper_pg_dir.
         */
        struct mm_struct *loaded_mm;
        /*
         * We can be in one of several states:
         *
         *  - Actively using an mm. Our CPU's bit will be set in
         *    mm_cpumask(loaded_mm) and is_lazy == false;
         *
         *  - Not using a real mm. loaded_mm == &init_mm. Our CPU's bit
         *    will not be set in mm_cpumask(&init_mm) and is_lazy == false.
         *
         *  - Lazily using a real mm. loaded_mm != &init_mm, our bit
         *    is set in mm_cpumask(loaded_mm), but is_lazy == true.
         *    We're heuristically guessing that the CR3 load we
         *    skipped more than makes up for the overhead added by
         *    lazy mode.
         */
        bool is_lazy;
        /*
         * Access to this CR4 shadow and to H/W CR4 is protected by
         * disabling interrupts when modifying either one.
         */
        unsigned long cr4;
        /*
         * This is a list of all contexts that might exist in the TLB.
         * There is one per ASID that we use, and the ASID (what the
         * CPU calls PCID) is the index into ctxs.
         *
         * For each context, ctx_id indicates which mm the TLB's user
         * entries came from. As an invariant, the TLB will never
         * contain entries that were already stale when that mm reached
         * the tlb_gen in the list.
         *
         * To be clear, this means that it's legal for the TLB code to
         * flush the TLB without updating tlb_gen. This can happen
         * (for now, at least) due to paravirt remote flushes.
         *
         * NB: context 0 is a bit special, since it's also used by
         * various bits of init code. This is fine -- code that
         * isn't aware of PCID will end up harmlessly flushing
         * context 0.
         */
        struct tlb_context ctxs[TLB_NR_DYN_ASIDS];
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
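/*
 * Illustrative reading of the layout above: if an mm is currently loaded
 * with ASID 1, then cpu_tlbstate.ctxs[1].ctx_id identifies that mm and
 * cpu_tlbstate.ctxs[1].tlb_gen records the generation this CPU's TLB has
 * caught up to for it.
 */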
/* Initialize cr4 shadow for this CPU. */
static inline void cr4_init_shadow(void)
{
        this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
}
/* Set in this cpu's CR4. */
static inline void cr4_set_bits(unsigned long mask)
{
        unsigned long cr4;

        cr4 = this_cpu_read(cpu_tlbstate.cr4);
        if ((cr4 | mask) != cr4) {
                cr4 |= mask;
                this_cpu_write(cpu_tlbstate.cr4, cr4);
                __write_cr4(cr4);
        }
}
/* Clear in this cpu's CR4. */
static inline void cr4_clear_bits(unsigned long mask)
{
        unsigned long cr4;

        cr4 = this_cpu_read(cpu_tlbstate.cr4);
        if ((cr4 & ~mask) != cr4) {
                cr4 &= ~mask;
                this_cpu_write(cpu_tlbstate.cr4, cr4);
                __write_cr4(cr4);
        }
}
/* Toggle the given bits in this cpu's CR4. */
static inline void cr4_toggle_bits(unsigned long mask)
{
        unsigned long cr4;

        cr4 = this_cpu_read(cpu_tlbstate.cr4);
        cr4 ^= mask;
        this_cpu_write(cpu_tlbstate.cr4, cr4);
        __write_cr4(cr4);
}
/* Read the CR4 shadow. */
static inline unsigned long cr4_read_shadow(void)
{
        return this_cpu_read(cpu_tlbstate.cr4);
}
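/*
 * Illustrative use of the shadow helpers (callers live outside this
 * header): cr4_set_bits(X86_CR4_PGE) reads the shadow, sees that PGE is
 * not yet set, and updates both cpu_tlbstate.cr4 and the hardware CR4;
 * a second identical call is a no-op because the shadow already matches.
 */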
/*
 * Save some of the cr4 feature set we're using (e.g. Pentium 4MB
 * enable and PPro Global page enable), so that any CPUs that boot
 * up after us can get the correct flags. This should only be used
 * during boot on the boot cpu.
 */
extern unsigned long mmu_cr4_features;
extern u32 *trampoline_cr4_features;
static inline void cr4_set_bits_and_update_boot(unsigned long mask)
{
        mmu_cr4_features |= mask;
        if (trampoline_cr4_features)
                *trampoline_cr4_features = mmu_cr4_features;
        cr4_set_bits(mask);
}
extern void initialize_tlbstate_and_flush(void);
static inline void __native_flush_tlb(void)
{
        /*
         * If current->mm == NULL then we borrow a mm which may change during a
         * task switch and therefore we must not be preempted while we write CR3
         * back:
         */
        preempt_disable();
        native_write_cr3(__native_read_cr3());
        preempt_enable();
}
static inline void __native_flush_tlb_global_irq_disabled(void)
{
        unsigned long cr4;

        cr4 = this_cpu_read(cpu_tlbstate.cr4);
        /* clear PGE */
        native_write_cr4(cr4 & ~X86_CR4_PGE);
        /* write old PGE again and flush TLBs */
        native_write_cr4(cr4);
}
static inline void __native_flush_tlb_global(void)
{
        unsigned long flags;

        if (static_cpu_has(X86_FEATURE_INVPCID)) {
                /*
                 * Using INVPCID is considerably faster than a pair of writes
                 * to CR4 sandwiched inside an IRQ flag save/restore.
                 */
                invpcid_flush_all();
                return;
        }

        /*
         * Read-modify-write to CR4 - protect it from preemption and
         * from interrupts. (Use the raw variant because this code can
         * be called from deep inside debugging code.)
         */
        raw_local_irq_save(flags);

        __native_flush_tlb_global_irq_disabled();

        raw_local_irq_restore(flags);
}
static inline void __native_flush_tlb_single(unsigned long addr)
{
        asm volatile("invlpg (%0)" : : "r" (addr) : "memory");
}
static inline void __flush_tlb_all(void)
{
        if (boot_cpu_has(X86_FEATURE_PGE))
                __flush_tlb_global();
        else
                __flush_tlb();

        /*
         * Note: if we somehow had PCID but not PGE, then this wouldn't work --
         * we'd end up flushing kernel translations for the current ASID but
         * we might fail to flush kernel translations for other cached ASIDs.
         *
         * To avoid this issue, we force PCID off if PGE is off.
         */
}
static inline void __flush_tlb_one(unsigned long addr)
{
        count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
        __flush_tlb_single(addr);
}
#define TLB_FLUSH_ALL -1UL
/*
 * TLB flushing:
 *
 *  - flush_tlb_all() flushes all processes' TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context's TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_others(cpumask, info) flushes TLBs on other cpus
 *
 * ..but the i386 has somewhat limited tlb flushing capabilities,
 * and page-granular flushes are available only on i486 and up.
 */
struct flush_tlb_info {
        /*
         * We support several kinds of flushes.
         *
         * - Fully flush a single mm. .mm will be set, .end will be
         *   TLB_FLUSH_ALL, and .new_tlb_gen will be the tlb_gen to
         *   which the IPI sender is trying to catch us up.
         *
         * - Partially flush a single mm. .mm will be set, .start and
         *   .end will indicate the range, and .new_tlb_gen will be set
         *   such that the changes between generation .new_tlb_gen-1 and
         *   .new_tlb_gen are entirely contained in the indicated range.
         *
         * - Fully flush all mms whose tlb_gens have been updated. .mm
         *   will be NULL, .end will be TLB_FLUSH_ALL, and .new_tlb_gen
         *   will be zero.
         */
        struct mm_struct *mm;
        unsigned long start;
        unsigned long end;
        u64 new_tlb_gen;
};
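/*
 * A rough illustration of the first case above (hypothetical; the real
 * initialization is done by the TLB flush implementation, not in this
 * header): a full flush of a single mm could be described as
 *
 *	struct flush_tlb_info info = {
 *		.mm = mm,
 *		.end = TLB_FLUSH_ALL,
 *		.new_tlb_gen = inc_mm_tlb_gen(mm),
 *	};
 *
 * and then handed to flush_tlb_others() along with mm_cpumask(mm).
 */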
#define local_flush_tlb() __flush_tlb()

#define flush_tlb_mm(mm) flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)

#define flush_tlb_range(vma, start, end) \
        flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)
extern void flush_tlb_all(void);
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
                               unsigned long end, unsigned long vmflag);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
{
        flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, VM_NONE);
}
void native_flush_tlb_others(const struct cpumask *cpumask,
                             const struct flush_tlb_info *info);
static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
                                        struct mm_struct *mm)
{
        inc_mm_tlb_gen(mm);
        cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
}
extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
#ifndef CONFIG_PARAVIRT
#define flush_tlb_others(mask, info) \
        native_flush_tlb_others(mask, info)
#endif

#endif /* _ASM_X86_TLBFLUSH_H */