/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_TLBFLUSH_H
#define _ASM_X86_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>
#include <asm/smp.h>

static inline void __invpcid(unsigned long pcid, unsigned long addr,
                             unsigned long type)
{
        struct { u64 d[2]; } desc = { { pcid, addr } };

        /*
         * The memory clobber is because the whole point is to invalidate
         * stale TLB entries and, especially if we're flushing global
         * mappings, we don't want the compiler to reorder any subsequent
         * memory accesses before the TLB flush.
         *
         * The hex opcode is invpcid (%ecx), %eax in 32-bit mode and
         * invpcid (%rcx), %rax in long mode.
         */
        asm volatile (".byte 0x66, 0x0f, 0x38, 0x82, 0x01"
                      : : "m" (desc), "a" (type), "c" (&desc) : "memory");
}
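
/*
 * Descriptor layout sketch (added for illustration; see the SDM's
 * INVPCID documentation): the instruction takes a 128-bit in-memory
 * descriptor, which is why desc above packs { pcid, addr }:
 *
 *      bits 11:0  of d[0]: PCID
 *      bits 63:12 of d[0]: reserved, must be zero
 *      d[1]:               linear address (used by type 0 only)
 */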

#define INVPCID_TYPE_INDIV_ADDR         0
#define INVPCID_TYPE_SINGLE_CTXT        1
#define INVPCID_TYPE_ALL_INCL_GLOBAL    2
#define INVPCID_TYPE_ALL_NON_GLOBAL     3

/* Flush all mappings for a given pcid and addr, not including globals. */
static inline void invpcid_flush_one(unsigned long pcid,
                                     unsigned long addr)
{
        __invpcid(pcid, addr, INVPCID_TYPE_INDIV_ADDR);
}

/* Flush all mappings for a given PCID, not including globals. */
static inline void invpcid_flush_single_context(unsigned long pcid)
{
        __invpcid(pcid, 0, INVPCID_TYPE_SINGLE_CTXT);
}

/* Flush all mappings, including globals, for all PCIDs. */
static inline void invpcid_flush_all(void)
{
        __invpcid(0, 0, INVPCID_TYPE_ALL_INCL_GLOBAL);
}

/* Flush all mappings for all PCIDs except globals. */
static inline void invpcid_flush_all_nonglobals(void)
{
        __invpcid(0, 0, INVPCID_TYPE_ALL_NON_GLOBAL);
}
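
/*
 * Usage sketch (illustrative only, not part of this header): callers
 * should pick the narrowest type that covers what changed, and must
 * check for the feature first -- INVPCID raises #UD otherwise.
 */
static inline void example_invpcid_flush(unsigned long pcid,
                                         unsigned long addr)
{
        if (!static_cpu_has(X86_FEATURE_INVPCID))
                return;         /* caller falls back to CR3/CR4 writes */

        if (addr)
                invpcid_flush_one(pcid, addr);          /* one page, one PCID */
        else
                invpcid_flush_single_context(pcid);     /* whole PCID */
}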

static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
{
        u64 new_tlb_gen;

        /*
         * Bump the generation count.  This also serves as a full barrier
         * that synchronizes with switch_mm(): callers are required to order
         * their read of mm_cpumask after their writes to the paging
         * structures.
         */
        smp_mb__before_atomic();
        new_tlb_gen = atomic64_inc_return(&mm->context.tlb_gen);
        smp_mb__after_atomic();

        return new_tlb_gen;
}
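
/*
 * Usage sketch (simplified from flush_tlb_mm_range() in
 * arch/x86/mm/tlb.c): the flush initiator updates the page tables,
 * bumps tlb_gen, and only then reads mm_cpumask() to choose IPI
 * targets.  The barriers above guarantee that any CPU it does not IPI
 * either was never running this mm or will observe the new tlb_gen
 * when it next switches to it:
 *
 *      info.new_tlb_gen = inc_mm_tlb_gen(mm);
 *      if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids)
 *              flush_tlb_others(mm_cpumask(mm), &info);
 */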

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __flush_tlb() __native_flush_tlb()
#define __flush_tlb_global() __native_flush_tlb_global()
#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
#endif

static inline bool tlb_defer_switch_to_init_mm(void)
{
        /*
         * If we have PCID, then switching to init_mm is reasonably
         * fast.  If we don't have PCID, then switching to init_mm is
         * quite slow, so we try to defer it in the hopes that we can
         * avoid it entirely.  The latter approach runs the risk of
         * receiving otherwise unnecessary IPIs.
         *
         * This choice is just a heuristic.  The tlb code can handle this
         * function returning true or false regardless of whether we have
         * PCID.
         */
        return !static_cpu_has(X86_FEATURE_PCID);
}

/*
 * 6 because 6 should be plenty and struct tlb_state will fit in
 * two cache lines.
 */
#define TLB_NR_DYN_ASIDS 6

struct tlb_context {
        u64 ctx_id;
        u64 tlb_gen;
};

struct tlb_state {
        /*
         * cpu_tlbstate.loaded_mm should match CR3 whenever interrupts
         * are on.  This means that it may not match current->active_mm,
         * which will contain the previous user mm when we're in lazy TLB
         * mode even if we've already switched back to swapper_pg_dir.
         */
        struct mm_struct *loaded_mm;
        u16 loaded_mm_asid;
        u16 next_asid;

        /*
         * We can be in one of several states:
         *
         *  - Actively using an mm.  Our CPU's bit will be set in
         *    mm_cpumask(loaded_mm) and is_lazy == false;
         *
         *  - Not using a real mm.  loaded_mm == &init_mm.  Our CPU's bit
         *    will not be set in mm_cpumask(&init_mm) and is_lazy == false.
         *
         *  - Lazily using a real mm.  loaded_mm != &init_mm, our bit
         *    is set in mm_cpumask(loaded_mm), but is_lazy == true.
         *    We're heuristically guessing that the CR3 load we
         *    skipped more than makes up for the overhead added by
         *    lazy mode.
         */
        bool is_lazy;

        /*
         * Access to this CR4 shadow and to H/W CR4 is protected by
         * disabling interrupts when modifying either one.
         */
        unsigned long cr4;

        /*
         * This is a list of all contexts that might exist in the TLB.
         * There is one per ASID that we use, and the ASID (what the
         * CPU calls PCID) is the index into ctxs[].
         *
         * For each context, ctx_id indicates which mm the TLB's user
         * entries came from.  As an invariant, the TLB will never
         * contain entries that were already out of date as of the point
         * when that mm reached the tlb_gen recorded in this list.
         *
         * To be clear, this means that it's legal for the TLB code to
         * flush the TLB without updating tlb_gen.  This can happen
         * (for now, at least) due to paravirt remote flushes.
         *
         * NB: context 0 is a bit special, since it's also used by
         * various bits of init code.  This is fine -- code that
         * isn't aware of PCID will end up harmlessly flushing
         * context 0.
         */
        struct tlb_context ctxs[TLB_NR_DYN_ASIDS];
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
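
/*
 * Illustrative helper (hypothetical; the real ASID selection is
 * choose_new_asid() in arch/x86/mm/tlb.c): an ASID slot can be reused
 * without a flush only if it still caches this mm's context and that
 * context has already reached the mm's current generation.
 */
static inline bool example_asid_is_current(struct mm_struct *mm, u16 asid)
{
        return this_cpu_read(cpu_tlbstate.ctxs[asid].ctx_id) ==
                        mm->context.ctx_id &&
               this_cpu_read(cpu_tlbstate.ctxs[asid].tlb_gen) ==
                        atomic64_read(&mm->context.tlb_gen);
}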

/* Initialize cr4 shadow for this CPU. */
static inline void cr4_init_shadow(void)
{
        this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
}

/* Set in this cpu's CR4. */
static inline void cr4_set_bits(unsigned long mask)
{
        unsigned long cr4;

        cr4 = this_cpu_read(cpu_tlbstate.cr4);
        if ((cr4 | mask) != cr4) {
                cr4 |= mask;
                this_cpu_write(cpu_tlbstate.cr4, cr4);
                __write_cr4(cr4);
        }
}

/* Clear in this cpu's CR4. */
static inline void cr4_clear_bits(unsigned long mask)
{
        unsigned long cr4;

        cr4 = this_cpu_read(cpu_tlbstate.cr4);
        if ((cr4 & ~mask) != cr4) {
                cr4 &= ~mask;
                this_cpu_write(cpu_tlbstate.cr4, cr4);
                __write_cr4(cr4);
        }
}

static inline void cr4_toggle_bits(unsigned long mask)
{
        unsigned long cr4;

        cr4 = this_cpu_read(cpu_tlbstate.cr4);
        cr4 ^= mask;
        this_cpu_write(cpu_tlbstate.cr4, cr4);
        __write_cr4(cr4);
}
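
/*
 * Usage sketch: context-switch code toggles CR4.TSD when two tasks
 * disagree about RDTSC permissions, roughly as __switch_to_xtra()
 * does (simplified):
 *
 *      if ((prev_tif ^ next_tif) & _TIF_NOTSC)
 *              cr4_toggle_bits(X86_CR4_TSD);
 *
 * The caller runs with interrupts off, which is what keeps the shadow
 * and the hardware register in sync.
 */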

/* Read the CR4 shadow. */
static inline unsigned long cr4_read_shadow(void)
{
        return this_cpu_read(cpu_tlbstate.cr4);
}

/*
 * Save some of the cr4 feature set we're using (e.g. Pentium 4MB
 * enable and PPro Global page enable), so that any CPUs that boot
 * up after us can get the correct flags.  This should only be used
 * during boot on the boot cpu.
 */
extern unsigned long mmu_cr4_features;
extern u32 *trampoline_cr4_features;

static inline void cr4_set_bits_and_update_boot(unsigned long mask)
{
        mmu_cr4_features |= mask;
        if (trampoline_cr4_features)
                *trampoline_cr4_features = mmu_cr4_features;
        cr4_set_bits(mask);
}

extern void initialize_tlbstate_and_flush(void);

static inline void __native_flush_tlb(void)
{
        /*
         * If current->mm == NULL then we borrow a mm which may change during a
         * task switch and therefore we must not be preempted while we write CR3
         * back:
         */
        preempt_disable();
        native_write_cr3(__native_read_cr3());
        preempt_enable();
}

static inline void __native_flush_tlb_global_irq_disabled(void)
{
        unsigned long cr4;

        cr4 = this_cpu_read(cpu_tlbstate.cr4);
        /* clear PGE */
        native_write_cr4(cr4 & ~X86_CR4_PGE);
        /* write old PGE again and flush TLBs */
        native_write_cr4(cr4);
}

static inline void __native_flush_tlb_global(void)
{
        unsigned long flags;

        if (static_cpu_has(X86_FEATURE_INVPCID)) {
                /*
                 * Using INVPCID is considerably faster than a pair of writes
                 * to CR4 sandwiched inside an IRQ flag save/restore.
                 */
                invpcid_flush_all();
                return;
        }

        /*
         * Read-modify-write to CR4 - protect it from preemption and
         * from interrupts.  (Use the raw variant because this code can
         * be called from deep inside debugging code.)
         */
        raw_local_irq_save(flags);

        __native_flush_tlb_global_irq_disabled();

        raw_local_irq_restore(flags);
}

static inline void __native_flush_tlb_single(unsigned long addr)
{
        asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
}

static inline void __flush_tlb_all(void)
{
        if (boot_cpu_has(X86_FEATURE_PGE))
                __flush_tlb_global();
        else
                __flush_tlb();

        /*
         * Note: if we somehow had PCID but not PGE, then this wouldn't work --
         * we'd end up flushing kernel translations for the current ASID but
         * we might fail to flush kernel translations for other cached ASIDs.
         *
         * To avoid this issue, we force PCID off if PGE is off.
         */
}

static inline void __flush_tlb_one(unsigned long addr)
{
        count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
        __flush_tlb_single(addr);
}

#define TLB_FLUSH_ALL   -1UL

/*
 * TLB flushing:
 *
 *  - flush_tlb_all() flushes all processes' TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context's TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_others(cpumask, info) flushes TLBs on other cpus
 *
 * ...but the i386 has somewhat limited tlb flushing capabilities,
 * and page-granular flushes are available only on i486 and up.
 */
struct flush_tlb_info {
        /*
         * We support several kinds of flushes.
         *
         * - Fully flush a single mm.  .mm will be set, .end will be
         *   TLB_FLUSH_ALL, and .new_tlb_gen will be the tlb_gen to
         *   which the IPI sender is trying to catch us up.
         *
         * - Partially flush a single mm.  .mm will be set, .start and
         *   .end will indicate the range, and .new_tlb_gen will be set
         *   such that the changes between generation .new_tlb_gen-1 and
         *   .new_tlb_gen are entirely contained in the indicated range.
         *
         * - Fully flush all mms whose tlb_gens have been updated.  .mm
         *   will be NULL, .end will be TLB_FLUSH_ALL, and .new_tlb_gen
         *   will be zero.
         */
        struct mm_struct *mm;
        unsigned long start;
        unsigned long end;
        u64 new_tlb_gen;
};
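
/*
 * Illustrative initializers for the three flavors above (field values
 * only; the real senders live in arch/x86/mm/tlb.c):
 *
 * Fully flush one mm:
 *      struct flush_tlb_info full = { .mm = mm, .start = 0,
 *              .end = TLB_FLUSH_ALL, .new_tlb_gen = inc_mm_tlb_gen(mm) };
 *
 * Partially flush one mm:
 *      struct flush_tlb_info part = { .mm = mm, .start = va,
 *              .end = va + PAGE_SIZE, .new_tlb_gen = inc_mm_tlb_gen(mm) };
 *
 * Fully flush all mms with updated tlb_gens:
 *      struct flush_tlb_info all = { .mm = NULL, .start = 0,
 *              .end = TLB_FLUSH_ALL };
 */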

#define local_flush_tlb() __flush_tlb()

#define flush_tlb_mm(mm)        flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)

#define flush_tlb_range(vma, start, end)        \
                flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)

extern void flush_tlb_all(void);
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
                               unsigned long end, unsigned long vmflag);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
{
        flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, VM_NONE);
}

void native_flush_tlb_others(const struct cpumask *cpumask,
                             const struct flush_tlb_info *info);

static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
                                        struct mm_struct *mm)
{
        inc_mm_tlb_gen(mm);
        cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
}

extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
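
/*
 * Usage sketch (simplified; the generic callers are in mm/rmap.c):
 * the reclaim path batches deferred flushes while unmapping, then
 * issues a single flush for the whole batch:
 *
 *      arch_tlbbatch_add_mm(&batch, mm);       once per mm unmapped from
 *      ...
 *      arch_tlbbatch_flush(&batch);            one IPI round for the batch
 */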

#ifndef CONFIG_PARAVIRT
#define flush_tlb_others(mask, info)    \
        native_flush_tlb_others(mask, info)
#endif

#endif /* _ASM_X86_TLBFLUSH_H */