arch/x86/mm/tlb.c
(blame snapshot at commit "x86/mm: Give each mm TLB flush generation a unique ID")
#include <linux/init.h>

#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/cpu.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cache.h>
#include <asm/apic.h>
#include <asm/uv/uv.h>
#include <linux/debugfs.h>

/*
 *	TLB flushing, formerly SMP-only
 *		c/o Linus Torvalds.
 *
 *	These mean you can really definitely utterly forget about
 *	writing to user space from interrupts. (It's not allowed anyway.)
 *
 *	Optimizations Manfred Spraul <manfred@colorfullife.com>
 *
 *	More scalable flush, from Andi Kleen
 *
 *	Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
 */

atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1);

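/*
 * Illustrative sketch (an assumption about code outside this file, based
 * on the commit named in the header): each newly created mm is expected
 * to sample this counter exactly once, roughly:
 *
 *	mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id);
 *
 * so every mm gets a ctx_id that is never reused for the lifetime of the
 * system, which is what makes TLB flush generations comparable across mms.
 */
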
void leave_mm(int cpu)
{
	struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);

	/*
	 * It's plausible that we're in lazy TLB mode while our mm is init_mm.
	 * If so, our callers still expect us to flush the TLB, but there
	 * aren't any user TLB entries in init_mm to worry about.
	 *
	 * This needs to happen before any other sanity checks due to
	 * intel_idle's shenanigans.
	 */
	if (loaded_mm == &init_mm)
		return;

	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		BUG();

	switch_mm(NULL, &init_mm, NULL);
}
EXPORT_SYMBOL_GPL(leave_mm);

void switch_mm(struct mm_struct *prev, struct mm_struct *next,
	       struct task_struct *tsk)
{
	unsigned long flags;

	local_irq_save(flags);
	switch_mm_irqs_off(prev, next, tsk);
	local_irq_restore(flags);
}
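
/*
 * Usage note (hedged; the callers live outside this file): the scheduler
 * already runs with interrupts disabled and is expected to call
 * switch_mm_irqs_off() directly, skipping the save/restore above.
 * switch_mm() itself serves callers that cannot assume the interrupt
 * state, e.g. leave_mm() in this file:
 *
 *	switch_mm(NULL, &init_mm, NULL);
 */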

void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			struct task_struct *tsk)
{
	unsigned cpu = smp_processor_id();
	struct mm_struct *real_prev = this_cpu_read(cpu_tlbstate.loaded_mm);

	/*
	 * NB: The scheduler will call us with prev == next when
	 * switching from lazy TLB mode to normal mode if active_mm
	 * isn't changing.  When this happens, there is no guarantee
	 * that CR3 (and hence cpu_tlbstate.loaded_mm) matches next.
	 *
	 * NB: leave_mm() calls us with prev == NULL and tsk == NULL.
	 */

	this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);

	if (real_prev == next) {
		/*
		 * There's nothing to do: we always keep the per-mm control
		 * regs in sync with cpu_tlbstate.loaded_mm.  Just
		 * sanity-check mm_cpumask.
		 */
		if (WARN_ON_ONCE(!cpumask_test_cpu(cpu, mm_cpumask(next))))
			cpumask_set_cpu(cpu, mm_cpumask(next));
		return;
	}

	if (IS_ENABLED(CONFIG_VMAP_STACK)) {
		/*
		 * If our current stack is in vmalloc space and isn't
		 * mapped in the new pgd, we'll double-fault.  Forcibly
		 * map it.
		 */
		unsigned int stack_pgd_index = pgd_index(current_stack_pointer());

		pgd_t *pgd = next->pgd + stack_pgd_index;

		if (unlikely(pgd_none(*pgd)))
			set_pgd(pgd, init_mm.pgd[stack_pgd_index]);
	}

	this_cpu_write(cpu_tlbstate.loaded_mm, next);

	WARN_ON_ONCE(cpumask_test_cpu(cpu, mm_cpumask(next)));
	cpumask_set_cpu(cpu, mm_cpumask(next));

	/*
	 * Re-load page tables.
	 *
	 * This logic has an ordering constraint:
	 *
	 *  CPU 0: Write to a PTE for 'next'
	 *  CPU 0: load bit 1 in mm_cpumask.  if nonzero, send IPI.
	 *  CPU 1: set bit 1 in next's mm_cpumask
	 *  CPU 1: load from the PTE that CPU 0 writes (implicit)
	 *
	 * We need to prevent an outcome in which CPU 1 observes
	 * the new PTE value and CPU 0 observes bit 1 clear in
	 * mm_cpumask.  (If that occurs, then the IPI will never
	 * be sent, and CPU 0's TLB will contain a stale entry.)
	 *
	 * The bad outcome can occur if either CPU's load is
	 * reordered before that CPU's store, so both CPUs must
	 * execute full barriers to prevent this from happening.
	 *
	 * Thus, switch_mm needs a full barrier between the
	 * store to mm_cpumask and any operation that could load
	 * from next->pgd.  TLB fills are special and can happen
	 * due to instruction fetches or for no reason at all,
	 * and neither LOCK nor MFENCE orders them.
	 * Fortunately, load_cr3() is serializing and gives the
	 * ordering guarantee we need.
	 */
	load_cr3(next->pgd);

	/*
	 * This gets called via leave_mm() in the idle path where RCU
	 * functions differently.  Tracing normally uses RCU, so we have to
	 * call the tracepoint specially here.
	 */
	trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);

	/* Stop flush ipis for the previous mm */
	WARN_ON_ONCE(!cpumask_test_cpu(cpu, mm_cpumask(real_prev)) &&
		     real_prev != &init_mm);
	cpumask_clear_cpu(cpu, mm_cpumask(real_prev));

	/* Load per-mm CR4 and LDTR state */
	load_mm_cr4(next);
	switch_ldt(real_prev, next);
}

static void flush_tlb_func_common(const struct flush_tlb_info *f,
				  bool local, enum tlb_flush_reason reason)
{
	/* This code cannot presently handle being reentered. */
	VM_WARN_ON(!irqs_disabled());

	if (this_cpu_read(cpu_tlbstate.state) != TLBSTATE_OK) {
		leave_mm(smp_processor_id());
		return;
	}

	if (f->end == TLB_FLUSH_ALL) {
		local_flush_tlb();
		if (local)
			count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
		trace_tlb_flush(reason, TLB_FLUSH_ALL);
	} else {
		unsigned long addr;
		unsigned long nr_pages = (f->end - f->start) >> PAGE_SHIFT;

		addr = f->start;
		while (addr < f->end) {
			__flush_tlb_single(addr);
			addr += PAGE_SIZE;
		}
		if (local)
			count_vm_tlb_events(NR_TLB_LOCAL_FLUSH_ONE, nr_pages);
		trace_tlb_flush(reason, nr_pages);
	}
}

static void flush_tlb_func_local(void *info, enum tlb_flush_reason reason)
{
	const struct flush_tlb_info *f = info;

	flush_tlb_func_common(f, true, reason);
}

static void flush_tlb_func_remote(void *info)
{
	const struct flush_tlb_info *f = info;

	inc_irq_stat(irq_tlb_count);

	if (f->mm && f->mm != this_cpu_read(cpu_tlbstate.loaded_mm))
		return;

	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
	flush_tlb_func_common(f, false, TLB_REMOTE_SHOOTDOWN);
}

void native_flush_tlb_others(const struct cpumask *cpumask,
			     const struct flush_tlb_info *info)
{
	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
	if (info->end == TLB_FLUSH_ALL)
		trace_tlb_flush(TLB_REMOTE_SEND_IPI, TLB_FLUSH_ALL);
	else
		trace_tlb_flush(TLB_REMOTE_SEND_IPI,
				(info->end - info->start) >> PAGE_SHIFT);

	if (is_uv_system()) {
		unsigned int cpu;

		cpu = smp_processor_id();
		cpumask = uv_flush_tlb_others(cpumask, info);
		if (cpumask)
			smp_call_function_many(cpumask, flush_tlb_func_remote,
					       (void *)info, 1);
		return;
	}
	smp_call_function_many(cpumask, flush_tlb_func_remote,
			       (void *)info, 1);
}
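
/*
 * Hedged note (an assumption about asm/tlbflush.h, not visible here): on
 * !CONFIG_PARAVIRT builds the generic entry point is expected to route
 * straight to this function, roughly:
 *
 *	#define flush_tlb_others(mask, info)	\
 *		native_flush_tlb_others(mask, info)
 *
 * Paravirtualized guests may instead patch in a hypervisor-assisted
 * implementation via pv_mmu_ops.
 */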

/*
 * See Documentation/x86/tlb.txt for details.  We choose 33
 * because it is large enough to cover the vast majority (at
 * least 95%) of allocations, and is small enough that we are
 * confident it will not cause too much overhead.  Each single
 * flush is about 100 ns, so this caps the maximum overhead at
 * _about_ 3,000 ns.
 *
 * This is in units of pages.
 */
static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
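
/*
 * Worked example (arithmetic only, assuming 4 KiB pages): a ceiling of
 * 33 pages means ranges up to 33 << PAGE_SHIFT = 135168 bytes (132 KiB)
 * are flushed page-by-page; at ~100 ns per INVLPG that is at most
 * ~3.3 us, consistent with the "about 3,000 ns" estimate above.
 */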

void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
			unsigned long end, unsigned long vmflag)
{
	int cpu;

	struct flush_tlb_info info = {
		.mm = mm,
	};

	cpu = get_cpu();

	/* This is also a barrier that synchronizes with switch_mm(). */
	inc_mm_tlb_gen(mm);

	/* Should we flush just the requested range? */
	if ((end != TLB_FLUSH_ALL) &&
	    !(vmflag & VM_HUGETLB) &&
	    ((end - start) >> PAGE_SHIFT) <= tlb_single_page_flush_ceiling) {
		info.start = start;
		info.end = end;
	} else {
		info.start = 0UL;
		info.end = TLB_FLUSH_ALL;
	}

	if (mm == this_cpu_read(cpu_tlbstate.loaded_mm)) {
		VM_WARN_ON(irqs_disabled());
		local_irq_disable();
		flush_tlb_func_local(&info, TLB_LOCAL_MM_SHOOTDOWN);
		local_irq_enable();
	}

	if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), &info);
	put_cpu();
}
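
/*
 * Usage sketch (an assumption about asm/tlbflush.h in this era, not part
 * of this file): generic mm code typically arrives here through wrapper
 * macros, roughly:
 *
 *	#define flush_tlb_range(vma, start, end)	\
 *		flush_tlb_mm_range((vma)->vm_mm, start, end, (vma)->vm_flags)
 *
 * so invalidating a single page of a VMA would look like:
 *
 *	flush_tlb_mm_range(vma->vm_mm, addr, addr + PAGE_SIZE, vma->vm_flags);
 */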

static void do_flush_tlb_all(void *info)
{
	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
	__flush_tlb_all();
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
		leave_mm(smp_processor_id());
}

void flush_tlb_all(void)
{
	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
	on_each_cpu(do_flush_tlb_all, NULL, 1);
}

static void do_kernel_range_flush(void *info)
{
	struct flush_tlb_info *f = info;
	unsigned long addr;

	/* flush the range one page at a time with 'invlpg' */
	for (addr = f->start; addr < f->end; addr += PAGE_SIZE)
		__flush_tlb_single(addr);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	/* Balance as in a user-space task's flush; a bit conservative */
	if (end == TLB_FLUSH_ALL ||
	    (end - start) > tlb_single_page_flush_ceiling << PAGE_SHIFT) {
		on_each_cpu(do_flush_tlb_all, NULL, 1);
	} else {
		struct flush_tlb_info info;

		info.start = start;
		info.end = end;
		on_each_cpu(do_kernel_range_flush, &info, 1);
	}
}
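
/*
 * Worked example (arithmetic only, assuming 4 KiB pages and the default
 * ceiling of 33): a 64 KiB kernel range (16 pages) takes the INVLPG loop
 * above, while a 1 MiB range (256 pages > 33) falls back to
 * do_flush_tlb_all() on every CPU.
 */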

void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
{
	struct flush_tlb_info info = {
		.mm = NULL,
		.start = 0UL,
		.end = TLB_FLUSH_ALL,
	};

	int cpu = get_cpu();

	if (cpumask_test_cpu(cpu, &batch->cpumask)) {
		VM_WARN_ON(irqs_disabled());
		local_irq_disable();
		flush_tlb_func_local(&info, TLB_LOCAL_SHOOTDOWN);
		local_irq_enable();
	}

	if (cpumask_any_but(&batch->cpumask, cpu) < nr_cpu_ids)
		flush_tlb_others(&batch->cpumask, &info);
	cpumask_clear(&batch->cpumask);

	put_cpu();
}
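
/*
 * Hedged note (the caller lives outside this file): page reclaim batches
 * TLB shootdowns while unmapping pages, and try_to_unmap_flush() in
 * mm/rmap.c is expected to drain that batch by calling
 * arch_tlbbatch_flush() on the per-task batch state, roughly:
 *
 *	arch_tlbbatch_flush(&current->tlb_ubc.arch);
 *
 * This is an assumption about the generic mm code, not something
 * visible here.
 */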

static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf,
				  size_t count, loff_t *ppos)
{
	char buf[32];
	unsigned int len;

	len = sprintf(buf, "%ld\n", tlb_single_page_flush_ceiling);
	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static ssize_t tlbflush_write_file(struct file *file,
		 const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	ssize_t len;
	int ceiling;

	len = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, len))
		return -EFAULT;

	buf[len] = '\0';
	if (kstrtoint(buf, 0, &ceiling))
		return -EINVAL;

	if (ceiling < 0)
		return -EINVAL;

	tlb_single_page_flush_ceiling = ceiling;
	return count;
}

static const struct file_operations fops_tlbflush = {
	.read = tlbflush_read_file,
	.write = tlbflush_write_file,
	.llseek = default_llseek,
};

static int __init create_tlb_single_page_flush_ceiling(void)
{
	debugfs_create_file("tlb_single_page_flush_ceiling", S_IRUSR | S_IWUSR,
			    arch_debugfs_dir, NULL, &fops_tlbflush);
	return 0;
}
late_initcall(create_tlb_single_page_flush_ceiling);
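
/*
 * Usage sketch (shell, assuming debugfs is mounted at /sys/kernel/debug;
 * arch_debugfs_dir is the "x86" directory there):
 *
 *	# cat /sys/kernel/debug/x86/tlb_single_page_flush_ceiling
 *	33
 *	# echo 0 > /sys/kernel/debug/x86/tlb_single_page_flush_ceiling
 *
 * Writing 0 makes every ranged flush take the full-flush path; see
 * Documentation/x86/tlb.txt for tuning guidance.
 */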