#include <linux/init.h>

#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/cpu.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cache.h>
#include <asm/apic.h>
#include <asm/uv/uv.h>
#include <linux/debugfs.h>

/*
 *	Smarter SMP flushing macros.
 *		c/o Linus Torvalds.
 *
 *	These mean you can really definitely utterly forget about
 *	writing to user space from interrupts. (It's not allowed anyway).
 *
 *	Optimizations Manfred Spraul <manfred@colorfullife.com>
 *
 *	More scalable flush, from Andi Kleen
 *
 *	Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
 */

#ifdef CONFIG_SMP

struct flush_tlb_info {
	struct mm_struct *flush_mm;
	unsigned long flush_start;
	unsigned long flush_end;
};

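/*
 * flush_tlb_info is the argument block handed to the remote flush
 * handlers via smp_call_function_many(): flush_mm is the address space
 * being flushed, and flush_start/flush_end bound the virtual range,
 * with flush_end == TLB_FLUSH_ALL requesting a full flush.
 */
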
/*
 * We cannot call mmdrop() because we are in interrupt context,
 * instead update mm->cpu_vm_mask.
 */
void leave_mm(int cpu)
{
	struct mm_struct *active_mm = this_cpu_read(cpu_tlbstate.active_mm);

	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		BUG();
	if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
		cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
		load_cr3(swapper_pg_dir);
		/*
		 * This gets called in the idle path where RCU
		 * functions differently.  Tracing normally
		 * uses RCU, so we have to call the tracepoint
		 * specially here.
		 */
		trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
	}
}
EXPORT_SYMBOL_GPL(leave_mm);

#endif /* CONFIG_SMP */

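/*
 * Switch this CPU's address space from 'prev' to 'next'.  Typically
 * called from the scheduler's context-switch path, where interrupts
 * are already disabled; see the flush-IPI ordering rules described
 * further down in this file for why the mm_cpumask updates and the
 * CR3 reload below have to happen in this precise order.
 */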
void switch_mm(struct mm_struct *prev, struct mm_struct *next,
	       struct task_struct *tsk)
{
	unsigned cpu = smp_processor_id();

	if (likely(prev != next)) {
#ifdef CONFIG_SMP
		this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
		this_cpu_write(cpu_tlbstate.active_mm, next);
#endif
		cpumask_set_cpu(cpu, mm_cpumask(next));

		/*
		 * Re-load page tables.
		 *
		 * This logic has an ordering constraint:
		 *
		 *  CPU 0: Write to a PTE for 'next'
		 *  CPU 0: load bit 1 in mm_cpumask.  if nonzero, send IPI.
		 *  CPU 1: set bit 1 in next's mm_cpumask
		 *  CPU 1: load from the PTE that CPU 0 writes (implicit)
		 *
		 * We need to prevent an outcome in which CPU 1 observes
		 * the new PTE value and CPU 0 observes bit 1 clear in
		 * mm_cpumask.  (If that occurs, then the IPI will never
		 * be sent, and CPU 0's TLB will contain a stale entry.)
		 *
		 * The bad outcome can occur if either CPU's load is
		 * reordered before that CPU's store, so both CPUs must
		 * execute full barriers to prevent this from happening.
		 *
		 * Thus, switch_mm needs a full barrier between the
		 * store to mm_cpumask and any operation that could load
		 * from next->pgd.  TLB fills are special and can happen
		 * due to instruction fetches or for no reason at all,
		 * and neither LOCK nor MFENCE orders them.
		 * Fortunately, load_cr3() is serializing and gives the
		 * ordering guarantee we need.
		 */
		load_cr3(next->pgd);

		trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);

		/* Stop flush ipis for the previous mm */
		cpumask_clear_cpu(cpu, mm_cpumask(prev));

		/* Load per-mm CR4 state */
		load_mm_cr4(next);

#ifdef CONFIG_MODIFY_LDT_SYSCALL
		/*
		 * Load the LDT, if the LDT is different.
		 *
		 * It's possible that prev->context.ldt doesn't match
		 * the LDT register.  This can happen if leave_mm(prev)
		 * was called and then modify_ldt changed
		 * prev->context.ldt but suppressed an IPI to this CPU.
		 * In this case, prev->context.ldt != NULL, because we
		 * never set context.ldt to NULL while the mm still
		 * exists.  That means that next->context.ldt !=
		 * prev->context.ldt, because mms never share an LDT.
		 */
		if (unlikely(prev->context.ldt != next->context.ldt))
			load_mm_ldt(next);
#endif
	}
#ifdef CONFIG_SMP
	else {
		this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
		BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);

		if (!cpumask_test_cpu(cpu, mm_cpumask(next))) {
			/*
			 * On established mms, the mm_cpumask is only changed
			 * from irq context, from ptep_clear_flush() while in
			 * lazy tlb mode, and here.  Irqs are blocked during
			 * schedule, protecting us from simultaneous changes.
			 */
			cpumask_set_cpu(cpu, mm_cpumask(next));

			/*
			 * We were in lazy tlb mode and leave_mm disabled
			 * tlb flush IPI delivery.  We must reload CR3
			 * to make sure to use no freed page tables.
			 *
			 * As above, load_cr3() is serializing and orders TLB
			 * fills with respect to the mm_cpumask write.
			 */
			load_cr3(next->pgd);
			trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
			load_mm_cr4(next);
			load_mm_ldt(next);
		}
	}
#endif
}

#ifdef CONFIG_SMP

/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) set cpu_tlbstate to TLBSTATE_OK
 *	Now the tlb flush IPI handler flush_tlb_func won't call leave_mm
 *	if cpu0 was in lazy tlb mode.
 * 1a2) update cpu active_mm
 *	Now cpu0 accepts tlb flushes for the new mm.
 * 1a3) cpu_set(cpu, new_mm->cpu_vm_mask);
 *	Now the other cpus will send tlb flush ipis.
 * 1a4) change cr3.
 * 1a5) cpu_clear(cpu, old_mm->cpu_vm_mask);
 *	Stop ipi delivery for the old mm.  This is not synchronized with
 *	the other cpus, but flush_tlb_func ignores flush ipis for the wrong
 *	mm, and in the worst case we perform a superfluous tlb flush.
 * 1b) thread switch without mm change
 *	cpu active_mm is correct, cpu0 already handles flush ipis.
 * 1b1) set cpu_tlbstate to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *	Atomically set the bit [other cpus will start sending flush ipis],
 *	and test the bit.
 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %%esp, i.e. current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %%esp, i.e. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu_tlbstate is local to each cpu, so there are
 * no write/read ordering problems.
 */

/*
 * TLB flush function:
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in the lazy tlb mode.
 */
static void flush_tlb_func(void *info)
{
	struct flush_tlb_info *f = info;

	inc_irq_stat(irq_tlb_count);

	if (f->flush_mm && f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
		return;

	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
		if (f->flush_end == TLB_FLUSH_ALL) {
			local_flush_tlb();
			trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, TLB_FLUSH_ALL);
		} else {
			unsigned long addr;
			unsigned long nr_pages =
				(f->flush_end - f->flush_start) / PAGE_SIZE;
			addr = f->flush_start;
			while (addr < f->flush_end) {
				__flush_tlb_single(addr);
				addr += PAGE_SIZE;
			}
			trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, nr_pages);
		}
	} else
		leave_mm(smp_processor_id());
}

void native_flush_tlb_others(const struct cpumask *cpumask,
				 struct mm_struct *mm, unsigned long start,
				 unsigned long end)
{
	struct flush_tlb_info info;

	if (end == 0)
		end = start + PAGE_SIZE;
	info.flush_mm = mm;
	info.flush_start = start;
	info.flush_end = end;

	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
	if (end == TLB_FLUSH_ALL)
		trace_tlb_flush(TLB_REMOTE_SEND_IPI, TLB_FLUSH_ALL);
	else
		trace_tlb_flush(TLB_REMOTE_SEND_IPI,
				(end - start) >> PAGE_SHIFT);

	if (is_uv_system()) {
		unsigned int cpu;

		cpu = smp_processor_id();
		cpumask = uv_flush_tlb_others(cpumask, mm, start, end, cpu);
		if (cpumask)
			smp_call_function_many(cpumask, flush_tlb_func,
					       &info, 1);
		return;
	}
	smp_call_function_many(cpumask, flush_tlb_func, &info, 1);
}

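/*
 * Note: the flush_tlb_others() calls below normally resolve to
 * native_flush_tlb_others() above (directly, or through the paravirt
 * ops table when CONFIG_PARAVIRT is enabled), so each remote flush ends
 * up running flush_tlb_func() on the CPUs in the mask.
 */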
void flush_tlb_current_task(void)
{
	struct mm_struct *mm = current->mm;

	preempt_disable();

	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);

	/* This is an implicit full barrier that synchronizes with switch_mm. */
	local_flush_tlb();

	trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
	preempt_enable();
}

/*
 * See Documentation/x86/tlb.txt for details.  We choose 33
 * because it is large enough to cover the vast majority (at
 * least 95%) of allocations, and is small enough that we are
 * confident it will not cause too much overhead.  Each single
 * flush is about 100 ns, so this caps the maximum overhead at
 * _about_ 3,000 ns.
 *
 * This is in units of pages.
 */
static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;

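/*
 * Illustrative example (assuming 4 KiB pages): a 64 KiB range is 16
 * pages, below the ceiling, so flush_tlb_mm_range() below flushes it
 * with 16 individual INVLPGs; a 1 MiB range is 256 pages, which exceeds
 * the ceiling and is handled as a full TLB flush instead.
 */
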
void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned long vmflag)
{
	unsigned long addr;
	/* do a global flush by default */
	unsigned long base_pages_to_flush = TLB_FLUSH_ALL;

	preempt_disable();
	if (current->active_mm != mm) {
		/* Synchronize with switch_mm. */
		smp_mb();

		goto out;
	}

	if (!current->mm) {
		leave_mm(smp_processor_id());

		/* Synchronize with switch_mm. */
		smp_mb();

		goto out;
	}

	if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB))
		base_pages_to_flush = (end - start) >> PAGE_SHIFT;

	/*
	 * Both branches below are implicit full barriers (MOV to CR or
	 * INVLPG) that synchronize with switch_mm.
	 */
	if (base_pages_to_flush > tlb_single_page_flush_ceiling) {
		base_pages_to_flush = TLB_FLUSH_ALL;
		count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
		local_flush_tlb();
	} else {
		/* flush the range one page at a time with 'invlpg' */
		for (addr = start; addr < end; addr += PAGE_SIZE) {
			count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
			__flush_tlb_single(addr);
		}
	}
	trace_tlb_flush(TLB_LOCAL_MM_SHOOTDOWN, base_pages_to_flush);
out:
	if (base_pages_to_flush == TLB_FLUSH_ALL) {
		start = 0UL;
		end = TLB_FLUSH_ALL;
	}
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, start, end);
	preempt_enable();
}

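/*
 * For reference: the flush_tlb_range() helper used by generic mm code
 * is expected to funnel into the function above, roughly as
 * flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags); see
 * asm/tlbflush.h for the exact definition.
 */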
void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();

	if (current->active_mm == mm) {
		if (current->mm) {
			/*
			 * Implicit full barrier (INVLPG) that synchronizes
			 * with switch_mm.
			 */
			__flush_tlb_one(start);
		} else {
			leave_mm(smp_processor_id());

			/* Synchronize with switch_mm. */
			smp_mb();
		}
	}

	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, start, 0UL);

	preempt_enable();
}

static void do_flush_tlb_all(void *info)
{
	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
	__flush_tlb_all();
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
		leave_mm(smp_processor_id());
}

void flush_tlb_all(void)
{
	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
	on_each_cpu(do_flush_tlb_all, NULL, 1);
}

static void do_kernel_range_flush(void *info)
{
	struct flush_tlb_info *f = info;
	unsigned long addr;

	/* flush the range one page at a time with 'invlpg' */
	for (addr = f->flush_start; addr < f->flush_end; addr += PAGE_SIZE)
		__flush_tlb_single(addr);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	/* Balance this like a user space task's flush; a bit conservative. */
	if (end == TLB_FLUSH_ALL ||
	    (end - start) > tlb_single_page_flush_ceiling * PAGE_SIZE) {
		on_each_cpu(do_flush_tlb_all, NULL, 1);
	} else {
		struct flush_tlb_info info;
		info.flush_start = start;
		info.flush_end = end;
		on_each_cpu(do_kernel_range_flush, &info, 1);
	}
}

static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	char buf[32];
	unsigned int len;

	len = sprintf(buf, "%ld\n", tlb_single_page_flush_ceiling);
	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static ssize_t tlbflush_write_file(struct file *file,
		 const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	ssize_t len;
	int ceiling;

	len = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, len))
		return -EFAULT;

	buf[len] = '\0';
	if (kstrtoint(buf, 0, &ceiling))
		return -EINVAL;

	if (ceiling < 0)
		return -EINVAL;

	tlb_single_page_flush_ceiling = ceiling;
	return count;
}

static const struct file_operations fops_tlbflush = {
	.read = tlbflush_read_file,
	.write = tlbflush_write_file,
	.llseek = default_llseek,
};

static int __init create_tlb_single_page_flush_ceiling(void)
{
	debugfs_create_file("tlb_single_page_flush_ceiling", S_IRUSR | S_IWUSR,
			    arch_debugfs_dir, NULL, &fops_tlbflush);
	return 0;
}
late_initcall(create_tlb_single_page_flush_ceiling);
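
/*
 * Example usage from user space (assuming debugfs is mounted at the
 * usual /sys/kernel/debug):
 *
 *   # cat /sys/kernel/debug/x86/tlb_single_page_flush_ceiling
 *   33
 *   # echo 1 > /sys/kernel/debug/x86/tlb_single_page_flush_ceiling
 *
 * See Documentation/x86/tlb.txt for guidance on tuning this value.
 */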

#endif /* CONFIG_SMP */