#include <linux/init.h>

#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/cpu.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cache.h>
#include <asm/apic.h>
#include <asm/uv/uv.h>
#include <linux/debugfs.h>

/*
 *	TLB flushing, formerly SMP-only
 *		c/o Linus Torvalds.
 *
 *	These mean you can really definitely utterly forget about
 *	writing to user space from interrupts. (It's not allowed anyway.)
 *
 *	Optimizations Manfred Spraul <manfred@colorfullife.com>
 *
 *	More scalable flush, from Andi Kleen
 *
 *	Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
 */

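/*
 * Source of the per-mm ctx_id values: init_new_context() (see
 * asm/mmu_context.h) hands each new mm a unique ID from this counter, and
 * the per-CPU cpu_tlbstate compares its cached ctx_id against
 * mm->context.ctx_id to tell whether its TLB state belongs to this mm.
 */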
atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1);

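/*
 * leave_mm() drops this CPU out of a user mm that it is only running lazily
 * and switches to init_mm instead.  Note that the cpu argument is unused
 * here: the function only operates on the CPU it runs on, via
 * smp_processor_id().
 */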
void leave_mm(int cpu)
{
        struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);

        /*
         * It's plausible that we're in lazy TLB mode while our mm is init_mm.
         * If so, our callers still expect us to flush the TLB, but there
         * aren't any user TLB entries in init_mm to worry about.
         *
         * This needs to happen before any other sanity checks due to
         * intel_idle's shenanigans.
         */
        if (loaded_mm == &init_mm)
                return;

        /* Warn if we're not lazy. */
        WARN_ON(cpumask_test_cpu(smp_processor_id(), mm_cpumask(loaded_mm)));

        switch_mm(NULL, &init_mm, NULL);
}

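/*
 * switch_mm() is the interrupt-safe wrapper; all of the real context-switch
 * work happens in switch_mm_irqs_off() below.
 */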
void switch_mm(struct mm_struct *prev, struct mm_struct *next,
               struct task_struct *tsk)
{
        unsigned long flags;

        local_irq_save(flags);
        switch_mm_irqs_off(prev, next, tsk);
        local_irq_restore(flags);
}

void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
                        struct task_struct *tsk)
{
        struct mm_struct *real_prev = this_cpu_read(cpu_tlbstate.loaded_mm);
        unsigned cpu = smp_processor_id();
        u64 next_tlb_gen;

        /*
         * NB: The scheduler will call us with prev == next when switching
         * from lazy TLB mode to normal mode if active_mm isn't changing.
         * When this happens, we don't assume that CR3 (and hence
         * cpu_tlbstate.loaded_mm) matches next.
         *
         * NB: leave_mm() calls us with prev == NULL and tsk == NULL.
         */

        /* We don't want flush_tlb_func_* to run concurrently with us. */
        if (IS_ENABLED(CONFIG_PROVE_LOCKING))
                WARN_ON_ONCE(!irqs_disabled());

        /*
         * Verify that CR3 is what we think it is.  This will catch
         * hypothetical buggy code that directly switches to swapper_pg_dir
         * without going through leave_mm() / switch_mm_irqs_off().
         */
        VM_BUG_ON(read_cr3_pa() != __pa(real_prev->pgd));

        if (real_prev == next) {
                VM_BUG_ON(this_cpu_read(cpu_tlbstate.ctxs[0].ctx_id) !=
                          next->context.ctx_id);

                if (cpumask_test_cpu(cpu, mm_cpumask(next))) {
                        /*
                         * There's nothing to do: we weren't lazy, and we
                         * aren't changing our mm.  We don't need to flush
                         * anything, nor do we need to update CR3, CR4, or
                         * LDTR.
                         */
                        return;
                }

                /* Resume remote flushes and then read tlb_gen. */
                cpumask_set_cpu(cpu, mm_cpumask(next));
                next_tlb_gen = atomic64_read(&next->context.tlb_gen);

                if (this_cpu_read(cpu_tlbstate.ctxs[0].tlb_gen) < next_tlb_gen) {
                        /*
                         * Ideally, we'd have a flush_tlb() variant that
                         * takes the known CR3 value as input.  This would
                         * be faster on Xen PV and on hypothetical CPUs
                         * on which INVPCID is fast.
                         */
                        this_cpu_write(cpu_tlbstate.ctxs[0].tlb_gen,
                                       next_tlb_gen);
                        write_cr3(__sme_pa(next->pgd));
                        trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH,
                                        TLB_FLUSH_ALL);
                }

                /*
                 * We just exited lazy mode, which means that CR4 and/or LDTR
                 * may be stale.  (Changes to the required CR4 and LDTR states
                 * are not reflected in tlb_gen.)
                 */
        } else {
                VM_BUG_ON(this_cpu_read(cpu_tlbstate.ctxs[0].ctx_id) ==
                          next->context.ctx_id);

                if (IS_ENABLED(CONFIG_VMAP_STACK)) {
                        /*
                         * If our current stack is in vmalloc space and isn't
                         * mapped in the new pgd, we'll double-fault.  Forcibly
                         * map it.
                         */
                        unsigned int index = pgd_index(current_stack_pointer());
                        pgd_t *pgd = next->pgd + index;

                        if (unlikely(pgd_none(*pgd)))
                                set_pgd(pgd, init_mm.pgd[index]);
                }

                /* Stop remote flushes for the previous mm */
                if (cpumask_test_cpu(cpu, mm_cpumask(real_prev)))
                        cpumask_clear_cpu(cpu, mm_cpumask(real_prev));

                VM_WARN_ON_ONCE(cpumask_test_cpu(cpu, mm_cpumask(next)));

                /*
                 * Start remote flushes and then read tlb_gen.
                 */
                cpumask_set_cpu(cpu, mm_cpumask(next));
                next_tlb_gen = atomic64_read(&next->context.tlb_gen);

                this_cpu_write(cpu_tlbstate.ctxs[0].ctx_id, next->context.ctx_id);
                this_cpu_write(cpu_tlbstate.ctxs[0].tlb_gen, next_tlb_gen);
                this_cpu_write(cpu_tlbstate.loaded_mm, next);
                write_cr3(__sme_pa(next->pgd));

                trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
        }

        load_mm_cr4(next);
        switch_ldt(real_prev, next);
}

/*
 * flush_tlb_func_common()'s memory ordering requirement is that any
 * TLB fills that happen after we flush the TLB are ordered after we
 * read active_mm's tlb_gen.  We don't need any explicit barriers
 * because all x86 flush operations are serializing and the
 * atomic64_read operation won't be reordered by the compiler.
 */
static void flush_tlb_func_common(const struct flush_tlb_info *f,
                                  bool local, enum tlb_flush_reason reason)
{
        /*
         * We have three different tlb_gen values in here.  They are:
         *
         * - mm_tlb_gen:     the latest generation.
         * - local_tlb_gen:  the generation that this CPU has already caught
         *                   up to.
         * - f->new_tlb_gen: the generation that the requester of the flush
         *                   wants us to catch up to.
         */
        struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
        u64 mm_tlb_gen = atomic64_read(&loaded_mm->context.tlb_gen);
        u64 local_tlb_gen = this_cpu_read(cpu_tlbstate.ctxs[0].tlb_gen);

        /* This code cannot presently handle being reentered. */
        VM_WARN_ON(!irqs_disabled());

        VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[0].ctx_id) !=
                   loaded_mm->context.ctx_id);

        if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(loaded_mm))) {
                /*
                 * We're in lazy mode -- don't flush.  We can get here on
                 * remote flushes due to races and on local flushes if a
                 * kernel thread coincidentally flushes the mm it's lazily
                 * still using.
                 */
                return;
        }

        if (unlikely(local_tlb_gen == mm_tlb_gen)) {
                /*
                 * There's nothing to do: we're already up to date.  This can
                 * happen if two concurrent flushes happen -- the first flush
                 * to be handled can catch us all the way up, leaving no work
                 * for the second flush.
                 */
                trace_tlb_flush(reason, 0);
                return;
        }

        WARN_ON_ONCE(local_tlb_gen > mm_tlb_gen);
        WARN_ON_ONCE(f->new_tlb_gen > mm_tlb_gen);

        /*
         * If we get to this point, we know that our TLB is out of date.
         * This does not strictly imply that we need to flush (it's
         * possible that f->new_tlb_gen <= local_tlb_gen), but we're
         * going to need to flush in the very near future, so we might
         * as well get it over with.
         *
         * The only question is whether to do a full or partial flush.
         *
         * We do a partial flush if requested and two extra conditions
         * are met:
         *
         * 1. f->new_tlb_gen == local_tlb_gen + 1.  We have an invariant that
         *    we've always done all needed flushes to catch up to
         *    local_tlb_gen.  If, for example, local_tlb_gen == 2 and
         *    f->new_tlb_gen == 3, then we know that the flush needed to bring
         *    us up to date for tlb_gen 3 is the partial flush we're
         *    processing.
         *
         *    As an example of why this check is needed, suppose that there
         *    are two concurrent flushes.  The first is a full flush that
         *    changes context.tlb_gen from 1 to 2.  The second is a partial
         *    flush that changes context.tlb_gen from 2 to 3.  If they get
         *    processed on this CPU in reverse order, we'll see
         *    local_tlb_gen == 1, mm_tlb_gen == 3, and end != TLB_FLUSH_ALL.
         *    If we were to use __flush_tlb_single() and set local_tlb_gen to
         *    3, we'd break the invariant: we'd update local_tlb_gen above
         *    1 without the full flush that's needed for tlb_gen 2.
         *
         * 2. f->new_tlb_gen == mm_tlb_gen.  This is purely an optimization.
         *    Partial TLB flushes are not all that much cheaper than full TLB
         *    flushes, so it seems unlikely that it would be a performance win
         *    to do a partial flush if that won't bring our TLB fully up to
         *    date.  By doing a full flush instead, we can increase
         *    local_tlb_gen all the way to mm_tlb_gen and we can probably
         *    avoid another flush in the very near future.
         */
        if (f->end != TLB_FLUSH_ALL &&
            f->new_tlb_gen == local_tlb_gen + 1 &&
            f->new_tlb_gen == mm_tlb_gen) {
                /* Partial flush */
                unsigned long addr;
                unsigned long nr_pages = (f->end - f->start) >> PAGE_SHIFT;

                addr = f->start;
                while (addr < f->end) {
                        __flush_tlb_single(addr);
                        addr += PAGE_SIZE;
                }
                if (local)
                        count_vm_tlb_events(NR_TLB_LOCAL_FLUSH_ONE, nr_pages);
                trace_tlb_flush(reason, nr_pages);
        } else {
                /* Full flush. */
                local_flush_tlb();
                if (local)
                        count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
                trace_tlb_flush(reason, TLB_FLUSH_ALL);
        }

        /* Both paths above update our state to mm_tlb_gen. */
        this_cpu_write(cpu_tlbstate.ctxs[0].tlb_gen, mm_tlb_gen);
}

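/*
 * flush_tlb_func_local() runs on the CPU that requested the flush, with
 * interrupts disabled by the caller.  flush_tlb_func_remote() is the
 * smp_call_function_many() callback that runs on the other CPUs; it bails
 * out early if the IPI arrives after this CPU has already switched away
 * from the target mm.
 */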
static void flush_tlb_func_local(void *info, enum tlb_flush_reason reason)
{
        const struct flush_tlb_info *f = info;

        flush_tlb_func_common(f, true, reason);
}

static void flush_tlb_func_remote(void *info)
{
        const struct flush_tlb_info *f = info;

        inc_irq_stat(irq_tlb_count);

        if (f->mm && f->mm != this_cpu_read(cpu_tlbstate.loaded_mm))
                return;

        count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
        flush_tlb_func_common(f, false, TLB_REMOTE_SHOOTDOWN);
}

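/*
 * native_flush_tlb_others() forwards the flush request described by *info
 * to every CPU in @cpumask, normally via smp_call_function_many().  UV
 * systems take the special-cased path documented below.
 */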
void native_flush_tlb_others(const struct cpumask *cpumask,
                             const struct flush_tlb_info *info)
{
        count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
        if (info->end == TLB_FLUSH_ALL)
                trace_tlb_flush(TLB_REMOTE_SEND_IPI, TLB_FLUSH_ALL);
        else
                trace_tlb_flush(TLB_REMOTE_SEND_IPI,
                                (info->end - info->start) >> PAGE_SHIFT);

        if (is_uv_system()) {
                /*
                 * This whole special case is confused.  UV has a "Broadcast
                 * Assist Unit", which seems to be a fancy way to send IPIs.
                 * Back when x86 used an explicit TLB flush IPI, UV was
                 * optimized to use its own mechanism.  These days, x86 uses
                 * smp_call_function_many(), but UV still uses a manual IPI,
                 * and that IPI's action is out of date -- it does a manual
                 * flush instead of calling flush_tlb_func_remote().  This
                 * means that the percpu tlb_gen variables won't be updated
                 * and we'll do pointless flushes on future context switches.
                 *
                 * Rather than hooking native_flush_tlb_others() here, I think
                 * that UV should be updated so that smp_call_function_many(),
                 * etc, are optimal on UV.
                 */
                unsigned int cpu;

                cpu = smp_processor_id();
                cpumask = uv_flush_tlb_others(cpumask, info);
                if (cpumask)
                        smp_call_function_many(cpumask, flush_tlb_func_remote,
                                               (void *)info, 1);
                return;
        }
        smp_call_function_many(cpumask, flush_tlb_func_remote,
                               (void *)info, 1);
}

/*
 * See Documentation/x86/tlb.txt for details.  We choose 33
 * because it is large enough to cover the vast majority (at
 * least 95%) of allocations, and is small enough that we are
 * confident it will not cause too much overhead.  Each single
 * flush is about 100 ns, so this caps the maximum overhead at
 * _about_ 3,000 ns.
 *
 * This is in units of pages.
 */
static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;

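/*
 * flush_tlb_mm_range() flushes the given range of @mm on the local CPU and
 * on every other CPU in mm_cpumask(mm).  If the range covers at most
 * tlb_single_page_flush_ceiling pages (and isn't hugetlb), the pages are
 * invalidated individually; otherwise the whole TLB is flushed.  Callers
 * may pass end == TLB_FLUSH_ALL to force a full flush.
 */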
void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
                        unsigned long end, unsigned long vmflag)
{
        int cpu;

        struct flush_tlb_info info = {
                .mm = mm,
        };

        cpu = get_cpu();

        /* This is also a barrier that synchronizes with switch_mm(). */
        info.new_tlb_gen = inc_mm_tlb_gen(mm);

        /* Should we flush just the requested range? */
        if ((end != TLB_FLUSH_ALL) &&
            !(vmflag & VM_HUGETLB) &&
            ((end - start) >> PAGE_SHIFT) <= tlb_single_page_flush_ceiling) {
                info.start = start;
                info.end = end;
        } else {
                info.start = 0UL;
                info.end = TLB_FLUSH_ALL;
        }

        if (mm == this_cpu_read(cpu_tlbstate.loaded_mm)) {
                VM_WARN_ON(irqs_disabled());
                local_irq_disable();
                flush_tlb_func_local(&info, TLB_LOCAL_MM_SHOOTDOWN);
                local_irq_enable();
        }

        if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids)
                flush_tlb_others(mm_cpumask(mm), &info);

        put_cpu();
}

static void do_flush_tlb_all(void *info)
{
        count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
        __flush_tlb_all();
}

void flush_tlb_all(void)
{
        count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
        on_each_cpu(do_flush_tlb_all, NULL, 1);
}

static void do_kernel_range_flush(void *info)
{
        struct flush_tlb_info *f = info;
        unsigned long addr;

        /* flush the range one page at a time with 'invlpg' */
        for (addr = f->start; addr < f->end; addr += PAGE_SIZE)
                __flush_tlb_single(addr);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        /* Balance as user space task's flush, a bit conservative */
        if (end == TLB_FLUSH_ALL ||
            (end - start) > tlb_single_page_flush_ceiling << PAGE_SHIFT) {
                on_each_cpu(do_flush_tlb_all, NULL, 1);
        } else {
                struct flush_tlb_info info;

                info.start = start;
                info.end = end;
                on_each_cpu(do_kernel_range_flush, &info, 1);
        }
}

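/*
 * arch_tlbbatch_flush() is the arch side of the deferred TLB-flush batching
 * used by the core mm (e.g. try_to_unmap_flush() during reclaim): it issues
 * one full flush covering every CPU recorded in batch->cpumask instead of
 * flushing after each individual unmap.
 */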
void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
{
        struct flush_tlb_info info = {
                .mm = NULL,
                .start = 0UL,
                .end = TLB_FLUSH_ALL,
        };

        int cpu = get_cpu();

        if (cpumask_test_cpu(cpu, &batch->cpumask)) {
                VM_WARN_ON(irqs_disabled());
                local_irq_disable();
                flush_tlb_func_local(&info, TLB_LOCAL_SHOOTDOWN);
                local_irq_enable();
        }

        if (cpumask_any_but(&batch->cpumask, cpu) < nr_cpu_ids)
                flush_tlb_others(&batch->cpumask, &info);

        cpumask_clear(&batch->cpumask);

        put_cpu();
}

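/*
 * tlb_single_page_flush_ceiling is tunable at runtime through debugfs, as
 * described in Documentation/x86/tlb.txt.  With debugfs mounted in the
 * usual place the knob shows up as
 * /sys/kernel/debug/x86/tlb_single_page_flush_ceiling; for example,
 * writing 0 ("echo 0 > .../tlb_single_page_flush_ceiling") disables
 * partial flushes entirely so that every flush covers the whole TLB.
 */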
static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf,
                                  size_t count, loff_t *ppos)
{
        char buf[32];
        unsigned int len;

        len = sprintf(buf, "%ld\n", tlb_single_page_flush_ceiling);
        return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static ssize_t tlbflush_write_file(struct file *file,
                 const char __user *user_buf, size_t count, loff_t *ppos)
{
        char buf[32];
        ssize_t len;
        int ceiling;

        len = min(count, sizeof(buf) - 1);
        if (copy_from_user(buf, user_buf, len))
                return -EFAULT;

        buf[len] = '\0';
        if (kstrtoint(buf, 0, &ceiling))
                return -EINVAL;

        if (ceiling < 0)
                return -EINVAL;

        tlb_single_page_flush_ceiling = ceiling;
        return count;
}

static const struct file_operations fops_tlbflush = {
        .read = tlbflush_read_file,
        .write = tlbflush_write_file,
        .llseek = default_llseek,
};

static int __init create_tlb_single_page_flush_ceiling(void)
{
        debugfs_create_file("tlb_single_page_flush_ceiling", S_IRUSR | S_IWUSR,
                            arch_debugfs_dir, NULL, &fops_tlbflush);
        return 0;
}
late_initcall(create_tlb_single_page_flush_ceiling);