#include <linux/init.h>

#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/cpu.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cache.h>
#include <asm/apic.h>
#include <asm/uv/uv.h>
#include <linux/debugfs.h>
/*
 *	TLB flushing, formerly SMP-only
 *		including cross-CPU TLB flushing.
 *
 *	These mean you can really definitely utterly forget about
 *	writing to user space from interrupts. (It's not allowed anyway).
 *
 *	Optimizations Manfred Spraul <manfred@colorfullife.com>
 *
 *	More scalable flush, from Andi Kleen
 *
 *	Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
 */
/*
 * We get here when we do something requiring a TLB invalidation
 * but could not go invalidate all of the contexts.  We do the
 * necessary invalidation by clearing out the 'ctx_id' which
 * forces a TLB flush when the context is loaded.
 */
void clear_asid_other(void)
{
	u16 asid;

	/*
	 * This is only expected to be set if we have disabled
	 * kernel _PAGE_GLOBAL pages.
	 */
	if (!static_cpu_has(X86_FEATURE_PTI)) {
		WARN_ON_ONCE(1);
		return;
	}

	for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) {
		/* Do not need to flush the current asid */
		if (asid == this_cpu_read(cpu_tlbstate.loaded_mm_asid))
			continue;
		/*
		 * Make sure the next time we go to switch to
		 * this asid, we do a flush:
		 */
		this_cpu_write(cpu_tlbstate.ctxs[asid].ctx_id, 0);
	}
	this_cpu_write(cpu_tlbstate.invalidate_other, false);
}
atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1);
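/*
 * Pick an ASID for 'next': reuse a dynamic ASID slot whose ctx_id already
 * matches, flushing only if that slot's tlb_gen is stale; otherwise grab
 * the next slot round-robin and request a full flush.  Without PCID
 * support, ASID 0 is always used and always flushed.
 */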
static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen,
			    u16 *new_asid, bool *need_flush)
{
	u16 asid;

	if (!static_cpu_has(X86_FEATURE_PCID)) {
		*new_asid = 0;
		*need_flush = true;
		return;
	}

	if (this_cpu_read(cpu_tlbstate.invalidate_other))
		clear_asid_other();

	for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) {
		if (this_cpu_read(cpu_tlbstate.ctxs[asid].ctx_id) !=
		    next->context.ctx_id)
			continue;

		*new_asid = asid;
		*need_flush = (this_cpu_read(cpu_tlbstate.ctxs[asid].tlb_gen) <
			       next_tlb_gen);
		return;
	}

	/*
	 * We don't currently own an ASID slot on this CPU.
	 * Allocate a slot.
	 */
	*new_asid = this_cpu_add_return(cpu_tlbstate.next_asid, 1) - 1;
	if (*new_asid >= TLB_NR_DYN_ASIDS) {
		*new_asid = 0;
		this_cpu_write(cpu_tlbstate.next_asid, 1);
	}
	*need_flush = true;
}
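/*
 * Point CR3 at the new pgd/ASID pair.  With need_flush set this writes a
 * flushing CR3 value (and, under PTI, invalidates the matching user ASID);
 * otherwise the no-flush variant is used because the TLB contents for this
 * ASID are still valid.
 */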
static void load_new_mm_cr3(pgd_t *pgdir, u16 new_asid, bool need_flush)
{
	unsigned long new_mm_cr3;

	if (need_flush) {
		invalidate_user_asid(new_asid);
		new_mm_cr3 = build_cr3(pgdir, new_asid);
	} else {
		new_mm_cr3 = build_cr3_noflush(pgdir, new_asid);
	}

	/*
	 * Caution: many callers of this function expect
	 * that load_cr3() is serializing and orders TLB
	 * fills with respect to the mm_cpumask writes.
	 */
	write_cr3(new_mm_cr3);
}
void leave_mm(int cpu)
{
	struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);

	/*
	 * It's plausible that we're in lazy TLB mode while our mm is init_mm.
	 * If so, our callers still expect us to flush the TLB, but there
	 * aren't any user TLB entries in init_mm to worry about.
	 *
	 * This needs to happen before any other sanity checks due to
	 * intel_idle's shenanigans.
	 */
	if (loaded_mm == &init_mm)
		return;

	/* Warn if we're not lazy. */
	WARN_ON(!this_cpu_read(cpu_tlbstate.is_lazy));

	switch_mm(NULL, &init_mm, NULL);
}
EXPORT_SYMBOL_GPL(leave_mm);
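/*
 * switch_mm() is the interrupt-safe wrapper: it just disables IRQs around
 * switch_mm_irqs_off(), which does the real work.
 */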
void switch_mm(struct mm_struct *prev, struct mm_struct *next,
	       struct task_struct *tsk)
{
	unsigned long flags;

	local_irq_save(flags);
	switch_mm_irqs_off(prev, next, tsk);
	local_irq_restore(flags);
}
void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			struct task_struct *tsk)
{
	struct mm_struct *real_prev = this_cpu_read(cpu_tlbstate.loaded_mm);
	u16 prev_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
	unsigned cpu = smp_processor_id();
	u64 next_tlb_gen;
	/*
	 * NB: The scheduler will call us with prev == next when switching
	 * from lazy TLB mode to normal mode if active_mm isn't changing.
	 * When this happens, we don't assume that CR3 (and hence
	 * cpu_tlbstate.loaded_mm) matches next.
	 *
	 * NB: leave_mm() calls us with prev == NULL and tsk == NULL.
	 */

	/* We don't want flush_tlb_func_* to run concurrently with us. */
	if (IS_ENABLED(CONFIG_PROVE_LOCKING))
		WARN_ON_ONCE(!irqs_disabled());
	/*
	 * Verify that CR3 is what we think it is.  This will catch
	 * hypothetical buggy code that directly switches to swapper_pg_dir
	 * without going through leave_mm() / switch_mm_irqs_off() or that
	 * does something like write_cr3(read_cr3_pa()).
	 *
	 * Only do this check if CONFIG_DEBUG_VM=y because __read_cr3()
	 * isn't free.
	 */
#ifdef CONFIG_DEBUG_VM
	if (WARN_ON_ONCE(__read_cr3() != build_cr3(real_prev->pgd, prev_asid))) {
		/*
		 * If we were to BUG here, we'd be very likely to kill
		 * the system so hard that we don't see the call trace.
		 * Try to recover instead by ignoring the error and doing
		 * a global flush to minimize the chance of corruption.
		 *
		 * (This is far from being a fully correct recovery.
		 *  Architecturally, the CPU could prefetch something
		 *  back into an incorrect ASID slot and leave it there
		 *  to cause trouble down the road.  It's better than
		 *  nothing, though.)
		 */
		__flush_tlb_all();
	}
#endif
	this_cpu_write(cpu_tlbstate.is_lazy, false);
	if (real_prev == next) {
		VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) !=
			   next->context.ctx_id);

		/*
		 * We don't currently support having a real mm loaded without
		 * our cpu set in mm_cpumask().  We have all the bookkeeping
		 * in place to figure out whether we would need to flush
		 * if our cpu were cleared in mm_cpumask(), but we don't
		 * currently use it.
		 */
		if (WARN_ON_ONCE(real_prev != &init_mm &&
				 !cpumask_test_cpu(cpu, mm_cpumask(next))))
			cpumask_set_cpu(cpu, mm_cpumask(next));

		return;
	} else {
		u16 new_asid;
		bool need_flush;
		if (IS_ENABLED(CONFIG_VMAP_STACK)) {
			/*
			 * If our current stack is in vmalloc space and isn't
			 * mapped in the new pgd, we'll double-fault.  Forcibly
			 * map it.
			 */
			unsigned int index = pgd_index(current_stack_pointer);
			pgd_t *pgd = next->pgd + index;

			if (unlikely(pgd_none(*pgd)))
				set_pgd(pgd, init_mm.pgd[index]);
		}
		/* Stop remote flushes for the previous mm */
		VM_WARN_ON_ONCE(!cpumask_test_cpu(cpu, mm_cpumask(real_prev)) &&
				real_prev != &init_mm);
		cpumask_clear_cpu(cpu, mm_cpumask(real_prev));

		/*
		 * Start remote flushes and then read tlb_gen.
		 */
		cpumask_set_cpu(cpu, mm_cpumask(next));
		next_tlb_gen = atomic64_read(&next->context.tlb_gen);

		choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush);

		if (need_flush) {
			this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
			this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
			load_new_mm_cr3(next->pgd, new_asid, true);

			/*
			 * NB: This gets called via leave_mm() in the idle path
			 * where RCU functions differently.  Tracing normally
			 * uses RCU, so we need to use the _rcuidle variant.
			 *
			 * (There is no good reason for this.  The idle code should
			 *  be rearranged to call this before rcu_idle_enter().)
			 */
			trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
		} else {
			/* The new ASID is already up to date. */
			load_new_mm_cr3(next->pgd, new_asid, false);

			/* See above wrt _rcuidle. */
			trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0);
		}
		this_cpu_write(cpu_tlbstate.loaded_mm, next);
		this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid);
	}

	load_mm_cr4(next);
	switch_ldt(real_prev, next);
}
/*
 * Please ignore the name of this function.  It should be called
 * switch_to_kernel_thread().
 *
 * enter_lazy_tlb() is a hint from the scheduler that we are entering a
 * kernel thread or other context without an mm.  Acceptable implementations
 * include doing nothing whatsoever, switching to init_mm, or various clever
 * lazy tricks to try to minimize TLB flushes.
 *
 * The scheduler reserves the right to call enter_lazy_tlb() several times
 * in a row.  It will notify us that we're going back to a real mm by
 * calling switch_mm_irqs_off().
 */
void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	if (this_cpu_read(cpu_tlbstate.loaded_mm) == &init_mm)
		return;

	if (tlb_defer_switch_to_init_mm()) {
		/*
		 * There's a significant optimization that may be possible
		 * here.  We have accurate enough TLB flush tracking that we
		 * don't need to maintain coherence of TLB per se when we're
		 * lazy.  We do, however, need to maintain coherence of
		 * paging-structure caches.  We could, in principle, leave our
		 * old mm loaded and only switch to init_mm when
		 * tlb_remove_page() happens.
		 */
		this_cpu_write(cpu_tlbstate.is_lazy, true);
	} else {
		switch_mm(NULL, &init_mm, NULL);
	}
}
/*
 * Call this when reinitializing a CPU.  It fixes the following potential
 * problems:
 *
 * - The ASID changed from what cpu_tlbstate thinks it is (most likely
 *   because the CPU was taken down and came back up with CR3's PCID
 *   bits clear.  CPU hotplug can do this.
 *
 * - The TLB contains junk in slots corresponding to inactive ASIDs.
 *
 * - The CPU went so far out to lunch that it may have missed a TLB
 *   flush.
 */
void initialize_tlbstate_and_flush(void)
{
	int i;
	struct mm_struct *mm = this_cpu_read(cpu_tlbstate.loaded_mm);
	u64 tlb_gen = atomic64_read(&init_mm.context.tlb_gen);
	unsigned long cr3 = __read_cr3();

	/* Assert that CR3 already references the right mm. */
	WARN_ON((cr3 & CR3_ADDR_MASK) != __pa(mm->pgd));

	/*
	 * Assert that CR4.PCIDE is set if needed.  (CR4.PCIDE initialization
	 * doesn't work like other CR4 bits because it can only be set from
	 * long mode.)
	 */
	WARN_ON(boot_cpu_has(X86_FEATURE_PCID) &&
		!(cr4_read_shadow() & X86_CR4_PCIDE));

	/* Force ASID 0 and force a TLB flush. */
	write_cr3(build_cr3(mm->pgd, 0));

	/* Reinitialize tlbstate. */
	this_cpu_write(cpu_tlbstate.loaded_mm_asid, 0);
	this_cpu_write(cpu_tlbstate.next_asid, 1);
	this_cpu_write(cpu_tlbstate.ctxs[0].ctx_id, mm->context.ctx_id);
	this_cpu_write(cpu_tlbstate.ctxs[0].tlb_gen, tlb_gen);

	for (i = 1; i < TLB_NR_DYN_ASIDS; i++)
		this_cpu_write(cpu_tlbstate.ctxs[i].ctx_id, 0);
}
/*
 * flush_tlb_func_common()'s memory ordering requirement is that any
 * TLB fills that happen after we flush the TLB are ordered after we
 * read active_mm's tlb_gen.  We don't need any explicit barriers
 * because all x86 flush operations are serializing and the
 * atomic64_read operation won't be reordered by the compiler.
 */
static void flush_tlb_func_common(const struct flush_tlb_info *f,
				  bool local, enum tlb_flush_reason reason)
{
	/*
	 * We have three different tlb_gen values in here.  They are:
	 *
	 * - mm_tlb_gen:     the latest generation.
	 * - local_tlb_gen:  the generation that this CPU has already caught
	 *                   up to.
	 * - f->new_tlb_gen: the generation that the requester of the flush
	 *                   wants us to catch up to.
	 */
	struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
	u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
	u64 mm_tlb_gen = atomic64_read(&loaded_mm->context.tlb_gen);
	u64 local_tlb_gen = this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen);
	/* This code cannot presently handle being reentered. */
	VM_WARN_ON(!irqs_disabled());

	if (unlikely(loaded_mm == &init_mm))
		return;

	VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].ctx_id) !=
		   loaded_mm->context.ctx_id);
	if (this_cpu_read(cpu_tlbstate.is_lazy)) {
		/*
		 * We're in lazy mode.  We need to at least flush our
		 * paging-structure cache to avoid speculatively reading
		 * garbage into our TLB.  Since switching to init_mm is barely
		 * slower than a minimal flush, just switch to init_mm.
		 */
		switch_mm_irqs_off(NULL, &init_mm, NULL);
		return;
	}
	if (unlikely(local_tlb_gen == mm_tlb_gen)) {
		/*
		 * There's nothing to do: we're already up to date.  This can
		 * happen if two concurrent flushes happen -- the first flush to
		 * be handled can catch us all the way up, leaving no work for
		 * the second flush.
		 */
		trace_tlb_flush(reason, 0);
		return;
	}
	WARN_ON_ONCE(local_tlb_gen > mm_tlb_gen);
	WARN_ON_ONCE(f->new_tlb_gen > mm_tlb_gen);
	/*
	 * If we get to this point, we know that our TLB is out of date.
	 * This does not strictly imply that we need to flush (it's
	 * possible that f->new_tlb_gen <= local_tlb_gen), but we're
	 * going to need to flush in the very near future, so we might
	 * as well get it over with.
	 *
	 * The only question is whether to do a full or partial flush.
	 *
	 * We do a partial flush if requested and two extra conditions
	 * are met:
	 *
	 * 1. f->new_tlb_gen == local_tlb_gen + 1.  We have an invariant that
	 *    we've always done all needed flushes to catch up to
	 *    local_tlb_gen.  If, for example, local_tlb_gen == 2 and
	 *    f->new_tlb_gen == 3, then we know that the flush needed to bring
	 *    us up to date for tlb_gen 3 is the partial flush we're
	 *    processing.
	 *
	 *    As an example of why this check is needed, suppose that there
	 *    are two concurrent flushes.  The first is a full flush that
	 *    changes context.tlb_gen from 1 to 2.  The second is a partial
	 *    flush that changes context.tlb_gen from 2 to 3.  If they get
	 *    processed on this CPU in reverse order, we'll see
	 *    local_tlb_gen == 1, mm_tlb_gen == 3, and end != TLB_FLUSH_ALL.
	 *    If we were to use __flush_tlb_single() and set local_tlb_gen to
	 *    3, we'd break the invariant: we'd update local_tlb_gen above
	 *    1 without the full flush that's needed for tlb_gen 2.
	 *
	 * 2. f->new_tlb_gen == mm_tlb_gen.  This is purely an optimization.
	 *    Partial TLB flushes are not all that much cheaper than full TLB
	 *    flushes, so it seems unlikely that it would be a performance win
	 *    to do a partial flush if that won't bring our TLB fully up to
	 *    date.  By doing a full flush instead, we can increase
	 *    local_tlb_gen all the way to mm_tlb_gen and we can probably
	 *    avoid another flush in the very near future.
	 */
	if (f->end != TLB_FLUSH_ALL &&
	    f->new_tlb_gen == local_tlb_gen + 1 &&
	    f->new_tlb_gen == mm_tlb_gen) {
		/* Partial flush */
		unsigned long addr;
		unsigned long nr_pages = (f->end - f->start) >> PAGE_SHIFT;

		addr = f->start;
		while (addr < f->end) {
			__flush_tlb_single(addr);
			addr += PAGE_SIZE;
		}
		if (local)
			count_vm_tlb_events(NR_TLB_LOCAL_FLUSH_ONE, nr_pages);
		trace_tlb_flush(reason, nr_pages);
	} else {
		/* Full flush. */
		local_flush_tlb();
		if (local)
			count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
		trace_tlb_flush(reason, TLB_FLUSH_ALL);
	}
	/* Both paths above update our state to mm_tlb_gen. */
	this_cpu_write(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen, mm_tlb_gen);
}
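/*
 * Thin wrappers around flush_tlb_func_common(): the _local variant runs on
 * the CPU that requested the flush, while the _remote variant is the SMP
 * function-call target and bails out if the request is for an mm that is
 * not currently loaded on this CPU.
 */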
static void flush_tlb_func_local(void *info, enum tlb_flush_reason reason)
{
	const struct flush_tlb_info *f = info;

	flush_tlb_func_common(f, true, reason);
}
static void flush_tlb_func_remote(void *info)
{
	const struct flush_tlb_info *f = info;

	inc_irq_stat(irq_tlb_count);

	if (f->mm && f->mm != this_cpu_read(cpu_tlbstate.loaded_mm))
		return;

	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
	flush_tlb_func_common(f, false, TLB_REMOTE_SHOOTDOWN);
}
void native_flush_tlb_others(const struct cpumask *cpumask,
			     const struct flush_tlb_info *info)
{
	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
	if (info->end == TLB_FLUSH_ALL)
		trace_tlb_flush(TLB_REMOTE_SEND_IPI, TLB_FLUSH_ALL);
	else
		trace_tlb_flush(TLB_REMOTE_SEND_IPI,
				(info->end - info->start) >> PAGE_SHIFT);

	if (is_uv_system()) {
		/*
		 * This whole special case is confused.  UV has a "Broadcast
		 * Assist Unit", which seems to be a fancy way to send IPIs.
		 * Back when x86 used an explicit TLB flush IPI, UV was
		 * optimized to use its own mechanism.  These days, x86 uses
		 * smp_call_function_many(), but UV still uses a manual IPI,
		 * and that IPI's action is out of date -- it does a manual
		 * flush instead of calling flush_tlb_func_remote().  This
		 * means that the percpu tlb_gen variables won't be updated
		 * and we'll do pointless flushes on future context switches.
		 *
		 * Rather than hooking native_flush_tlb_others() here, I think
		 * that UV should be updated so that smp_call_function_many(),
		 * etc, are optimal on UV.
		 */
		unsigned int cpu;

		cpu = smp_processor_id();
		cpumask = uv_flush_tlb_others(cpumask, info);
		if (cpumask)
			smp_call_function_many(cpumask, flush_tlb_func_remote,
					       (void *)info, 1);
		return;
	}
	smp_call_function_many(cpumask, flush_tlb_func_remote,
			       (void *)info, 1);
}
/*
 * See Documentation/x86/tlb.txt for details.  We choose 33
 * because it is large enough to cover the vast majority (at
 * least 95%) of allocations, and is small enough that we are
 * confident it will not cause too much overhead.  Each single
 * flush is about 100 ns, so this caps the maximum overhead at
 * _about_ 3,000 ns.
 *
 * This is in units of pages.
 */
static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
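/*
 * Note: the ceiling can be changed at runtime through the debugfs file
 * registered at the bottom of this file, typically visible as
 * /sys/kernel/debug/x86/tlb_single_page_flush_ceiling, e.g.:
 *
 *	# echo 50 > /sys/kernel/debug/x86/tlb_single_page_flush_ceiling
 */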
void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned long vmflag)
{
	int cpu;

	struct flush_tlb_info info = {
		.mm = mm,
	};

	cpu = get_cpu();

	/* This is also a barrier that synchronizes with switch_mm(). */
	info.new_tlb_gen = inc_mm_tlb_gen(mm);

	/* Should we flush just the requested range? */
	if ((end != TLB_FLUSH_ALL) &&
	    !(vmflag & VM_HUGETLB) &&
	    ((end - start) >> PAGE_SHIFT) <= tlb_single_page_flush_ceiling) {
		info.start = start;
		info.end = end;
	} else {
		info.start = 0UL;
		info.end = TLB_FLUSH_ALL;
	}

	if (mm == this_cpu_read(cpu_tlbstate.loaded_mm)) {
		VM_WARN_ON(irqs_disabled());
		local_irq_disable();
		flush_tlb_func_local(&info, TLB_LOCAL_MM_SHOOTDOWN);
		local_irq_enable();
	}

	if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), &info);

	put_cpu();
}
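/*
 * Illustrative usage: single-page flushes elsewhere in the tree funnel
 * through this helper, roughly along the lines of
 *
 *	flush_tlb_mm_range(vma->vm_mm, addr, addr + PAGE_SIZE, vma->vm_flags);
 *
 * (sketch only; see flush_tlb_page() in <asm/tlbflush.h> for the real
 * wrapper and the exact vmflag it passes).
 */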
static void do_flush_tlb_all(void *info)
{
	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
	__flush_tlb_all();
}
void flush_tlb_all(void)
{
	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
	on_each_cpu(do_flush_tlb_all, NULL, 1);
}
static void do_kernel_range_flush(void *info)
{
	struct flush_tlb_info *f = info;
	unsigned long addr;

	/* Flush the range one page at a time with 'invlpg'. */
	for (addr = f->start; addr < f->end; addr += PAGE_SIZE)
		__flush_tlb_one(addr);
}
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	/* Balance as user space task's flush, a bit conservative */
	if (end == TLB_FLUSH_ALL ||
	    (end - start) > tlb_single_page_flush_ceiling << PAGE_SHIFT) {
		on_each_cpu(do_flush_tlb_all, NULL, 1);
	} else {
		struct flush_tlb_info info;

		info.start = start;
		info.end = end;
		on_each_cpu(do_kernel_range_flush, &info, 1);
	}
}
void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
{
	struct flush_tlb_info info = {
		.mm = NULL,
		.start = 0UL,
		.end = TLB_FLUSH_ALL,
	};

	int cpu = get_cpu();

	if (cpumask_test_cpu(cpu, &batch->cpumask)) {
		VM_WARN_ON(irqs_disabled());
		local_irq_disable();
		flush_tlb_func_local(&info, TLB_LOCAL_SHOOTDOWN);
		local_irq_enable();
	}

	if (cpumask_any_but(&batch->cpumask, cpu) < nr_cpu_ids)
		flush_tlb_others(&batch->cpumask, &info);

	cpumask_clear(&batch->cpumask);

	put_cpu();
}
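/*
 * Everything below exposes tlb_single_page_flush_ceiling through debugfs so
 * the 33-page heuristic above can be tuned at runtime without rebuilding
 * the kernel.
 */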
static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	char buf[32];
	unsigned int len;

	len = sprintf(buf, "%ld\n", tlb_single_page_flush_ceiling);
	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
static ssize_t tlbflush_write_file(struct file *file,
		 const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	ssize_t len;
	int ceiling;

	len = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, len))
		return -EFAULT;

	buf[len] = '\0';
	if (kstrtoint(buf, 0, &ceiling))
		return -EINVAL;

	if (ceiling < 0)
		return -EINVAL;

	tlb_single_page_flush_ceiling = ceiling;
	return count;
}
static const struct file_operations fops_tlbflush = {
	.read = tlbflush_read_file,
	.write = tlbflush_write_file,
	.llseek = default_llseek,
};
static int __init create_tlb_single_page_flush_ceiling(void)
{
	debugfs_create_file("tlb_single_page_flush_ceiling", S_IRUSR | S_IWUSR,
			    arch_debugfs_dir, NULL, &fops_tlbflush);
	return 0;
}
late_initcall(create_tlb_single_page_flush_ceiling);