#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cache.h>
#include <asm/uv/uv.h>
#include <linux/debugfs.h>
/*
 *	TLB flushing, formerly SMP-only
 *
 *	You can really definitely utterly forget about
 *	writing to user space from interrupts. (It's not allowed anyway.)
 *
 *	Optimizations Manfred Spraul <manfred@colorfullife.com>
 *
 *	More scalable flush, from Andi Kleen
 *
 *	Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
 */
atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1);
static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen,
			    u16 *new_asid, bool *need_flush)
{
	u16 asid;

	if (!static_cpu_has(X86_FEATURE_PCID)) {
		*new_asid = 0;
		*need_flush = true;
		return;
	}

	for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) {
		if (this_cpu_read(cpu_tlbstate.ctxs[asid].ctx_id) !=
		    next->context.ctx_id)
			continue;

		*new_asid = asid;
		*need_flush = (this_cpu_read(cpu_tlbstate.ctxs[asid].tlb_gen) <
			       next_tlb_gen);
		return;
	}

	/*
	 * We don't currently own an ASID slot on this CPU.
	 * Allocate a slot.
	 */
	*new_asid = this_cpu_add_return(cpu_tlbstate.next_asid, 1) - 1;
	if (*new_asid >= TLB_NR_DYN_ASIDS) {
		*new_asid = 0;
		this_cpu_write(cpu_tlbstate.next_asid, 1);
	}
	*need_flush = true;
}
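
/*
 * Sketch of how the ASID cache above behaves (illustrative): each CPU keeps
 * TLB_NR_DYN_ASIDS slots in cpu_tlbstate.ctxs[].  Switching back to an mm
 * whose ctx_id is still cached reuses its slot, and a flush is only needed
 * if that slot's tlb_gen is behind next_tlb_gen.  Switching to an mm that
 * isn't cached evicts a slot round-robin via next_asid and forces a flush
 * for the newly assigned ASID.
 */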
void leave_mm(int cpu)
{
	struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);

	/*
	 * It's plausible that we're in lazy TLB mode while our mm is init_mm.
	 * If so, our callers still expect us to flush the TLB, but there
	 * aren't any user TLB entries in init_mm to worry about.
	 *
	 * This needs to happen before any other sanity checks due to
	 * intel_idle's shenanigans.
	 */
	if (loaded_mm == &init_mm)
		return;

	/* Warn if we're not lazy. */
	WARN_ON(!this_cpu_read(cpu_tlbstate.is_lazy));

	switch_mm(NULL, &init_mm, NULL);
}
EXPORT_SYMBOL_GPL(leave_mm);
void switch_mm(struct mm_struct *prev, struct mm_struct *next,
	       struct task_struct *tsk)
{
	unsigned long flags;

	local_irq_save(flags);
	switch_mm_irqs_off(prev, next, tsk);
	local_irq_restore(flags);
}
void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			struct task_struct *tsk)
{
	struct mm_struct *real_prev = this_cpu_read(cpu_tlbstate.loaded_mm);
	u16 prev_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
	unsigned cpu = smp_processor_id();
	u64 next_tlb_gen;

	/*
	 * NB: The scheduler will call us with prev == next when switching
	 * from lazy TLB mode to normal mode if active_mm isn't changing.
	 * When this happens, we don't assume that CR3 (and hence
	 * cpu_tlbstate.loaded_mm) matches next.
	 *
	 * NB: leave_mm() calls us with prev == NULL and tsk == NULL.
	 */

	/* We don't want flush_tlb_func_* to run concurrently with us. */
	if (IS_ENABLED(CONFIG_PROVE_LOCKING))
		WARN_ON_ONCE(!irqs_disabled());
	/*
	 * Verify that CR3 is what we think it is.  This will catch
	 * hypothetical buggy code that directly switches to swapper_pg_dir
	 * without going through leave_mm() / switch_mm_irqs_off() or that
	 * does something like write_cr3(read_cr3_pa()).
	 *
	 * Only do this check if CONFIG_DEBUG_VM=y because __read_cr3()
	 * isn't free.
	 */
#ifdef CONFIG_DEBUG_VM
	if (WARN_ON_ONCE(__read_cr3() != build_cr3(real_prev, prev_asid))) {
		/*
		 * If we were to BUG here, we'd be very likely to kill
		 * the system so hard that we don't see the call trace.
		 * Try to recover instead by ignoring the error and doing
		 * a global flush to minimize the chance of corruption.
		 *
		 * (This is far from being a fully correct recovery.
		 *  Architecturally, the CPU could prefetch something
		 *  back into an incorrect ASID slot and leave it there
		 *  to cause trouble down the road.  It's better than
		 *  nothing, though.)
		 */
		__flush_tlb_all();
	}
#endif
	this_cpu_write(cpu_tlbstate.is_lazy, false);
	if (real_prev == next) {
		VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) !=
			   next->context.ctx_id);

		/*
		 * We don't currently support having a real mm loaded without
		 * our cpu set in mm_cpumask().  We have all the bookkeeping
		 * in place to figure out whether we would need to flush
		 * if our cpu were cleared in mm_cpumask(), but we don't
		 * currently use it.
		 */
		if (WARN_ON_ONCE(real_prev != &init_mm &&
				 !cpumask_test_cpu(cpu, mm_cpumask(next))))
			cpumask_set_cpu(cpu, mm_cpumask(next));

		return;
	} else {
		u16 new_asid;
		bool need_flush;
		if (IS_ENABLED(CONFIG_VMAP_STACK)) {
			/*
			 * If our current stack is in vmalloc space and isn't
			 * mapped in the new pgd, we'll double-fault.  Forcibly
			 * map it.
			 */
			unsigned int index = pgd_index(current_stack_pointer);
			pgd_t *pgd = next->pgd + index;

			if (unlikely(pgd_none(*pgd)))
				set_pgd(pgd, init_mm.pgd[index]);
		}

		/* Stop remote flushes for the previous mm */
		VM_WARN_ON_ONCE(!cpumask_test_cpu(cpu, mm_cpumask(real_prev)) &&
				real_prev != &init_mm);
		cpumask_clear_cpu(cpu, mm_cpumask(real_prev));
		/*
		 * Start remote flushes and then read tlb_gen.
		 */
		cpumask_set_cpu(cpu, mm_cpumask(next));
		next_tlb_gen = atomic64_read(&next->context.tlb_gen);

		choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush);

		if (need_flush) {
			this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
			this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
			write_cr3(build_cr3(next, new_asid));

			/*
			 * NB: This gets called via leave_mm() in the idle path
			 * where RCU functions differently.  Tracing normally
			 * uses RCU, so we need to use the _rcuidle variant.
			 *
			 * (There is no good reason for this.  The idle code should
			 *  be rearranged to call this before rcu_idle_enter().)
			 */
			trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
		} else {
			/* The new ASID is already up to date. */
			write_cr3(build_cr3_noflush(next, new_asid));

			/* See above wrt _rcuidle. */
			trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0);
		}

		this_cpu_write(cpu_tlbstate.loaded_mm, next);
		this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid);
	}

	switch_ldt(real_prev, next);
}
/*
 * Please ignore the name of this function.  It should be called
 * switch_to_kernel_thread().
 *
 * enter_lazy_tlb() is a hint from the scheduler that we are entering a
 * kernel thread or other context without an mm.  Acceptable implementations
 * include doing nothing whatsoever, switching to init_mm, or various clever
 * lazy tricks to try to minimize TLB flushes.
 *
 * The scheduler reserves the right to call enter_lazy_tlb() several times
 * in a row.  It will notify us that we're going back to a real mm by
 * calling switch_mm_irqs_off().
 */
void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	if (this_cpu_read(cpu_tlbstate.loaded_mm) == &init_mm)
		return;

	if (tlb_defer_switch_to_init_mm()) {
		/*
		 * There's a significant optimization that may be possible
		 * here.  We have accurate enough TLB flush tracking that we
		 * don't need to maintain coherence of TLB per se when we're
		 * lazy.  We do, however, need to maintain coherence of
		 * paging-structure caches.  We could, in principle, leave our
		 * old mm loaded and only switch to init_mm when
		 * tlb_remove_page() happens.
		 */
		this_cpu_write(cpu_tlbstate.is_lazy, true);
	} else {
		switch_mm(NULL, &init_mm, NULL);
	}
}
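
/*
 * Illustrative lazy-mode sequence (based on the code in this file): a CPU
 * that switches to a kernel thread calls enter_lazy_tlb(); when
 * tlb_defer_switch_to_init_mm() is true, the old user CR3 stays loaded and
 * only cpu_tlbstate.is_lazy is set.  If another CPU later asks us to flush
 * that mm, flush_tlb_func_common() notices is_lazy and switches to init_mm
 * via switch_mm_irqs_off(NULL, &init_mm, NULL) instead of doing a real
 * flush.
 */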
/*
 * Call this when reinitializing a CPU.  It fixes the following potential
 * problems:
 *
 * - The ASID changed from what cpu_tlbstate thinks it is (most likely
 *   because the CPU was taken down and came back up with CR3's PCID
 *   bits clear).  CPU hotplug can do this.
 *
 * - The TLB contains junk in slots corresponding to inactive ASIDs.
 *
 * - The CPU went so far out to lunch that it may have missed a TLB
 *   flush.
 */
void initialize_tlbstate_and_flush(void)
{
	int i;
	struct mm_struct *mm = this_cpu_read(cpu_tlbstate.loaded_mm);
	u64 tlb_gen = atomic64_read(&init_mm.context.tlb_gen);
	unsigned long cr3 = __read_cr3();

	/* Assert that CR3 already references the right mm. */
	WARN_ON((cr3 & CR3_ADDR_MASK) != __pa(mm->pgd));

	/*
	 * Assert that CR4.PCIDE is set if needed.  (CR4.PCIDE initialization
	 * doesn't work like other CR4 bits because it can only be set from
	 * long mode.)
	 */
	WARN_ON(boot_cpu_has(X86_FEATURE_PCID) &&
		!(cr4_read_shadow() & X86_CR4_PCIDE));

	/* Force ASID 0 and force a TLB flush. */
	write_cr3(build_cr3(mm, 0));

	/* Reinitialize tlbstate. */
	this_cpu_write(cpu_tlbstate.loaded_mm_asid, 0);
	this_cpu_write(cpu_tlbstate.next_asid, 1);
	this_cpu_write(cpu_tlbstate.ctxs[0].ctx_id, mm->context.ctx_id);
	this_cpu_write(cpu_tlbstate.ctxs[0].tlb_gen, tlb_gen);

	for (i = 1; i < TLB_NR_DYN_ASIDS; i++)
		this_cpu_write(cpu_tlbstate.ctxs[i].ctx_id, 0);
}
/*
 * flush_tlb_func_common()'s memory ordering requirement is that any
 * TLB fills that happen after we flush the TLB are ordered after we
 * read active_mm's tlb_gen.  We don't need any explicit barriers
 * because all x86 flush operations are serializing and the
 * atomic64_read operation won't be reordered by the compiler.
 */
static void flush_tlb_func_common(const struct flush_tlb_info *f,
				  bool local, enum tlb_flush_reason reason)
{
	/*
	 * We have three different tlb_gen values in here.  They are:
	 *
	 * - mm_tlb_gen:     the latest generation.
	 * - local_tlb_gen:  the generation that this CPU has already caught
	 *                   up to.
	 * - f->new_tlb_gen: the generation that the requester of the flush
	 *                   wants us to catch up to.
	 */
	struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
	u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
	u64 mm_tlb_gen = atomic64_read(&loaded_mm->context.tlb_gen);
	u64 local_tlb_gen = this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen);
	/* This code cannot presently handle being reentered. */
	VM_WARN_ON(!irqs_disabled());

	if (unlikely(loaded_mm == &init_mm))
		return;

	VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].ctx_id) !=
		   loaded_mm->context.ctx_id);

	if (this_cpu_read(cpu_tlbstate.is_lazy)) {
		/*
		 * We're in lazy mode.  We need to at least flush our
		 * paging-structure cache to avoid speculatively reading
		 * garbage into our TLB.  Since switching to init_mm is barely
		 * slower than a minimal flush, just switch to init_mm.
		 */
		switch_mm_irqs_off(NULL, &init_mm, NULL);
		return;
	}
	if (unlikely(local_tlb_gen == mm_tlb_gen)) {
		/*
		 * There's nothing to do: we're already up to date.  This can
		 * happen if two concurrent flushes happen -- the first flush to
		 * be handled can catch us all the way up, leaving no work for
		 * the second flush.
		 */
		trace_tlb_flush(reason, 0);
		return;
	}

	WARN_ON_ONCE(local_tlb_gen > mm_tlb_gen);
	WARN_ON_ONCE(f->new_tlb_gen > mm_tlb_gen);
	/*
	 * If we get to this point, we know that our TLB is out of date.
	 * This does not strictly imply that we need to flush (it's
	 * possible that f->new_tlb_gen <= local_tlb_gen), but we're
	 * going to need to flush in the very near future, so we might
	 * as well get it over with.
	 *
	 * The only question is whether to do a full or partial flush.
	 *
	 * We do a partial flush if requested and two extra conditions
	 * are met:
	 *
	 * 1. f->new_tlb_gen == local_tlb_gen + 1.  We have an invariant that
	 *    we've always done all needed flushes to catch up to
	 *    local_tlb_gen.  If, for example, local_tlb_gen == 2 and
	 *    f->new_tlb_gen == 3, then we know that the flush needed to bring
	 *    us up to date for tlb_gen 3 is the partial flush we're
	 *    handling now.
	 *
	 *    As an example of why this check is needed, suppose that there
	 *    are two concurrent flushes.  The first is a full flush that
	 *    changes context.tlb_gen from 1 to 2.  The second is a partial
	 *    flush that changes context.tlb_gen from 2 to 3.  If they get
	 *    processed on this CPU in reverse order, we'll see
	 *    local_tlb_gen == 1, mm_tlb_gen == 3, and end != TLB_FLUSH_ALL.
	 *    If we were to use __flush_tlb_single() and set local_tlb_gen to
	 *    3, we'd break the invariant: we'd update local_tlb_gen above
	 *    1 without the full flush that's needed for tlb_gen 2.
	 *
	 * 2. f->new_tlb_gen == mm_tlb_gen.  This is purely an optimization.
	 *    Partial TLB flushes are not all that much cheaper than full TLB
	 *    flushes, so it seems unlikely that it would be a performance win
	 *    to do a partial flush if that won't bring our TLB fully up to
	 *    date.  By doing a full flush instead, we can increase
	 *    local_tlb_gen all the way to mm_tlb_gen and we can probably
	 *    avoid another flush in the very near future.
	 */
	if (f->end != TLB_FLUSH_ALL &&
	    f->new_tlb_gen == local_tlb_gen + 1 &&
	    f->new_tlb_gen == mm_tlb_gen) {
		/* Partial flush */
		unsigned long addr;
		unsigned long nr_pages = (f->end - f->start) >> PAGE_SHIFT;

		addr = f->start;
		while (addr < f->end) {
			__flush_tlb_single(addr);
			addr += PAGE_SIZE;
		}
		if (local)
			count_vm_tlb_events(NR_TLB_LOCAL_FLUSH_ONE, nr_pages);
		trace_tlb_flush(reason, nr_pages);
	} else {
		/* Full flush. */
		local_flush_tlb();
		if (local)
			count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
		trace_tlb_flush(reason, TLB_FLUSH_ALL);
	}

	/* Both paths above update our state to mm_tlb_gen. */
	this_cpu_write(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen, mm_tlb_gen);
}
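
/*
 * Worked example of flush_tlb_func_common()'s generation logic (illustrative
 * numbers): with local_tlb_gen == 2, f->new_tlb_gen == 3, mm_tlb_gen == 3
 * and a small f->start..f->end range, both extra conditions hold, so the
 * pages are flushed one by one and the local generation jumps to 3.  If
 * mm_tlb_gen were already 4 because another flush is pending, condition 2
 * fails and a single full flush covers both generations at once.
 */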
static void flush_tlb_func_local(void *info, enum tlb_flush_reason reason)
{
	const struct flush_tlb_info *f = info;

	flush_tlb_func_common(f, true, reason);
}
static void flush_tlb_func_remote(void *info)
{
	const struct flush_tlb_info *f = info;

	inc_irq_stat(irq_tlb_count);

	if (f->mm && f->mm != this_cpu_read(cpu_tlbstate.loaded_mm))
		return;

	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
	flush_tlb_func_common(f, false, TLB_REMOTE_SHOOTDOWN);
}
void native_flush_tlb_others(const struct cpumask *cpumask,
			     const struct flush_tlb_info *info)
{
	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
	if (info->end == TLB_FLUSH_ALL)
		trace_tlb_flush(TLB_REMOTE_SEND_IPI, TLB_FLUSH_ALL);
	else
		trace_tlb_flush(TLB_REMOTE_SEND_IPI,
				(info->end - info->start) >> PAGE_SHIFT);

	if (is_uv_system()) {
		/*
		 * This whole special case is confused.  UV has a "Broadcast
		 * Assist Unit", which seems to be a fancy way to send IPIs.
		 * Back when x86 used an explicit TLB flush IPI, UV was
		 * optimized to use its own mechanism.  These days, x86 uses
		 * smp_call_function_many(), but UV still uses a manual IPI,
		 * and that IPI's action is out of date -- it does a manual
		 * flush instead of calling flush_tlb_func_remote().  This
		 * means that the percpu tlb_gen variables won't be updated
		 * and we'll do pointless flushes on future context switches.
		 *
		 * Rather than hooking native_flush_tlb_others() here, I think
		 * that UV should be updated so that smp_call_function_many(),
		 * etc, are optimal on UV.
		 */
		unsigned int cpu;

		cpu = smp_processor_id();
		cpumask = uv_flush_tlb_others(cpumask, info);
		if (cpumask)
			smp_call_function_many(cpumask, flush_tlb_func_remote,
					       (void *)info, 1);
		return;
	}

	smp_call_function_many(cpumask, flush_tlb_func_remote,
			       (void *)info, 1);
}
/*
 * See Documentation/x86/tlb.txt for details.  We choose 33
 * because it is large enough to cover the vast majority (at
 * least 95%) of allocations, and is small enough that we are
 * confident it will not cause too much overhead.  Each single
 * flush is about 100 ns, so this caps the maximum overhead at
 * _about_ 3 usec.
 *
 * This is in units of pages.
 */
static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
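
/*
 * Back-of-the-envelope numbers behind the value above (illustrative,
 * assuming the ~100 ns per-flush figure from the comment): flushing 33
 * pages one at a time costs roughly 33 * 100 ns, i.e. a bit over 3 us.
 * Beyond that point a single full flush is cheaper up front, at the cost
 * of discarding unrelated TLB entries that later have to be refilled by
 * page walks.
 */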
void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
			unsigned long end, unsigned long vmflag)
{
	int cpu;

	struct flush_tlb_info info = {
		.mm = mm,
	};

	cpu = get_cpu();

	/* This is also a barrier that synchronizes with switch_mm(). */
	info.new_tlb_gen = inc_mm_tlb_gen(mm);

	/* Should we flush just the requested range? */
	if ((end != TLB_FLUSH_ALL) &&
	    !(vmflag & VM_HUGETLB) &&
	    ((end - start) >> PAGE_SHIFT) <= tlb_single_page_flush_ceiling) {
		info.start = start;
		info.end = end;
	} else {
		info.start = 0UL;
		info.end = TLB_FLUSH_ALL;
	}
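
	/*
	 * Example of the decision above (illustrative numbers): unmapping
	 * 8 pages gives (end - start) >> PAGE_SHIFT == 8 <= 33, so those
	 * 8 pages are flushed individually.  A 1 MiB range is 256 pages,
	 * which exceeds the ceiling, so info.end becomes TLB_FLUSH_ALL and
	 * the whole TLB is flushed instead.
	 */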
	if (mm == this_cpu_read(cpu_tlbstate.loaded_mm)) {
		VM_WARN_ON(irqs_disabled());
		local_irq_disable();
		flush_tlb_func_local(&info, TLB_LOCAL_MM_SHOOTDOWN);
		local_irq_enable();
	}

	if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), &info);

	put_cpu();
}
static void do_flush_tlb_all(void *info)
{
	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
	__flush_tlb_all();
}

void flush_tlb_all(void)
{
	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
	on_each_cpu(do_flush_tlb_all, NULL, 1);
}
static void do_kernel_range_flush(void *info)
{
	struct flush_tlb_info *f = info;
	unsigned long addr;

	/* Flush the range one page at a time with 'invlpg'. */
	for (addr = f->start; addr < f->end; addr += PAGE_SIZE)
		__flush_tlb_single(addr);
}
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	/* Balance this the same way as a user space task's flush; be a bit conservative. */
	if (end == TLB_FLUSH_ALL ||
	    (end - start) > tlb_single_page_flush_ceiling << PAGE_SHIFT) {
		on_each_cpu(do_flush_tlb_all, NULL, 1);
	} else {
		struct flush_tlb_info info;

		info.start = start;
		info.end = end;
		on_each_cpu(do_kernel_range_flush, &info, 1);
	}
}
void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
{
	struct flush_tlb_info info = {
		.mm = NULL,
		.start = 0UL,
		.end = TLB_FLUSH_ALL,
	};

	int cpu = get_cpu();

	if (cpumask_test_cpu(cpu, &batch->cpumask)) {
		VM_WARN_ON(irqs_disabled());
		local_irq_disable();
		flush_tlb_func_local(&info, TLB_LOCAL_SHOOTDOWN);
		local_irq_enable();
	}

	if (cpumask_any_but(&batch->cpumask, cpu) < nr_cpu_ids)
		flush_tlb_others(&batch->cpumask, &info);

	cpumask_clear(&batch->cpumask);

	put_cpu();
}
static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf,
				  size_t count, loff_t *ppos)
{
	char buf[32];
	unsigned int len;

	len = sprintf(buf, "%ld\n", tlb_single_page_flush_ceiling);
	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
static ssize_t tlbflush_write_file(struct file *file,
				   const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	ssize_t len;
	int ceiling;

	len = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, len))
		return -EFAULT;

	buf[len] = '\0';
	if (kstrtoint(buf, 0, &ceiling))
		return -EINVAL;

	if (ceiling < 0)
		return -EINVAL;

	tlb_single_page_flush_ceiling = ceiling;
	return count;
}
static const struct file_operations fops_tlbflush = {
	.read = tlbflush_read_file,
	.write = tlbflush_write_file,
	.llseek = default_llseek,
};
static int __init create_tlb_single_page_flush_ceiling(void)
{
	debugfs_create_file("tlb_single_page_flush_ceiling", S_IRUSR | S_IWUSR,
			    arch_debugfs_dir, NULL, &fops_tlbflush);
	return 0;
}
late_initcall(create_tlb_single_page_flush_ceiling);
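
/*
 * Usage sketch for the knob above (assuming debugfs is mounted at
 * /sys/kernel/debug; arch_debugfs_dir is the arch-specific "x86" directory):
 *
 *   # cat /sys/kernel/debug/x86/tlb_single_page_flush_ceiling
 *   33
 *   # echo 50 > /sys/kernel/debug/x86/tlb_single_page_flush_ceiling
 */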