#include <linux/init.h>

#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/cpu.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cache.h>
#include <asm/apic.h>
#include <asm/uv/uv.h>
#include <linux/debugfs.h>

/*
 *	Smarter SMP flushing macros.
 *		c/o Linus Torvalds.
 *
 *	These mean you can really definitely utterly forget about
 *	writing to user space from interrupts. (It's not allowed anyway).
 *
 *	Optimizations Manfred Spraul <manfred@colorfullife.com>
 *
 *	More scalable flush, from Andi Kleen
 *
 *	Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
 */

#ifdef CONFIG_SMP

/*
 * We cannot call mmdrop() because we are in interrupt context,
 * instead update mm->cpu_vm_mask.
 */
void leave_mm(int cpu)
{
	struct mm_struct *active_mm = this_cpu_read(cpu_tlbstate.active_mm);
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		BUG();
	if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
		cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
		load_cr3(swapper_pg_dir);
		/*
		 * This gets called in the idle path where RCU
		 * functions differently.  Tracing normally
		 * uses RCU, so we have to call the tracepoint
		 * specially here.
		 */
		trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
	}
}
EXPORT_SYMBOL_GPL(leave_mm);

#endif /* CONFIG_SMP */
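
/*
 * switch_mm() switches the CPU to a new mm.  It simply disables
 * interrupts around switch_mm_irqs_off(), which does the real work
 * and expects to be called with interrupts off.
 */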
void switch_mm(struct mm_struct *prev, struct mm_struct *next,
	       struct task_struct *tsk)
{
	unsigned long flags;

	local_irq_save(flags);
	switch_mm_irqs_off(prev, next, tsk);
	local_irq_restore(flags);
}
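
/*
 * switch_mm_irqs_off() does the actual mm switch with interrupts
 * already disabled: it updates the per-CPU TLB state and mm_cpumask,
 * reloads CR3 (which also provides the full memory barrier discussed
 * in the ordering comment below), and reloads per-mm CR4 and LDT
 * state as needed.
 */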
void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			struct task_struct *tsk)
{
	unsigned cpu = smp_processor_id();

	if (likely(prev != next)) {
		if (IS_ENABLED(CONFIG_VMAP_STACK)) {
			/*
			 * If our current stack is in vmalloc space and isn't
			 * mapped in the new pgd, we'll double-fault.  Forcibly
			 * map it.
			 */
			unsigned int stack_pgd_index = pgd_index(current_stack_pointer());

			pgd_t *pgd = next->pgd + stack_pgd_index;

			if (unlikely(pgd_none(*pgd)))
				set_pgd(pgd, init_mm.pgd[stack_pgd_index]);
		}

#ifdef CONFIG_SMP
		this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
		this_cpu_write(cpu_tlbstate.active_mm, next);
#endif

		cpumask_set_cpu(cpu, mm_cpumask(next));

		/*
		 * Re-load page tables.
		 *
		 * This logic has an ordering constraint:
		 *
		 *  CPU 0: Write to a PTE for 'next'
		 *  CPU 0: load bit 1 in mm_cpumask.  if nonzero, send IPI.
		 *  CPU 1: set bit 1 in next's mm_cpumask
		 *  CPU 1: load from the PTE that CPU 0 writes (implicit)
		 *
		 * We need to prevent an outcome in which CPU 1 observes
		 * the new PTE value and CPU 0 observes bit 1 clear in
		 * mm_cpumask.  (If that occurs, then the IPI will never
		 * be sent, and CPU 0's TLB will contain a stale entry.)
		 *
		 * The bad outcome can occur if either CPU's load is
		 * reordered before that CPU's store, so both CPUs must
		 * execute full barriers to prevent this from happening.
		 *
		 * Thus, switch_mm needs a full barrier between the
		 * store to mm_cpumask and any operation that could load
		 * from next->pgd.  TLB fills are special and can happen
		 * due to instruction fetches or for no reason at all,
		 * and neither LOCK nor MFENCE orders them.
		 * Fortunately, load_cr3() is serializing and gives the
		 * ordering guarantee we need.
		 */
		load_cr3(next->pgd);

		trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);

		/* Stop flush ipis for the previous mm */
		cpumask_clear_cpu(cpu, mm_cpumask(prev));

		/* Load per-mm CR4 state */
		load_mm_cr4(next);

#ifdef CONFIG_MODIFY_LDT_SYSCALL
		/*
		 * Load the LDT, if the LDT is different.
		 *
		 * It's possible that prev->context.ldt doesn't match
		 * the LDT register.  This can happen if leave_mm(prev)
		 * was called and then modify_ldt changed
		 * prev->context.ldt but suppressed an IPI to this CPU.
		 * In this case, prev->context.ldt != NULL, because we
		 * never set context.ldt to NULL while the mm still
		 * exists.  That means that next->context.ldt !=
		 * prev->context.ldt, because mms never share an LDT.
		 */
		if (unlikely(prev->context.ldt != next->context.ldt))
			load_mm_ldt(next);
#endif
	}
#ifdef CONFIG_SMP
	  else {
		this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
		BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);

		if (!cpumask_test_cpu(cpu, mm_cpumask(next))) {
			/*
			 * On established mms, the mm_cpumask is only changed
			 * from irq context, from ptep_clear_flush() while in
			 * lazy tlb mode, and here. Irqs are blocked during
			 * schedule, protecting us from simultaneous changes.
			 */
			cpumask_set_cpu(cpu, mm_cpumask(next));

			/*
			 * We were in lazy tlb mode and leave_mm disabled
			 * tlb flush IPI delivery. We must reload CR3
			 * to make sure to use no freed page tables.
			 *
			 * As above, load_cr3() is serializing and orders TLB
			 * fills with respect to the mm_cpumask write.
			 */
			load_cr3(next->pgd);
			trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
			load_mm_cr4(next);
			load_mm_ldt(next);
		}
	}
#endif
}

#ifdef CONFIG_SMP

/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) set cpu_tlbstate to TLBSTATE_OK
 *	Now the tlb flush NMI handler flush_tlb_func won't call leave_mm
 *	if cpu0 was in lazy tlb mode.
 * 1a2) update cpu active_mm
 *	Now cpu0 accepts tlb flushes for the new mm.
 * 1a3) cpu_set(cpu, new_mm->cpu_vm_mask);
 *	Now the other cpus will send tlb flush ipis.
 * 1a4) change cr3.
 * 1a5) cpu_clear(cpu, old_mm->cpu_vm_mask);
 *	Stop ipi delivery for the old mm. This is not synchronized with
 *	the other cpus, but flush_tlb_func ignores flush ipis for the wrong
 *	mm, and in the worst case we perform a superfluous tlb flush.
 * 1b) thread switch without mm change
 *	cpu active_mm is correct, cpu0 already handles flush ipis.
 * 1b1) set cpu_tlbstate to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *	Atomically set the bit [other cpus will start sending flush ipis],
 *	and test the bit.
 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %%esp, ie current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu_tlbstate is local to each cpu, no
 * write/read ordering problems.
 */

/*
 * TLB flush function:
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in the lazy tlb mode.
 */
static void flush_tlb_func(void *info)
{
	const struct flush_tlb_info *f = info;

	inc_irq_stat(irq_tlb_count);

	if (f->mm && f->mm != this_cpu_read(cpu_tlbstate.active_mm))
		return;

	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);

	if (this_cpu_read(cpu_tlbstate.state) != TLBSTATE_OK) {
		leave_mm(smp_processor_id());
		return;
	}

	if (f->end == TLB_FLUSH_ALL) {
		local_flush_tlb();
		trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, TLB_FLUSH_ALL);
	} else {
		unsigned long addr;
		unsigned long nr_pages =
			(f->end - f->start) / PAGE_SIZE;
		addr = f->start;
		while (addr < f->end) {
			__flush_tlb_single(addr);
			addr += PAGE_SIZE;
		}
		trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, nr_pages);
	}
}
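
/*
 * native_flush_tlb_others() sends the flush request described by
 * *info to every CPU in cpumask.  On UV systems the cpumask may first
 * be filtered by uv_flush_tlb_others(); the remaining CPUs are reached
 * with an smp_call_function_many() IPI that runs flush_tlb_func().
 */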
void native_flush_tlb_others(const struct cpumask *cpumask,
			     const struct flush_tlb_info *info)
{
	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
	if (info->end == TLB_FLUSH_ALL)
		trace_tlb_flush(TLB_REMOTE_SEND_IPI, TLB_FLUSH_ALL);
	else
		trace_tlb_flush(TLB_REMOTE_SEND_IPI,
				(info->end - info->start) >> PAGE_SHIFT);

	if (is_uv_system()) {
		unsigned int cpu;

		cpu = smp_processor_id();
		cpumask = uv_flush_tlb_others(cpumask, info);
		if (cpumask)
			smp_call_function_many(cpumask, flush_tlb_func,
					       (void *)info, 1);
		return;
	}
	smp_call_function_many(cpumask, flush_tlb_func,
			       (void *)info, 1);
}

/*
 * See Documentation/x86/tlb.txt for details.  We choose 33
 * because it is large enough to cover the vast majority (at
 * least 95%) of allocations, and is small enough that we are
 * confident it will not cause too much overhead.  Each single
 * flush is about 100 ns, so this caps the maximum overhead at
 * _about_ 3,000 ns.
 *
 * This is in units of pages.
 */
static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
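
/*
 * flush_tlb_mm_range() flushes a range of user addresses for 'mm'.
 * The local CPU is flushed directly, either wholesale or page by page
 * depending on tlb_single_page_flush_ceiling, and any other CPU in
 * mm_cpumask(mm) is flushed through flush_tlb_others().
 */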
void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned long vmflag)
{
	unsigned long addr;
	struct flush_tlb_info info;
	/* do a global flush by default */
	unsigned long base_pages_to_flush = TLB_FLUSH_ALL;

	preempt_disable();

	if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB))
		base_pages_to_flush = (end - start) >> PAGE_SHIFT;
	if (base_pages_to_flush > tlb_single_page_flush_ceiling)
		base_pages_to_flush = TLB_FLUSH_ALL;

	if (current->active_mm != mm) {
		/* Synchronize with switch_mm. */
		smp_mb();

		goto out;
	}

	if (!current->mm) {
		leave_mm(smp_processor_id());

		/* Synchronize with switch_mm. */
		smp_mb();

		goto out;
	}

	/*
	 * Both branches below are implicit full barriers (MOV to CR or
	 * INVLPG) that synchronize with switch_mm.
	 */
	if (base_pages_to_flush == TLB_FLUSH_ALL) {
		count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
		local_flush_tlb();
	} else {
		/* flush the range page by page with 'invlpg' */
		for (addr = start; addr < end; addr += PAGE_SIZE) {
			count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
			__flush_tlb_single(addr);
		}
	}
	trace_tlb_flush(TLB_LOCAL_MM_SHOOTDOWN, base_pages_to_flush);
out:
	info.mm = mm;
	if (base_pages_to_flush == TLB_FLUSH_ALL) {
		info.start = 0UL;
		info.end = TLB_FLUSH_ALL;
	} else {
		info.start = start;
		info.end = end;
	}
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), &info);
	preempt_enable();
}
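
/*
 * flush_tlb_all() flushes the entire TLB on every CPU.  The per-CPU
 * helper do_flush_tlb_all() also drops out of lazy TLB mode via
 * leave_mm() so the CPU stops receiving further flush IPIs for the
 * lazy mm.
 */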
static void do_flush_tlb_all(void *info)
{
	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
	__flush_tlb_all();
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
		leave_mm(smp_processor_id());
}

void flush_tlb_all(void)
{
	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
	on_each_cpu(do_flush_tlb_all, NULL, 1);
}

static void do_kernel_range_flush(void *info)
{
	struct flush_tlb_info *f = info;
	unsigned long addr;

	/* flush the range page by page with 'invlpg' */
	for (addr = f->start; addr < f->end; addr += PAGE_SIZE)
		__flush_tlb_single(addr);
}
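
/*
 * flush_tlb_kernel_range() flushes kernel-address mappings on all
 * CPUs.  Small ranges are flushed page by page through
 * do_kernel_range_flush(); ranges above the single-page flush ceiling
 * fall back to a full TLB flush, mirroring the user-space heuristic.
 */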
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	/* Balance as user space task's flush, a bit conservative */
	if (end == TLB_FLUSH_ALL ||
	    (end - start) > tlb_single_page_flush_ceiling * PAGE_SIZE) {
		on_each_cpu(do_flush_tlb_all, NULL, 1);
	} else {
		struct flush_tlb_info info;

		info.start = start;
		info.end = end;
		on_each_cpu(do_kernel_range_flush, &info, 1);
	}
}
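
/*
 * arch_tlbbatch_flush() performs the TLB flush for a batch of unmaps
 * accumulated by the batched unmap code: the local CPU gets a full
 * flush if it appears in the batch's cpumask, remote CPUs are flushed
 * via flush_tlb_others(), and the cpumask is cleared for reuse.
 */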
void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
{
	struct flush_tlb_info info = {
		.mm = NULL,
		.start = 0UL,
		.end = TLB_FLUSH_ALL,
	};
	int cpu = get_cpu();

	if (cpumask_test_cpu(cpu, &batch->cpumask)) {
		count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
		local_flush_tlb();
		trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
	}

	if (cpumask_any_but(&batch->cpumask, cpu) < nr_cpu_ids)
		flush_tlb_others(&batch->cpumask, &info);
	cpumask_clear(&batch->cpumask);

	put_cpu();
}
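
/*
 * Debugfs knob: "tlb_single_page_flush_ceiling" exposes the threshold
 * above which range flushes fall back to a full TLB flush.  Reads
 * report the current value; writes accept any non-negative integer.
 */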
static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	char buf[32];
	unsigned int len;

	len = sprintf(buf, "%ld\n", tlb_single_page_flush_ceiling);
	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static ssize_t tlbflush_write_file(struct file *file,
		 const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	ssize_t len;
	int ceiling;

	len = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, len))
		return -EFAULT;

	buf[len] = '\0';
	if (kstrtoint(buf, 0, &ceiling))
		return -EINVAL;

	if (ceiling < 0)
		return -EINVAL;

	tlb_single_page_flush_ceiling = ceiling;
	return count;
}

static const struct file_operations fops_tlbflush = {
	.read = tlbflush_read_file,
	.write = tlbflush_write_file,
	.llseek = default_llseek,
};

static int __init create_tlb_single_page_flush_ceiling(void)
{
	debugfs_create_file("tlb_single_page_flush_ceiling", S_IRUSR | S_IWUSR,
			    arch_debugfs_dir, NULL, &fops_tlbflush);
	return 0;
}
late_initcall(create_tlb_single_page_flush_ceiling);

#endif /* CONFIG_SMP */