#include <linux/init.h>

#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/cpu.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cache.h>
#include <asm/apic.h>
#include <asm/uv/uv.h>
#include <linux/debugfs.h>

/*
 * Smarter SMP flushing macros.
 *	c/o Linus Torvalds.
 *
 * These mean you can really definitely utterly forget about
 * writing to user space from interrupts. (It's not allowed anyway.)
 *
 * Optimizations Manfred Spraul <manfred@colorfullife.com>
 *
 * More scalable flush, from Andi Kleen
 *
 * Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
 */

#ifdef CONFIG_SMP

struct flush_tlb_info {
        struct mm_struct *flush_mm;
        unsigned long flush_start;
        unsigned long flush_end;
};

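/*
 * Illustrative pairing (a sketch mirroring native_flush_tlb_others()
 * further down in this file): a sender fills one of these on its
 * stack and hands it to the IPI machinery,
 *
 *	struct flush_tlb_info info = {
 *		.flush_mm	= mm,
 *		.flush_start	= start,
 *		.flush_end	= end,
 *	};
 *	smp_call_function_many(cpumask, flush_tlb_func, &info, 1);
 *
 * and each target CPU then receives &info as the 'info' argument of
 * flush_tlb_func().
 */
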
/*
 * We cannot call mmdrop() because we are in interrupt context;
 * instead, update mm->cpu_vm_mask.
 */
void leave_mm(int cpu)
{
        struct mm_struct *active_mm = this_cpu_read(cpu_tlbstate.active_mm);
        if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
                BUG();
        if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
                cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
                load_cr3(swapper_pg_dir);
                /*
                 * This gets called in the idle path where RCU
                 * functions differently.  Tracing normally
                 * uses RCU, so we have to call the tracepoint
                 * specially here.
                 */
                trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
        }
}
EXPORT_SYMBOL_GPL(leave_mm);

#endif /* CONFIG_SMP */

void switch_mm(struct mm_struct *prev, struct mm_struct *next,
               struct task_struct *tsk)
{
        unsigned long flags;

        local_irq_save(flags);
        switch_mm_irqs_off(prev, next, tsk);
        local_irq_restore(flags);
}

void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
                        struct task_struct *tsk)
{
        unsigned cpu = smp_processor_id();

        if (likely(prev != next)) {
#ifdef CONFIG_SMP
                this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
                this_cpu_write(cpu_tlbstate.active_mm, next);
#endif
                cpumask_set_cpu(cpu, mm_cpumask(next));

                /*
                 * Re-load page tables.
                 *
                 * This logic has an ordering constraint:
                 *
                 *  CPU 0: Write to a PTE for 'next'
                 *  CPU 0: load bit 1 in mm_cpumask.  if nonzero, send IPI.
                 *  CPU 1: set bit 1 in next's mm_cpumask
                 *  CPU 1: load from the PTE that CPU 0 writes (implicit)
                 *
                 * We need to prevent an outcome in which CPU 1 observes
                 * the new PTE value and CPU 0 observes bit 1 clear in
                 * mm_cpumask.  (If that occurs, then the IPI will never
                 * be sent, and CPU 0's TLB will contain a stale entry.)
                 *
                 * The bad outcome can occur if either CPU's load is
                 * reordered before that CPU's store, so both CPUs must
                 * execute full barriers to prevent this from happening.
                 *
                 * Thus, switch_mm needs a full barrier between the
                 * store to mm_cpumask and any operation that could load
                 * from next->pgd.  TLB fills are special and can happen
                 * due to instruction fetches or for no reason at all,
                 * and neither LOCK nor MFENCE orders them.
                 * Fortunately, load_cr3() is serializing and gives the
                 * ordering guarantee we need.
                 */
                load_cr3(next->pgd);

                trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);

                /* Stop flush ipis for the previous mm */
                cpumask_clear_cpu(cpu, mm_cpumask(prev));

                /* Load per-mm CR4 state */
                load_mm_cr4(next);

#ifdef CONFIG_MODIFY_LDT_SYSCALL
                /*
                 * Load the LDT, if the LDT is different.
                 *
                 * It's possible that prev->context.ldt doesn't match
                 * the LDT register.  This can happen if leave_mm(prev)
                 * was called and then modify_ldt changed
                 * prev->context.ldt but suppressed an IPI to this CPU.
                 * In this case, prev->context.ldt != NULL, because we
                 * never set context.ldt to NULL while the mm still
                 * exists.  That means that next->context.ldt !=
                 * prev->context.ldt, because mms never share an LDT.
                 */
                if (unlikely(prev->context.ldt != next->context.ldt))
                        load_mm_ldt(next);
#endif
        }
#ifdef CONFIG_SMP
        else {
                this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
                BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);

                if (!cpumask_test_cpu(cpu, mm_cpumask(next))) {
                        /*
                         * On established mms, the mm_cpumask is only changed
                         * from irq context, from ptep_clear_flush() while in
                         * lazy tlb mode, and here.  Irqs are blocked during
                         * schedule, protecting us from simultaneous changes.
                         */
                        cpumask_set_cpu(cpu, mm_cpumask(next));

                        /*
                         * We were in lazy tlb mode and leave_mm disabled
                         * tlb flush IPI delivery.  We must reload CR3
                         * to make sure to use no freed page tables.
                         *
                         * As above, load_cr3() is serializing and orders TLB
                         * fills with respect to the mm_cpumask write.
                         */
                        load_cr3(next->pgd);
                        trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
                        load_mm_cr4(next);
                        load_mm_ldt(next);
                }
        }
#endif
}

#ifdef CONFIG_SMP

/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) set cpu_tlbstate to TLBSTATE_OK
 *	Now the tlb flush IPI handler flush_tlb_func won't call leave_mm
 *	if cpu0 was in lazy tlb mode.
 * 1a2) update cpu active_mm
 *	Now cpu0 accepts tlb flushes for the new mm.
 * 1a3) cpu_set(cpu, new_mm->cpu_vm_mask);
 *	Now the other cpus will send tlb flush ipis.
 * 1a4) change cr3.
 * 1a5) cpu_clear(cpu, old_mm->cpu_vm_mask);
 *	Stop ipi delivery for the old mm. This is not synchronized with
 *	the other cpus, but flush_tlb_func ignores flush ipis for the wrong
 *	mm, and in the worst case we perform a superfluous tlb flush.
 * 1b) thread switch without mm change
 *	cpu active_mm is correct, cpu0 already handles flush ipis.
 * 1b1) set cpu_tlbstate to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *	Atomically set the bit [other cpus will start sending flush ipis],
 *	and test the bit.
 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %esp, i.e. current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %esp, i.e. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu_tlbstate is local to each cpu, no
 * write/read ordering problems.
 */

/*
 * TLB flush function:
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in the lazy tlb mode.
 */
static void flush_tlb_func(void *info)
{
        struct flush_tlb_info *f = info;

        inc_irq_stat(irq_tlb_count);

        if (f->flush_mm && f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
                return;

        count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
        if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
                if (f->flush_end == TLB_FLUSH_ALL) {
                        local_flush_tlb();
                        trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, TLB_FLUSH_ALL);
                } else {
                        unsigned long addr;
                        unsigned long nr_pages =
                                (f->flush_end - f->flush_start) / PAGE_SIZE;
                        addr = f->flush_start;
                        while (addr < f->flush_end) {
                                __flush_tlb_single(addr);
                                addr += PAGE_SIZE;
                        }
                        trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, nr_pages);
                }
        } else
                leave_mm(smp_processor_id());
}

void native_flush_tlb_others(const struct cpumask *cpumask,
                             struct mm_struct *mm, unsigned long start,
                             unsigned long end)
{
        struct flush_tlb_info info;

        if (end == 0)
                end = start + PAGE_SIZE;
        info.flush_mm = mm;
        info.flush_start = start;
        info.flush_end = end;

        count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
        if (end == TLB_FLUSH_ALL)
                trace_tlb_flush(TLB_REMOTE_SEND_IPI, TLB_FLUSH_ALL);
        else
                trace_tlb_flush(TLB_REMOTE_SEND_IPI,
                                (end - start) >> PAGE_SHIFT);

        if (is_uv_system()) {
                unsigned int cpu;

                cpu = smp_processor_id();
                cpumask = uv_flush_tlb_others(cpumask, mm, start, end, cpu);
                if (cpumask)
                        smp_call_function_many(cpumask, flush_tlb_func,
                                               &info, 1);
                return;
        }
        smp_call_function_many(cpumask, flush_tlb_func, &info, 1);
}
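
/*
 * Calling convention (illustrative, derived from the callers in this
 * file): flush_tlb_page() passes end == 0 to mean "a single page",
 * which is normalized above to start + PAGE_SIZE, while
 * flush_tlb_current_task() passes end == TLB_FLUSH_ALL to request a
 * full flush on the remote CPUs.
 */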

void flush_tlb_current_task(void)
{
        struct mm_struct *mm = current->mm;

        preempt_disable();

        count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);

        /* This is an implicit full barrier that synchronizes with switch_mm. */
        local_flush_tlb();

        trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
        if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
                flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
        preempt_enable();
}

/*
 * See Documentation/x86/tlb.txt for details.  We choose 33
 * because it is large enough to cover the vast majority (at
 * least 95%) of allocations, and is small enough that we are
 * confident it will not cause too much overhead.  Each single
 * flush is about 100 ns, so this caps the maximum overhead at
 * _about_ 3,000 ns.
 *
 * This is in units of pages.
 */
static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
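
/*
 * Worked example (illustrative): flushing a 128 KiB range covers
 * 128 KiB / 4 KiB = 32 pages, which is <= 33, so flush_tlb_mm_range()
 * below issues 32 individual invlpg instructions; a 256 KiB range
 * (64 pages) exceeds the ceiling and falls back to a full TLB flush.
 */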

void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
                        unsigned long end, unsigned long vmflag)
{
        unsigned long addr;
        /* do a global flush by default */
        unsigned long base_pages_to_flush = TLB_FLUSH_ALL;

        preempt_disable();
        if (current->active_mm != mm) {
                /* Synchronize with switch_mm. */
                smp_mb();

                goto out;
        }

        if (!current->mm) {
                leave_mm(smp_processor_id());

                /* Synchronize with switch_mm. */
                smp_mb();

                goto out;
        }

        if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB))
                base_pages_to_flush = (end - start) >> PAGE_SHIFT;

        /*
         * Both branches below are implicit full barriers (MOV to CR or
         * INVLPG) that synchronize with switch_mm.
         */
        if (base_pages_to_flush > tlb_single_page_flush_ceiling) {
                base_pages_to_flush = TLB_FLUSH_ALL;
                count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
                local_flush_tlb();
        } else {
                /* flush the range one page at a time with 'invlpg' */
                for (addr = start; addr < end; addr += PAGE_SIZE) {
                        count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
                        __flush_tlb_single(addr);
                }
        }
        trace_tlb_flush(TLB_LOCAL_MM_SHOOTDOWN, base_pages_to_flush);
out:
        if (base_pages_to_flush == TLB_FLUSH_ALL) {
                start = 0UL;
                end = TLB_FLUSH_ALL;
        }
        if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
                flush_tlb_others(mm_cpumask(mm), mm, start, end);
        preempt_enable();
}
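
/*
 * Illustrative caller (a sketch; the exact definition lives in
 * asm/tlbflush.h): flush_tlb_range() expands to roughly
 *
 *	flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags);
 *
 * so 'vmflag' carries the VMA flags, which is how VM_HUGETLB ranges
 * skip the per-page invlpg loop above.
 */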

void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
{
        struct mm_struct *mm = vma->vm_mm;

        preempt_disable();

        if (current->active_mm == mm) {
                if (current->mm) {
                        /*
                         * Implicit full barrier (INVLPG) that synchronizes
                         * with switch_mm.
                         */
                        __flush_tlb_one(start);
                } else {
                        leave_mm(smp_processor_id());

                        /* Synchronize with switch_mm. */
                        smp_mb();
                }
        }

        if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
                flush_tlb_others(mm_cpumask(mm), mm, start, 0UL);

        preempt_enable();
}

static void do_flush_tlb_all(void *info)
{
        count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
        __flush_tlb_all();
        if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
                leave_mm(smp_processor_id());
}

void flush_tlb_all(void)
{
        count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
        on_each_cpu(do_flush_tlb_all, NULL, 1);
}

static void do_kernel_range_flush(void *info)
{
        struct flush_tlb_info *f = info;
        unsigned long addr;

        /* flush the range one page at a time with 'invlpg' */
        for (addr = f->flush_start; addr < f->flush_end; addr += PAGE_SIZE)
                __flush_tlb_single(addr);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        /* Balance as in a user space task's flush; a bit conservative. */
        if (end == TLB_FLUSH_ALL ||
            (end - start) > tlb_single_page_flush_ceiling * PAGE_SIZE) {
                on_each_cpu(do_flush_tlb_all, NULL, 1);
        } else {
                struct flush_tlb_info info;
                info.flush_start = start;
                info.flush_end = end;
                on_each_cpu(do_kernel_range_flush, &info, 1);
        }
}
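
/*
 * Illustrative caller (a sketch, not part of this file): vmalloc
 * teardown flushes a freed kernel virtual range roughly like
 *
 *	vunmap_page_range(addr, addr + size);
 *	flush_tlb_kernel_range(addr, addr + size);
 *
 * as unmap_kernel_range() in mm/vmalloc.c does.
 */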

static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf,
                                  size_t count, loff_t *ppos)
{
        char buf[32];
        unsigned int len;

        len = sprintf(buf, "%lu\n", tlb_single_page_flush_ceiling);
        return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static ssize_t tlbflush_write_file(struct file *file,
                const char __user *user_buf, size_t count, loff_t *ppos)
{
        char buf[32];
        ssize_t len;
        int ceiling;

        len = min(count, sizeof(buf) - 1);
        if (copy_from_user(buf, user_buf, len))
                return -EFAULT;

        buf[len] = '\0';
        if (kstrtoint(buf, 0, &ceiling))
                return -EINVAL;

        if (ceiling < 0)
                return -EINVAL;

        tlb_single_page_flush_ceiling = ceiling;
        return count;
}

static const struct file_operations fops_tlbflush = {
        .read = tlbflush_read_file,
        .write = tlbflush_write_file,
        .llseek = default_llseek,
};

static int __init create_tlb_single_page_flush_ceiling(void)
{
        debugfs_create_file("tlb_single_page_flush_ceiling", S_IRUSR | S_IWUSR,
                            arch_debugfs_dir, NULL, &fops_tlbflush);
        return 0;
}
late_initcall(create_tlb_single_page_flush_ceiling);
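
/*
 * Usage example (assuming debugfs is mounted at /sys/kernel/debug;
 * arch_debugfs_dir is the "x86" directory there):
 *
 *	# cat /sys/kernel/debug/x86/tlb_single_page_flush_ceiling
 *	33
 *	# echo 64 > /sys/kernel/debug/x86/tlb_single_page_flush_ceiling
 */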

#endif /* CONFIG_SMP */