#include <linux/init.h>

#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/cpu.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cache.h>
#include <asm/apic.h>
#include <asm/uv/uv.h>
#include <linux/debugfs.h>

/*
 * Smarter SMP flushing macros.
 *	c/o Linus Torvalds.
 *
 * These mean you can really definitely utterly forget about
 * writing to user space from interrupts. (It's not allowed anyway.)
 *
 * Optimizations Manfred Spraul <manfred@colorfullife.com>
 *
 * More scalable flush, from Andi Kleen
 *
 * Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
 */

#ifdef CONFIG_SMP

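/*
 * A flush request, as consumed by flush_tlb_func() below: flush_mm is
 * the address space being flushed (requests for a different mm are
 * ignored by the receiving CPU), and [flush_start, flush_end) is the
 * virtual address range to flush; flush_end == TLB_FLUSH_ALL requests
 * a full TLB flush.
 */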
struct flush_tlb_info {
        struct mm_struct *flush_mm;
        unsigned long flush_start;
        unsigned long flush_end;
};

/*
 * We cannot call mmdrop() because we are in interrupt context;
 * instead, update mm->cpu_vm_mask.
 */
void leave_mm(int cpu)
{
        struct mm_struct *active_mm = this_cpu_read(cpu_tlbstate.active_mm);
        if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
                BUG();
        if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
                cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
                load_cr3(swapper_pg_dir);
                /*
                 * This gets called in the idle path where RCU
                 * functions differently.  Tracing normally
                 * uses RCU, so we have to call the tracepoint
                 * specially here.
                 */
                trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
        }
}
EXPORT_SYMBOL_GPL(leave_mm);

#endif /* CONFIG_SMP */

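/*
 * switch_mm() is the interrupt-safe wrapper; switch_mm_irqs_off()
 * below does the real work and expects interrupts to be disabled.
 */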
void switch_mm(struct mm_struct *prev, struct mm_struct *next,
               struct task_struct *tsk)
{
        unsigned long flags;

        local_irq_save(flags);
        switch_mm_irqs_off(prev, next, tsk);
        local_irq_restore(flags);
}

void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
                        struct task_struct *tsk)
{
        unsigned cpu = smp_processor_id();

        if (likely(prev != next)) {
                if (IS_ENABLED(CONFIG_VMAP_STACK)) {
                        /*
                         * If our current stack is in vmalloc space and isn't
                         * mapped in the new pgd, we'll double-fault.  Forcibly
                         * map it.
                         */
                        unsigned int stack_pgd_index = pgd_index(current_stack_pointer());

                        pgd_t *pgd = next->pgd + stack_pgd_index;

                        if (unlikely(pgd_none(*pgd)))
                                set_pgd(pgd, init_mm.pgd[stack_pgd_index]);
                }

#ifdef CONFIG_SMP
                this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
                this_cpu_write(cpu_tlbstate.active_mm, next);
#endif

                cpumask_set_cpu(cpu, mm_cpumask(next));

                /*
                 * Re-load page tables.
                 *
                 * This logic has an ordering constraint:
                 *
                 *  CPU 0: Write to a PTE for 'next'
                 *  CPU 0: load bit 1 in mm_cpumask.  If nonzero, send IPI.
                 *  CPU 1: set bit 1 in next's mm_cpumask
                 *  CPU 1: load from the PTE that CPU 0 writes (implicit)
                 *
                 * We need to prevent an outcome in which CPU 1 observes
                 * the new PTE value and CPU 0 observes bit 1 clear in
                 * mm_cpumask.  (If that occurs, then the IPI will never
                 * be sent, and CPU 0's TLB will contain a stale entry.)
                 *
                 * The bad outcome can occur if either CPU's load is
                 * reordered before that CPU's store, so both CPUs must
                 * execute full barriers to prevent this from happening.
                 *
                 * Thus, switch_mm needs a full barrier between the
                 * store to mm_cpumask and any operation that could load
                 * from next->pgd.  TLB fills are special and can happen
                 * due to instruction fetches or for no reason at all,
                 * and neither LOCK nor MFENCE orders them.
                 * Fortunately, load_cr3() is serializing and gives the
                 * ordering guarantee we need.
                 */
                load_cr3(next->pgd);

                trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);

                /* Stop flush ipis for the previous mm */
                cpumask_clear_cpu(cpu, mm_cpumask(prev));

                /* Load per-mm CR4 state */
                load_mm_cr4(next);

#ifdef CONFIG_MODIFY_LDT_SYSCALL
                /*
                 * Load the LDT, if the LDT is different.
                 *
                 * It's possible that prev->context.ldt doesn't match
                 * the LDT register.  This can happen if leave_mm(prev)
                 * was called and then modify_ldt changed
                 * prev->context.ldt but suppressed an IPI to this CPU.
                 * In this case, prev->context.ldt != NULL, because we
                 * never set context.ldt to NULL while the mm still
                 * exists.  That means that next->context.ldt !=
                 * prev->context.ldt, because mms never share an LDT.
                 */
                if (unlikely(prev->context.ldt != next->context.ldt))
                        load_mm_ldt(next);
#endif
        }
#ifdef CONFIG_SMP
        else {
                this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
                BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);

                if (!cpumask_test_cpu(cpu, mm_cpumask(next))) {
                        /*
                         * On established mms, the mm_cpumask is only changed
                         * from irq context, from ptep_clear_flush() while in
                         * lazy tlb mode, and here.  Irqs are blocked during
                         * schedule, protecting us from simultaneous changes.
                         */
                        cpumask_set_cpu(cpu, mm_cpumask(next));

                        /*
                         * We were in lazy tlb mode and leave_mm disabled
                         * tlb flush IPI delivery.  We must reload CR3
                         * to make sure we don't use freed page tables.
                         *
                         * As above, load_cr3() is serializing and orders TLB
                         * fills with respect to the mm_cpumask write.
                         */
                        load_cr3(next->pgd);
                        trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
                        load_mm_cr4(next);
                        load_mm_ldt(next);
                }
        }
#endif
}

#ifdef CONFIG_SMP

/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) set cpu_tlbstate to TLBSTATE_OK
 *	Now the tlb flush IPI handler flush_tlb_func won't call leave_mm
 *	if cpu0 was in lazy tlb mode.
 * 1a2) update cpu active_mm
 *	Now cpu0 accepts tlb flushes for the new mm.
 * 1a3) cpu_set(cpu, new_mm->cpu_vm_mask);
 *	Now the other cpus will send tlb flush ipis.
 * 1a4) change cr3.
 * 1a5) cpu_clear(cpu, old_mm->cpu_vm_mask);
 *	Stop ipi delivery for the old mm.  This is not synchronized with
 *	the other cpus, but flush_tlb_func ignores flush ipis for the wrong
 *	mm, and in the worst case we perform a superfluous tlb flush.
 * 1b) thread switch without mm change
 *	cpu active_mm is correct, cpu0 already handles flush ipis.
 * 1b1) set cpu_tlbstate to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *	Atomically set the bit [other cpus will start sending flush ipis],
 *	and test the bit.
 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %esp, i.e. current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %esp, i.e. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu_tlbstate is local to each cpu, so there are
 * no write/read ordering problems.
 */

/*
 * TLB flush function:
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in the lazy tlb mode.
 */
static void flush_tlb_func(void *info)
{
        struct flush_tlb_info *f = info;

        inc_irq_stat(irq_tlb_count);

        if (f->flush_mm && f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
                return;

        count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
        if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
                if (f->flush_end == TLB_FLUSH_ALL) {
                        local_flush_tlb();
                        trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, TLB_FLUSH_ALL);
                } else {
                        unsigned long addr;
                        unsigned long nr_pages =
                                (f->flush_end - f->flush_start) / PAGE_SIZE;
                        addr = f->flush_start;
                        while (addr < f->flush_end) {
                                __flush_tlb_single(addr);
                                addr += PAGE_SIZE;
                        }
                        trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, nr_pages);
                }
        } else
                leave_mm(smp_processor_id());
}

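/*
 * Send a flush request to the CPUs in @cpumask.  end == 0 is treated
 * as a single-page flush starting at @start.  On SGI UV systems the
 * request is routed through the UV-specific flush mechanism rather
 * than a plain IPI broadcast.
 */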
void native_flush_tlb_others(const struct cpumask *cpumask,
                             struct mm_struct *mm, unsigned long start,
                             unsigned long end)
{
        struct flush_tlb_info info;

        if (end == 0)
                end = start + PAGE_SIZE;
        info.flush_mm = mm;
        info.flush_start = start;
        info.flush_end = end;

        count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
        if (end == TLB_FLUSH_ALL)
                trace_tlb_flush(TLB_REMOTE_SEND_IPI, TLB_FLUSH_ALL);
        else
                trace_tlb_flush(TLB_REMOTE_SEND_IPI,
                                (end - start) >> PAGE_SHIFT);

        if (is_uv_system()) {
                unsigned int cpu;

                cpu = smp_processor_id();
                cpumask = uv_flush_tlb_others(cpumask, mm, start, end, cpu);
                if (cpumask)
                        smp_call_function_many(cpumask, flush_tlb_func,
                                               &info, 1);
                return;
        }
        smp_call_function_many(cpumask, flush_tlb_func, &info, 1);
}

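/*
 * Flush the current task's entire address space: flush locally, then
 * send flush IPIs to any other CPUs running threads of this mm.
 */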
void flush_tlb_current_task(void)
{
        struct mm_struct *mm = current->mm;

        preempt_disable();

        count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);

        /* This is an implicit full barrier that synchronizes with switch_mm. */
        local_flush_tlb();

        trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
        if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
                flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
        preempt_enable();
}

/*
 * See Documentation/x86/tlb.txt for details.  We choose 33
 * because it is large enough to cover the vast majority (at
 * least 95%) of allocations, and is small enough that we are
 * confident it will not cause too much overhead.  Each single
 * flush is about 100 ns, so this caps the maximum overhead at
 * _about_ 3,300 ns (33 pages * ~100 ns).
 *
 * This is in units of pages.
 */
static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;

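/*
 * Flush a virtual address range of @mm.  Ranges of more than
 * tlb_single_page_flush_ceiling pages (and huge-page ranges) fall back
 * to a full TLB flush; smaller ranges are flushed one page at a time
 * with INVLPG.
 */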
void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
                        unsigned long end, unsigned long vmflag)
{
        unsigned long addr;
        /* do a global flush by default */
        unsigned long base_pages_to_flush = TLB_FLUSH_ALL;

        preempt_disable();
        if (current->active_mm != mm) {
                /* Synchronize with switch_mm. */
                smp_mb();

                goto out;
        }

        if (!current->mm) {
                leave_mm(smp_processor_id());

                /* Synchronize with switch_mm. */
                smp_mb();

                goto out;
        }

        if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB))
                base_pages_to_flush = (end - start) >> PAGE_SHIFT;

        /*
         * Both branches below are implicit full barriers (MOV to CR or
         * INVLPG) that synchronize with switch_mm.
         */
        if (base_pages_to_flush > tlb_single_page_flush_ceiling) {
                base_pages_to_flush = TLB_FLUSH_ALL;
                count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
                local_flush_tlb();
        } else {
                /* flush the range one page at a time with 'invlpg' */
                for (addr = start; addr < end; addr += PAGE_SIZE) {
                        count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
                        __flush_tlb_single(addr);
                }
        }
        trace_tlb_flush(TLB_LOCAL_MM_SHOOTDOWN, base_pages_to_flush);
out:
        if (base_pages_to_flush == TLB_FLUSH_ALL) {
                start = 0UL;
                end = TLB_FLUSH_ALL;
        }
        if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
                flush_tlb_others(mm_cpumask(mm), mm, start, end);
        preempt_enable();
}

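/*
 * Flush a single page of @vma's address space, locally and on any
 * other CPUs running this mm.
 */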
void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
{
        struct mm_struct *mm = vma->vm_mm;

        preempt_disable();

        if (current->active_mm == mm) {
                if (current->mm) {
                        /*
                         * Implicit full barrier (INVLPG) that synchronizes
                         * with switch_mm.
                         */
                        __flush_tlb_one(start);
                } else {
                        leave_mm(smp_processor_id());

                        /* Synchronize with switch_mm. */
                        smp_mb();
                }
        }

        if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
                flush_tlb_others(mm_cpumask(mm), mm, start, 0UL);

        preempt_enable();
}

static void do_flush_tlb_all(void *info)
{
        count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
        __flush_tlb_all();
        if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
                leave_mm(smp_processor_id());
}

void flush_tlb_all(void)
{
        count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
        on_each_cpu(do_flush_tlb_all, NULL, 1);
}

static void do_kernel_range_flush(void *info)
{
        struct flush_tlb_info *f = info;
        unsigned long addr;

        /* flush the range one page at a time with 'invlpg' */
        for (addr = f->flush_start; addr < f->flush_end; addr += PAGE_SIZE)
                __flush_tlb_single(addr);
}

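/*
 * Flush a kernel virtual address range on all CPUs, using the same
 * full-flush-vs-per-page heuristic as user-space range flushes.
 */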
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        /* Balance as in a user-space task's flush; a bit conservative */
        if (end == TLB_FLUSH_ALL ||
            (end - start) > tlb_single_page_flush_ceiling * PAGE_SIZE) {
                on_each_cpu(do_flush_tlb_all, NULL, 1);
        } else {
                struct flush_tlb_info info;
                info.flush_start = start;
                info.flush_end = end;
                on_each_cpu(do_kernel_range_flush, &info, 1);
        }
}

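/*
 * debugfs interface for tuning tlb_single_page_flush_ceiling at
 * run time.
 */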
static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf,
                                  size_t count, loff_t *ppos)
{
        char buf[32];
        unsigned int len;

        len = sprintf(buf, "%ld\n", tlb_single_page_flush_ceiling);
        return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static ssize_t tlbflush_write_file(struct file *file,
                                   const char __user *user_buf, size_t count, loff_t *ppos)
{
        char buf[32];
        ssize_t len;
        int ceiling;

        len = min(count, sizeof(buf) - 1);
        if (copy_from_user(buf, user_buf, len))
                return -EFAULT;

        buf[len] = '\0';
        if (kstrtoint(buf, 0, &ceiling))
                return -EINVAL;

        if (ceiling < 0)
                return -EINVAL;

        tlb_single_page_flush_ceiling = ceiling;
        return count;
}

static const struct file_operations fops_tlbflush = {
        .read = tlbflush_read_file,
        .write = tlbflush_write_file,
        .llseek = default_llseek,
};

static int __init create_tlb_single_page_flush_ceiling(void)
{
        debugfs_create_file("tlb_single_page_flush_ceiling", S_IRUSR | S_IWUSR,
                            arch_debugfs_dir, NULL, &fops_tlbflush);
        return 0;
}
late_initcall(create_tlb_single_page_flush_ceiling);
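
/*
 * Example usage from user space, assuming debugfs is mounted at
 * /sys/kernel/debug (arch_debugfs_dir corresponds to the "x86"
 * subdirectory there):
 *
 *   # cat /sys/kernel/debug/x86/tlb_single_page_flush_ceiling
 *   33
 *   # echo 50 > /sys/kernel/debug/x86/tlb_single_page_flush_ceiling
 */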

#endif /* CONFIG_SMP */