#include <linux/init.h>

#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/module.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/apic.h>
#include <asm/uv/uv.h>

DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate)
			= { &init_mm, 0, };

/*
 *	Smarter SMP flushing macros.
 *		c/o Linus Torvalds.
 *
 *	These mean you can really definitely utterly forget about
 *	writing to user space from interrupts. (It's not allowed anyway.)
 *
 *	Optimizations Manfred Spraul <manfred@colorfullife.com>
 *
 *	More scalable flush, from Andi Kleen
 *
 *	To avoid global state, use 8 different call vectors.
 *	Each CPU uses a specific vector to trigger flushes on other
 *	CPUs. Depending on the received vector, the target CPUs look into
 *	the right array slot for the flush data.
 *
 *	With more than 8 CPUs they are hashed to the 8 available
 *	vectors. The limited global vector space forces us to this right now.
 *	In the future, when interrupts are split into per-CPU domains, this
 *	could be fixed, at the cost of triggering multiple IPIs in some cases.
 */
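
/*
 * Illustrative sketch, not part of the original file: how a sending
 * CPU's id maps onto one of the eight vectors (and thus one
 * flush_state slot), matching the sender selection done in
 * flush_tlb_others_ipi() below. On a 16-CPU system, CPUs 1 and 9
 * hash to the same vector and serialize on the same slot's lock.
 */
static inline unsigned int example_invalidate_vector(unsigned int cpu)
{
	return INVALIDATE_TLB_VECTOR_START +
	       (cpu % NUM_INVALIDATE_TLB_VECTORS);
}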

union smp_flush_state {
	struct {
		struct mm_struct *flush_mm;
		unsigned long flush_va;
		spinlock_t tlbstate_lock;
		DECLARE_BITMAP(flush_cpumask, NR_CPUS);
	};
	char pad[CONFIG_X86_INTERNODE_CACHE_BYTES];
} ____cacheline_internodealigned_in_smp;

/*
 * State is put into the per CPU data section, but padded
 * to a full cache line because other CPUs can access it and we don't
 * want false sharing in the per cpu data segment.
 */
static union smp_flush_state flush_state[NUM_INVALIDATE_TLB_VECTORS];

/*
 * We cannot call mmdrop() because we are in interrupt context;
 * instead update mm->cpu_vm_mask.
 */
void leave_mm(int cpu)
{
	if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		BUG();
	cpu_clear(cpu, percpu_read(cpu_tlbstate.active_mm)->cpu_vm_mask);
	load_cr3(swapper_pg_dir);
}
EXPORT_SYMBOL_GPL(leave_mm);

/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
 *	Stop IPI delivery for the old mm. This is not synchronized with
 *	the other cpus, but smp_invalidate_interrupt ignores flush IPIs
 *	for the wrong mm, and in the worst case we perform a superfluous
 *	tlb flush.
 * 1a2) set cpu mmu_state to TLBSTATE_OK
 *	Now smp_invalidate_interrupt won't call leave_mm if cpu0
 *	was in lazy tlb mode.
 * 1a3) update cpu active_mm
 *	Now cpu0 accepts tlb flushes for the new mm.
 * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
 *	Now the other cpus will send tlb flush IPIs.
 * 1a5) change cr3.
 * 1b) thread switch without mm change
 *	cpu active_mm is correct, cpu0 already handles
 *	flush IPIs.
 * 1b1) set cpu mmu_state to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *	Atomically set the bit [other cpus will start sending flush IPIs],
 *	and test the bit.
 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %esp, i.e. current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %esp, i.e. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu mmu_state is local to each cpu, so there are
 * no write/read ordering problems.
 */
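
/*
 * For reference, a condensed sketch of the 1a) ordering above, as
 * implemented by switch_mm() in asm/mmu_context.h (illustrative
 * excerpt, not part of this file):
 *
 *	cpu_clear(cpu, prev->cpu_vm_mask);		<- 1a1
 *	percpu_write(cpu_tlbstate.state, TLBSTATE_OK);	<- 1a2
 *	percpu_write(cpu_tlbstate.active_mm, next);	<- 1a3
 *	cpu_set(cpu, next->cpu_vm_mask);		<- 1a4
 *	load_cr3(next->pgd);				<- 1a5, change cr3
 */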

/*
 * TLB flush IPI:
 *
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in the lazy tlb mode.
 *
 * Interrupts are disabled.
 */

/*
 * FIXME: use of asmlinkage is not consistent. On x86_64 it's a noop,
 * still used for documentation purposes, but the usage is slightly
 * inconsistent. On x86_32, asmlinkage is regparm(0), but interrupt
 * entry calls it with the first parameter in %eax. Maybe define
 * intrlinkage?
 */
#ifdef CONFIG_X86_64
asmlinkage
#endif
void smp_invalidate_interrupt(struct pt_regs *regs)
{
	unsigned int cpu;
	unsigned int sender;
	union smp_flush_state *f;

	cpu = smp_processor_id();
	/*
	 * orig_rax contains the negated interrupt vector.
	 * Use that to determine where the sender put the data.
	 */
	sender = ~regs->orig_ax - INVALIDATE_TLB_VECTOR_START;
	f = &flush_state[sender];

	if (!cpumask_test_cpu(cpu, to_cpumask(f->flush_cpumask)))
		goto out;
		/*
		 * This was a BUG() but until someone can quote me the
		 * line from the intel manual that guarantees an IPI to
		 * multiple CPUs is retried _only_ on the erroring CPUs,
		 * it's staying as a return.
		 *
		 * BUG();
		 */

	if (f->flush_mm == percpu_read(cpu_tlbstate.active_mm)) {
		if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
			if (f->flush_va == TLB_FLUSH_ALL)
				local_flush_tlb();
			else
				__flush_tlb_one(f->flush_va);
		} else
			leave_mm(cpu);
	}
out:
	ack_APIC_irq();
	smp_mb__before_clear_bit();
	cpumask_clear_cpu(cpu, to_cpumask(f->flush_cpumask));
	smp_mb__after_clear_bit();
	inc_irq_stat(irq_tlb_count);
}

static void flush_tlb_others_ipi(const struct cpumask *cpumask,
				 struct mm_struct *mm, unsigned long va)
{
	unsigned int sender;
	union smp_flush_state *f;

	/* Caller has disabled preemption */
	sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
	f = &flush_state[sender];

	/*
	 * Could avoid this lock when
	 * num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is
	 * probably not worth checking this for a cache-hot lock.
	 */
	spin_lock(&f->tlbstate_lock);

	f->flush_mm = mm;
	f->flush_va = va;
	cpumask_andnot(to_cpumask(f->flush_cpumask),
		       cpumask, cpumask_of(smp_processor_id()));

	/*
	 * Make the above memory operations globally visible before
	 * sending the IPI.
	 */
	smp_mb();

	/* We have to send the IPI only to the CPUs affected. */
	apic->send_IPI_mask(to_cpumask(f->flush_cpumask),
			    INVALIDATE_TLB_VECTOR_START + sender);

	while (!cpumask_empty(to_cpumask(f->flush_cpumask)))
		cpu_relax();

	f->flush_mm = NULL;
	f->flush_va = 0;
	spin_unlock(&f->tlbstate_lock);
}
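
/*
 * Worked example (illustrative): CPU 10 flushing for an mm that is
 * also loaded on CPUs 2 and 5. sender = 10 % 8 = 2, so flush_state[2]
 * is locked and filled in, vector INVALIDATE_TLB_VECTOR_START + 2 is
 * sent to {2, 5}, and the sender spins in cpu_relax() until both
 * targets have cleared themselves from flush_cpumask in
 * smp_invalidate_interrupt().
 */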

void native_flush_tlb_others(const struct cpumask *cpumask,
			     struct mm_struct *mm, unsigned long va)
{
	if (is_uv_system()) {
		unsigned int cpu;

		/*
		 * UV hardware can flush remote TLBs without IPIs;
		 * uv_flush_tlb_others() returns the cpus that still
		 * need the conventional IPI-based flush, if any.
		 */
		cpu = get_cpu();
		cpumask = uv_flush_tlb_others(cpumask, mm, va, cpu);
		if (cpumask)
			flush_tlb_others_ipi(cpumask, mm, va);
		put_cpu();
		return;
	}
	flush_tlb_others_ipi(cpumask, mm, va);
}

static int __cpuinit init_smp_flush(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(flush_state); i++)
		spin_lock_init(&flush_state[i].tlbstate_lock);

	return 0;
}
core_initcall(init_smp_flush);

void flush_tlb_current_task(void)
{
	struct mm_struct *mm = current->mm;

	preempt_disable();

	local_flush_tlb();
	/* Does any other cpu have this mm loaded? Then it must flush too. */
	if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL);
	preempt_enable();
}

void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if (current->active_mm == mm) {
		if (current->mm)
			local_flush_tlb();
		else
			leave_mm(smp_processor_id());
	}
	if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL);

	preempt_enable();
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();

	if (current->active_mm == mm) {
		if (current->mm)
			__flush_tlb_one(va);
		else
			leave_mm(smp_processor_id());
	}

	if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(&mm->cpu_vm_mask, mm, va);

	preempt_enable();
}
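
/*
 * Typical callers (illustrative, from generic mm code):
 * ptep_clear_flush() uses flush_tlb_page() after clearing a single
 * pte, while tearing down a larger mapping ends in flush_tlb_mm()
 * via the mmu_gather machinery once all ptes are gone.
 */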

static void do_flush_tlb_all(void *info)
{
	unsigned long cpu = smp_processor_id();

	__flush_tlb_all();
	if (percpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
		leave_mm(cpu);
}

void flush_tlb_all(void)
{
	on_each_cpu(do_flush_tlb_all, NULL, 1);
}