#include <linux/init.h>

#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/interrupt.h>

#include <asm/mtrr.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/mach_apic.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/apicdef.h>
#include <asm/idle.h>

/*
 * Smarter SMP flushing macros.
 * c/o Linus Torvalds.
 *
 * These mean you can really definitely utterly forget about
 * writing to user space from interrupts. (It's not allowed anyway.)
 *
 * Optimizations Manfred Spraul <manfred@colorfullife.com>
 *
 * More scalable flush, from Andi Kleen
 *
 * To avoid global state, use 8 different call vectors.
 * Each CPU uses a specific vector to trigger flushes on other
 * CPUs. Depending on the received vector, the target CPUs look into
 * the right per-cpu variable for the flush data.
 *
 * With more than 8 CPUs they are hashed to the 8 available
 * vectors. The limited global vector space forces us to do this for now.
 * In the future, when interrupts are split into per-CPU domains, this
 * could be fixed, at the cost of triggering multiple IPIs in some cases.
 */
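/*
 * For illustration: the sender-to-vector mapping used in
 * native_flush_tlb_others() below is a plain modulo hash, so with e.g.
 * 16 CPUs, CPU 1 and CPU 9 both map to vector slot 1 and serialize on
 * that slot's tlbstate_lock:
 *
 *	sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
 *	f = &per_cpu(flush_state, sender);
 */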

union smp_flush_state {
	struct {
		cpumask_t flush_cpumask;
		struct mm_struct *flush_mm;
		unsigned long flush_va;
		spinlock_t tlbstate_lock;
	};
	char pad[SMP_CACHE_BYTES];
} ____cacheline_aligned;

/* State is put into the per CPU data section, but padded
   to a full cache line because other CPUs can access it and we don't
   want false sharing in the per cpu data segment. */
static DEFINE_PER_CPU(union smp_flush_state, flush_state);

/*
 * We cannot call mmdrop() because we are in interrupt context;
 * instead update mm->cpu_vm_mask.
 */
void leave_mm(int cpu)
{
	if (read_pda(mmu_state) == TLBSTATE_OK)
		BUG();
	cpu_clear(cpu, read_pda(active_mm)->cpu_vm_mask);
	load_cr3(swapper_pg_dir);
}
EXPORT_SYMBOL_GPL(leave_mm);

/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
 *	Stop ipi delivery for the old mm. This is not synchronized with
 *	the other cpus, but smp_invalidate_interrupt ignores flush ipis
 *	for the wrong mm, so in the worst case we perform a superfluous
 *	tlb flush.
 * 1a2) set cpu mmu_state to TLBSTATE_OK
 *	Now smp_invalidate_interrupt won't call leave_mm if cpu0
 *	was in lazy tlb mode.
 * 1a3) update cpu active_mm
 *	Now cpu0 accepts tlb flushes for the new mm.
 * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
 *	Now the other cpus will send tlb flush ipis.
 * 1a5) change cr3.
 * 1b) thread switch without mm change
 *	cpu active_mm is correct, cpu0 already handles
 *	flush ipis.
 * 1b1) set cpu mmu_state to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *	Atomically set the bit [other cpus will start sending flush ipis],
 *	and test the bit.
 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %%esp, i.e. current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %%esp, i.e. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu mmu_state is local to each cpu, so there
 * are no write/read ordering problems.
 */
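/*
 * As a rough sketch (not the actual switch_mm() source), the 1b) path
 * above amounts to something like:
 *
 *	write_pda(mmu_state, TLBSTATE_OK);		(1b1)
 *	if (!cpu_test_and_set(cpu, mm->cpu_vm_mask))	(1b2)
 *		local_flush_tlb();			(1b3)
 */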

/*
 * TLB flush IPI:
 *
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in the lazy tlb mode.
 *
 * Interrupts are disabled.
 */

asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
{
	int cpu;
	int sender;
	union smp_flush_state *f;

	cpu = smp_processor_id();
	/*
	 * orig_ax contains the negated interrupt vector.
	 * Use that to determine where the sender put the data.
	 */
	sender = ~regs->orig_ax - INVALIDATE_TLB_VECTOR_START;
	f = &per_cpu(flush_state, sender);

	if (!cpu_isset(cpu, f->flush_cpumask))
		goto out;
	/*
	 * This was a BUG() but until someone can quote me the
	 * line from the intel manual that guarantees an IPI to
	 * multiple CPUs is retried _only_ on the erroring CPUs
	 * it's staying as a return
	 *
	 * BUG();
	 */

	if (f->flush_mm == read_pda(active_mm)) {
		if (read_pda(mmu_state) == TLBSTATE_OK) {
			if (f->flush_va == TLB_FLUSH_ALL)
				local_flush_tlb();
			else
				__flush_tlb_one(f->flush_va);
		} else
			leave_mm(cpu);
	}
out:
	ack_APIC_irq();
	/* Clearing our bit releases the sender spinning in flush_tlb_others */
	cpu_clear(cpu, f->flush_cpumask);
	add_pda(irq_tlb_count, 1);
}

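/*
 * native_flush_tlb_others - request TLB flushes on other CPUs.
 * @cpumaskp: CPUs to flush (the caller is expected to have removed
 *	itself from the mask already)
 * @mm: mm whose mappings are being flushed
 * @va: single virtual address to flush, or TLB_FLUSH_ALL
 *
 * Picks a vector slot hashed from the sending CPU, publishes the flush
 * data there, sends the IPI and spins until every target has cleared
 * its bit in flush_cpumask.
 */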
void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
			     unsigned long va)
{
	int sender;
	union smp_flush_state *f;
	cpumask_t cpumask = *cpumaskp;

	/* Caller has disabled preemption */
	sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
	f = &per_cpu(flush_state, sender);

	/*
	 * Could avoid this lock when
	 * num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is
	 * probably not worth checking this for a cache-hot lock.
	 */
	spin_lock(&f->tlbstate_lock);

	f->flush_mm = mm;
	f->flush_va = va;
	cpus_or(f->flush_cpumask, cpumask, f->flush_cpumask);

	/*
	 * Send the IPI only to the CPUs affected.
	 */
	send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR_START + sender);

	/* Wait until every target has cleared its bit in the acknowledge mask */
	while (!cpus_empty(f->flush_cpumask))
		cpu_relax();

	f->flush_mm = NULL;
	f->flush_va = 0;
	spin_unlock(&f->tlbstate_lock);
}

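/* Set up the spinlock in each possible CPU's flush_state slot at boot. */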
int __cpuinit init_smp_flush(void)
{
	int i;

	for_each_cpu_mask(i, cpu_possible_map) {
		spin_lock_init(&per_cpu(flush_state, i).tlbstate_lock);
	}
	return 0;
}
core_initcall(init_smp_flush);

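/*
 * Flush the current task's mm: flush the local TLB directly and IPI
 * every other CPU that has this mm loaded.
 */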
void flush_tlb_current_task(void)
{
	struct mm_struct *mm = current->mm;
	cpumask_t cpu_mask;

	preempt_disable();
	cpu_mask = mm->cpu_vm_mask;
	cpu_clear(smp_processor_id(), cpu_mask);

	local_flush_tlb();
	if (!cpus_empty(cpu_mask))
		flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
	preempt_enable();
}

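/*
 * Flush an entire mm. The local CPU either flushes its TLB (mm active)
 * or leaves the mm when in lazy TLB mode; all other CPUs in
 * mm->cpu_vm_mask are flushed by IPI.
 */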
void flush_tlb_mm(struct mm_struct *mm)
{
	cpumask_t cpu_mask;

	preempt_disable();
	cpu_mask = mm->cpu_vm_mask;
	cpu_clear(smp_processor_id(), cpu_mask);

	if (current->active_mm == mm) {
		if (current->mm)
			local_flush_tlb();
		else
			leave_mm(smp_processor_id());
	}
	if (!cpus_empty(cpu_mask))
		flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);

	preempt_enable();
}

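/*
 * Flush one page of vma's mm: flush the single entry locally (or leave
 * the mm when in lazy TLB mode) and IPI the other CPUs using the mm.
 */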
void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
{
	struct mm_struct *mm = vma->vm_mm;
	cpumask_t cpu_mask;

	preempt_disable();
	cpu_mask = mm->cpu_vm_mask;
	cpu_clear(smp_processor_id(), cpu_mask);

	if (current->active_mm == mm) {
		if (current->mm)
			__flush_tlb_one(va);
		else
			leave_mm(smp_processor_id());
	}

	if (!cpus_empty(cpu_mask))
		flush_tlb_others(cpu_mask, mm, va);

	preempt_enable();
}

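/*
 * Helper run on every CPU by flush_tlb_all(): flush everything,
 * including global pages, and drop out of lazy TLB mode if we were in it.
 */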
static void do_flush_tlb_all(void *info)
{
	unsigned long cpu = smp_processor_id();

	__flush_tlb_all();
	if (read_pda(mmu_state) == TLBSTATE_LAZY)
		leave_mm(cpu);
}

void flush_tlb_all(void)
{
	on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
}