/*
 *	x86 SMP booting functions
 *
 *	(c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 *	(c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
 *
 *	Much of the core SMP work is based on previous work by Thomas Radke, to
 *	whom a great many thanks are extended.
 *
 *	Thanks to Intel for making available several different Pentium,
 *	Pentium Pro and Pentium-II/Xeon MP machines.
 *	Original development of Linux SMP code supported by Caldera.
 *
 *	This code is released under the GNU General Public License version 2 or
 *	later.
 *
 *	Fixes
 *		Felix Koop	:	NR_CPUS used properly
 *		Jose Renau	:	Handle single CPU case.
 *		Alan Cox	:	By repeated request 8) - Total BogoMIPS report.
 *		Greg Wright	:	Fix for kernel stacks panic.
 *		Erich Boleyn	:	MP v1.4 and additional changes.
 *	Matthias Sattler	:	Changes for 2.1 kernel map.
 *	Michel Lespinasse	:	Changes for 2.1 kernel map.
 *	Michael Chastain	:	Change trampoline.S to gnu as.
 *		Alan Cox	:	Dumb bug: 'B' step PPro's are fine
 *		Ingo Molnar	:	Added APIC timers, based on code
 *					from Jose Renau
 *		Ingo Molnar	:	various cleanups and rewrites
 *		Tigran Aivazian	:	fixed "0.00 in /proc/uptime on SMP" bug.
 *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs
 *		Martin J. Bligh	:	Added support for multi-quad systems
 *		Dave Jones	:	Report invalid combinations of Athlon CPUs.
 *		Rusty Russell	:	Hacked into shape for new "hotplug" boot process.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/smp_lock.h>
#include <linux/bootmem.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/percpu.h>
#include <linux/nmi.h>

#include <linux/delay.h>
#include <linux/mc146818rtc.h>
#include <asm/tlbflush.h>
#include <asm/desc.h>
#include <asm/arch_hooks.h>
#include <asm/nmi.h>

#include <mach_apic.h>
#include <mach_wakecpu.h>
#include <smpboot_hooks.h>
#include <asm/vmi.h>

/* Set if we find a B stepping CPU */
static int __devinitdata smp_b_stepping;

/* Number of siblings per CPU package */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* Last level cache ID of each logical CPU */
int cpu_llc_id[NR_CPUS] __cpuinitdata = {[0 ... NR_CPUS-1] = BAD_APICID};

/* representing HT siblings of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);

/* representing HT and core siblings of each logical CPU */
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_core_map);

/* bitmap of online cpus */
cpumask_t cpu_online_map __read_mostly;
EXPORT_SYMBOL(cpu_online_map);

cpumask_t cpu_callin_map;
cpumask_t cpu_callout_map;
EXPORT_SYMBOL(cpu_callout_map);
cpumask_t cpu_possible_map;
EXPORT_SYMBOL(cpu_possible_map);
static cpumask_t smp_commenced_mask;

/* Per CPU bogomips and other parameters */
struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
EXPORT_SYMBOL(cpu_data);

u8 x86_cpu_to_apicid[NR_CPUS] __read_mostly =
			{ [0 ... NR_CPUS-1] = 0xff };
EXPORT_SYMBOL(x86_cpu_to_apicid);

u8 apicid_2_node[MAX_APICID];

DEFINE_PER_CPU(unsigned long, this_cpu_off);
EXPORT_PER_CPU_SYMBOL(this_cpu_off);

/*
 * Trampoline 80x86 program as an array.
 */

extern unsigned char trampoline_data[];
extern unsigned char trampoline_end[];
static unsigned char *trampoline_base;
static int trampoline_exec;

static void map_cpu_to_logical_apicid(void);

/* State of each CPU. */
DEFINE_PER_CPU(int, cpu_state) = { 0 };

/*
 * Currently trivial. Write the real->protected mode
 * bootstrap into the page concerned. The caller
 * has made sure it's suitably aligned.
 */

static unsigned long __devinit setup_trampoline(void)
{
	memcpy(trampoline_base, trampoline_data, trampoline_end - trampoline_data);
	return virt_to_phys(trampoline_base);
}
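
/*
 * A note on what the trampoline copy is for (the code itself lives in
 * trampoline.S, not here): the AP begins fetching it in 16-bit real
 * mode, via the warm-reset vector or a STARTUP IPI, switches itself
 * into protected mode, and jumps to startup_32 in head.S, which in
 * turn brings it into start_secondary() below.  That is why the copy
 * must sit in real-mode-addressable low memory.
 */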

/*
 * We are called very early to get the low memory for the
 * SMP bootup trampoline page.
 */
void __init smp_alloc_memory(void)
{
	trampoline_base = (void *) alloc_bootmem_low_pages(PAGE_SIZE);
	/*
	 * Has to be in very low memory so we can execute
	 * real-mode AP code.
	 */
	if (__pa(trampoline_base) >= 0x9F000)
		BUG();
	/*
	 * Make the SMP trampoline executable:
	 */
	trampoline_exec = set_kernel_exec((unsigned long)trampoline_base, 1);
}

/*
 * The bootstrap kernel entry code has set these up. Save them for
 * a given CPU
 */

static void __cpuinit smp_store_cpu_info(int id)
{
	struct cpuinfo_x86 *c = cpu_data + id;

	*c = boot_cpu_data;
	if (id != 0)
		identify_secondary_cpu(c);
	/*
	 * Mask B, Pentium, but not Pentium MMX
	 */
	if (c->x86_vendor == X86_VENDOR_INTEL &&
	    c->x86 == 5 &&
	    c->x86_mask >= 1 && c->x86_mask <= 4 &&
	    c->x86_model <= 3)
		/*
		 * Remember we have B step Pentia with bugs
		 */
		smp_b_stepping = 1;

	/*
	 * Certain Athlons might work (for various values of 'work') in SMP
	 * but they are not certified as MP capable.
	 */
	if ((c->x86_vendor == X86_VENDOR_AMD) && (c->x86 == 6)) {

		if (num_possible_cpus() == 1)
			goto valid_k7;

		/* Athlon 660/661 is valid. */
		if ((c->x86_model == 6) && ((c->x86_mask == 0) || (c->x86_mask == 1)))
			goto valid_k7;

		/* Duron 670 is valid */
		if ((c->x86_model == 7) && (c->x86_mask == 0))
			goto valid_k7;

		/*
		 * Athlon 662, Duron 671, and Athlon >model 7 have capability bit.
		 * It's worth noting that the A5 stepping (662) of some Athlon XP's
		 * have the MP bit set.
		 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for more.
		 */
		if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
		    ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
		     (c->x86_model > 7))
			if (cpu_has_mp)
				goto valid_k7;

		/* If we get here, it's not a certified SMP capable AMD system. */
		add_taint(TAINT_UNSAFE_SMP);
	}

valid_k7:
	;
}
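
/*
 * Decoder ring for the "Athlon 660/661" style names above: they
 * appear to pack family/model/stepping, so 660 is the family-6 (K7)
 * part with x86_model 6 and x86_mask (stepping) 0 - exactly the
 * tuples the whitelist tests.  Those early steppings predate the MP
 * capability bit checked via cpu_has_mp, hence the explicit list.
 */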

extern void calibrate_delay(void);

static atomic_t init_deasserted;

static void __cpuinit smp_callin(void)
{
	int cpuid, phys_id;
	unsigned long timeout;

	/*
	 * If woken up by an INIT in an 82489DX configuration
	 * we may get here before an INIT-deassert IPI reaches
	 * our local APIC. We have to wait for the IPI or we'll
	 * lock up on an APIC access.
	 */
	wait_for_init_deassert(&init_deasserted);

	/*
	 * (This works even if the APIC is not enabled.)
	 */
	phys_id = GET_APIC_ID(apic_read(APIC_ID));
	cpuid = smp_processor_id();
	if (cpu_isset(cpuid, cpu_callin_map)) {
		printk("huh, phys CPU#%d, CPU#%d already present??\n",
					phys_id, cpuid);
		BUG();
	}
	Dprintk("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);

	/*
	 * STARTUP IPIs are fragile beasts as they might sometimes
	 * trigger some glue motherboard logic. Complete APIC bus
	 * silence for 1 second, this overestimates the time the
	 * boot CPU is spending to send the up to 2 STARTUP IPIs
	 * by a factor of two. This should be enough.
	 */

	/*
	 * Waiting 2s total for startup (udelay is not yet working)
	 */
	timeout = jiffies + 2*HZ;
	while (time_before(jiffies, timeout)) {
		/*
		 * Has the boot CPU finished its STARTUP sequence?
		 */
		if (cpu_isset(cpuid, cpu_callout_map))
			break;
		rep_nop();
	}

	if (!time_before(jiffies, timeout)) {
		printk("BUG: CPU%d started up but did not get a callout!\n",
			cpuid);
		BUG();
	}

	/*
	 * the boot CPU has finished the init stage and is spinning
	 * on callin_map until we finish. We are free to set up this
	 * CPU, first the APIC. (this is probably redundant on most
	 * boards)
	 */

	Dprintk("CALLIN, before setup_local_APIC().\n");
	smp_callin_clear_local_apic();
	setup_local_APIC();
	map_cpu_to_logical_apicid();

	/*
	 * Get our bogomips.
	 */
	calibrate_delay();
	Dprintk("Stack at about %p\n", &cpuid);

	/*
	 * Save our processor parameters
	 */
	smp_store_cpu_info(cpuid);

	/*
	 * Allow the master to continue.
	 */
	cpu_set(cpuid, cpu_callin_map);
}
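
/*
 * Bring-up is thus a two-sided handshake: the BSP sets the AP's bit
 * in cpu_callout_map from do_boot_cpu() and polls cpu_callin_map,
 * while the AP spins above until its callout bit appears, finishes
 * its local APIC and delay-loop setup, and answers through
 * cpu_callin_map.  Each side gives up after a few seconds: the AP
 * BUG()s on timeout, the BSP just reports the CPU as not responding.
 */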

static int cpucount;

/* maps the cpu to the sched domain representing multi-core */
cpumask_t cpu_coregroup_map(int cpu)
{
	struct cpuinfo_x86 *c = cpu_data + cpu;
	/*
	 * For perf, we return last level cache shared map.
	 * And for power savings, we return cpu_core_map
	 */
	if (sched_mc_power_savings || sched_smt_power_savings)
		return cpu_core_map[cpu];
	else
		return c->llc_shared_map;
}

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

static inline void
set_cpu_sibling_map(int cpu)
{
	int i;
	struct cpuinfo_x86 *c = cpu_data;

	cpu_set(cpu, cpu_sibling_setup_map);

	if (smp_num_siblings > 1) {
		for_each_cpu_mask(i, cpu_sibling_setup_map) {
			if (c[cpu].phys_proc_id == c[i].phys_proc_id &&
			    c[cpu].cpu_core_id == c[i].cpu_core_id) {
				cpu_set(i, cpu_sibling_map[cpu]);
				cpu_set(cpu, cpu_sibling_map[i]);
				cpu_set(i, cpu_core_map[cpu]);
				cpu_set(cpu, cpu_core_map[i]);
				cpu_set(i, c[cpu].llc_shared_map);
				cpu_set(cpu, c[i].llc_shared_map);
			}
		}
	} else {
		cpu_set(cpu, cpu_sibling_map[cpu]);
	}

	cpu_set(cpu, c[cpu].llc_shared_map);

	if (current_cpu_data.x86_max_cores == 1) {
		cpu_core_map[cpu] = cpu_sibling_map[cpu];
		c[cpu].booted_cores = 1;
		return;
	}

	for_each_cpu_mask(i, cpu_sibling_setup_map) {
		if (cpu_llc_id[cpu] != BAD_APICID &&
		    cpu_llc_id[cpu] == cpu_llc_id[i]) {
			cpu_set(i, c[cpu].llc_shared_map);
			cpu_set(cpu, c[i].llc_shared_map);
		}
		if (c[cpu].phys_proc_id == c[i].phys_proc_id) {
			cpu_set(i, cpu_core_map[cpu]);
			cpu_set(cpu, cpu_core_map[i]);
			/*
			 * Does this new cpu bring up a new core?
			 */
			if (cpus_weight(cpu_sibling_map[cpu]) == 1) {
				/*
				 * for each core in package, increment
				 * the booted_cores for this new cpu
				 */
				if (first_cpu(cpu_sibling_map[i]) == i)
					c[cpu].booted_cores++;
				/*
				 * increment the core count for all
				 * the other cpus in this package
				 */
				if (i != cpu)
					c[i].booted_cores++;
			} else if (i != cpu && !c[cpu].booted_cores)
				c[cpu].booted_cores = c[i].booted_cores;
		}
	}
}
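
/*
 * A worked example of the maps built above, for one dual-core,
 * HT-enabled package enumerated as cpus 0-3 with cpus 0-1 on core 0:
 * cpu_sibling_map[0] ends up as { 0, 1 } (HT threads of one core),
 * cpu_core_map[0] as { 0, 1, 2, 3 } (same physical package),
 * llc_shared_map follows cpu_llc_id, and booted_cores counts each
 * core in the package once rather than once per thread.
 */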

/*
 * Activate a secondary processor.
 */
static void __cpuinit start_secondary(void *unused)
{
	/*
	 * Don't put *anything* before cpu_init(); SMP booting is so
	 * fragile that we want to limit the things done here to the
	 * most necessary things.
	 */
#ifdef CONFIG_VMI
	vmi_bringup();
#endif
	cpu_init();
	preempt_disable();
	smp_callin();
	while (!cpu_isset(smp_processor_id(), smp_commenced_mask))
		rep_nop();
	/*
	 * Check TSC synchronization with the BP:
	 */
	check_tsc_sync_target();

	setup_secondary_clock();
	if (nmi_watchdog == NMI_IO_APIC) {
		disable_8259A_irq(0);
		enable_NMI_through_LVT0(NULL);
		enable_8259A_irq(0);
	}
	/*
	 * low-memory mappings have been cleared, flush them from
	 * the local TLBs too.
	 */
	local_flush_tlb();

	/* This must be done before setting cpu_online_map */
	set_cpu_sibling_map(raw_smp_processor_id());
	wmb();

	/*
	 * We need to hold call_lock, so there is no inconsistency
	 * between the time smp_call_function() determines the number of
	 * IPI recipients, and the time when the determination is made
	 * for which cpus receive the IPI. Holding this
	 * lock helps us to not include this cpu in a currently in progress
	 * smp_call_function().
	 */
	lock_ipi_call_lock();
	cpu_set(smp_processor_id(), cpu_online_map);
	unlock_ipi_call_lock();
	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;

	/* We can take interrupts now: we're officially "up". */
	local_irq_enable();

	wmb();
	cpu_idle();
}

/*
 * Everything has been set up for the secondary
 * CPUs - they just need to reload everything
 * from the task structure
 * This function must not return.
 */
void __devinit initialize_secondary(void)
{
	/*
	 * We don't actually need to load the full TSS,
	 * basically just the stack pointer and the eip.
	 */

	asm volatile(
		"movl %0,%%esp\n\t"
		"jmp *%1"
		:
		:"m" (current->thread.esp),"m" (current->thread.eip));
}
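
/*
 * The esp/eip pair loaded above was prepared by do_boot_cpu() below:
 * thread.eip points at start_secondary() and thread.esp at the idle
 * task's kernel stack, so the asm simply switches this CPU onto its
 * own idle thread - which is why the function can never return.
 */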

/* Static state in head.S used to set up a CPU */
extern struct {
	void * esp;
	unsigned short ss;
} stack_start;

#ifdef CONFIG_NUMA

/* which logical CPUs are on which nodes */
cpumask_t node_2_cpu_mask[MAX_NUMNODES] __read_mostly =
				{ [0 ... MAX_NUMNODES-1] = CPU_MASK_NONE };
EXPORT_SYMBOL(node_2_cpu_mask);
/* which node each logical CPU is on */
int cpu_2_node[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = 0 };
EXPORT_SYMBOL(cpu_2_node);

/* set up a mapping between cpu and node. */
static inline void map_cpu_to_node(int cpu, int node)
{
	printk("Mapping cpu %d to node %d\n", cpu, node);
	cpu_set(cpu, node_2_cpu_mask[node]);
	cpu_2_node[cpu] = node;
}

/* undo a mapping between cpu and node. */
static inline void unmap_cpu_to_node(int cpu)
{
	int node;

	printk("Unmapping cpu %d from all nodes\n", cpu);
	for (node = 0; node < MAX_NUMNODES; node++)
		cpu_clear(cpu, node_2_cpu_mask[node]);
	cpu_2_node[cpu] = 0;
}
#else /* !CONFIG_NUMA */

#define map_cpu_to_node(cpu, node)	({})
#define unmap_cpu_to_node(cpu)	({})

#endif /* CONFIG_NUMA */

u8 cpu_2_logical_apicid[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID };

static void map_cpu_to_logical_apicid(void)
{
	int cpu = smp_processor_id();
	int apicid = logical_smp_processor_id();
	int node = apicid_to_node(apicid);

	if (!node_online(node))
		node = first_online_node;

	cpu_2_logical_apicid[cpu] = apicid;
	map_cpu_to_node(cpu, node);
}

static void unmap_cpu_to_logical_apicid(int cpu)
{
	cpu_2_logical_apicid[cpu] = BAD_APICID;
	unmap_cpu_to_node(cpu);
}

#if APIC_DEBUG
static inline void __inquire_remote_apic(int apicid)
{
	int i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
	char *names[] = { "ID", "VERSION", "SPIV" };
	int timeout, status;

	printk("Inquiring remote APIC #%d...\n", apicid);

	for (i = 0; i < ARRAY_SIZE(regs); i++) {
		printk("... APIC #%d %s: ", apicid, names[i]);

		/*
		 * Wait for idle.
		 */
		apic_wait_icr_idle();

		apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
		apic_write_around(APIC_ICR, APIC_DM_REMRD | regs[i]);

		timeout = 0;
		do {
			udelay(100);
			status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK;
		} while (status == APIC_ICR_RR_INPROG && timeout++ < 1000);

		switch (status) {
		case APIC_ICR_RR_VALID:
			status = apic_read(APIC_RRR);
			printk("%08x\n", status);
			break;
		default:
			printk("failed\n");
		}
	}
}
#endif

#ifdef WAKE_SECONDARY_VIA_NMI
/*
 * Poke the other CPU in the eye via NMI to wake it up. Remember that the normal
 * INIT, INIT, STARTUP sequence will reset the chip hard for us, and this
 * won't ... remember to clear down the APIC, etc later.
 */
static int __devinit
wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip)
{
	unsigned long send_status = 0, accept_status = 0;
	int timeout, maxlvt;

	/* Target chip */
	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(logical_apicid));

	/* Boot on the stack */
	/* Kick the second */
	apic_write_around(APIC_ICR, APIC_DM_NMI | APIC_DEST_LOGICAL);

	Dprintk("Waiting for send to finish...\n");
	timeout = 0;
	do {
		Dprintk("+");
		udelay(100);
		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
	} while (send_status && (timeout++ < 1000));

	/*
	 * Give the other CPU some time to accept the IPI.
	 */
	udelay(200);
	/*
	 * Due to the Pentium erratum 3AP.
	 */
	maxlvt = lapic_get_maxlvt();
	if (maxlvt > 3) {
		apic_read_around(APIC_SPIV);
		apic_write(APIC_ESR, 0);
	}
	accept_status = (apic_read(APIC_ESR) & 0xEF);
	Dprintk("NMI sent.\n");

	if (send_status)
		printk("APIC never delivered???\n");
	if (accept_status)
		printk("APIC delivery error (%lx).\n", accept_status);

	return (send_status | accept_status);
}
#endif	/* WAKE_SECONDARY_VIA_NMI */

#ifdef WAKE_SECONDARY_VIA_INIT
static int __devinit
wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
{
	unsigned long send_status = 0, accept_status = 0;
	int maxlvt, timeout, num_starts, j;

	/*
	 * Be paranoid about clearing APIC errors.
	 */
	if (APIC_INTEGRATED(apic_version[phys_apicid])) {
		apic_read_around(APIC_SPIV);
		apic_write(APIC_ESR, 0);
		apic_read(APIC_ESR);
	}

	Dprintk("Asserting INIT.\n");

	/*
	 * Turn INIT on target chip
	 */
	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

	/*
	 * Send IPI
	 */
	apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_INT_ASSERT
				| APIC_DM_INIT);

	Dprintk("Waiting for send to finish...\n");
	timeout = 0;
	do {
		Dprintk("+");
		udelay(100);
		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
	} while (send_status && (timeout++ < 1000));

	mdelay(10);

	Dprintk("Deasserting INIT.\n");

	/* Target chip */
	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

	/* Send IPI */
	apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT);

	Dprintk("Waiting for send to finish...\n");
	timeout = 0;
	do {
		Dprintk("+");
		udelay(100);
		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
	} while (send_status && (timeout++ < 1000));

	atomic_set(&init_deasserted, 1);

	/*
	 * Should we send STARTUP IPIs ?
	 *
	 * Determine this based on the APIC version.
	 * If we don't have an integrated APIC, don't send the STARTUP IPIs.
	 */
	if (APIC_INTEGRATED(apic_version[phys_apicid]))
		num_starts = 2;
	else
		num_starts = 0;

	/*
	 * Paravirt / VMI wants a startup IPI hook here to set up the
	 * target processor state.
	 */
	startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
			 (unsigned long) stack_start.esp);

	/*
	 * Run STARTUP IPI loop.
	 */
	Dprintk("#startup loops: %d.\n", num_starts);

	maxlvt = lapic_get_maxlvt();

	for (j = 1; j <= num_starts; j++) {
		Dprintk("Sending STARTUP #%d.\n", j);
		apic_read_around(APIC_SPIV);
		apic_write(APIC_ESR, 0);
		apic_read(APIC_ESR);
		Dprintk("After apic_write.\n");

		/*
		 * STARTUP IPI
		 */

		/* Target chip */
		apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

		/* Boot on the stack */
		/* Kick the second */
		apic_write_around(APIC_ICR, APIC_DM_STARTUP
					| (start_eip >> 12));

		/*
		 * Give the other CPU some time to accept the IPI.
		 */
		udelay(300);

		Dprintk("Startup point 1.\n");

		Dprintk("Waiting for send to finish...\n");
		timeout = 0;
		do {
			Dprintk("+");
			udelay(100);
			send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
		} while (send_status && (timeout++ < 1000));

		/*
		 * Give the other CPU some time to accept the IPI.
		 */
		udelay(200);
		/*
		 * Due to the Pentium erratum 3AP.
		 */
		if (maxlvt > 3) {
			apic_read_around(APIC_SPIV);
			apic_write(APIC_ESR, 0);
		}
		accept_status = (apic_read(APIC_ESR) & 0xEF);
		if (send_status || accept_status)
			break;
	}
	Dprintk("After Startup.\n");

	if (send_status)
		printk("APIC never delivered???\n");
	if (accept_status)
		printk("APIC delivery error (%lx).\n", accept_status);

	return (send_status | accept_status);
}
#endif	/* WAKE_SECONDARY_VIA_INIT */
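
/*
 * The sequence above is the classic INIT / INIT-deassert / STARTUP
 * ("INIT-SIPI-SIPI") protocol from the Intel MP specification.  The
 * vector field of a STARTUP IPI carries the physical page number of
 * the real-mode entry point - hence the (start_eip >> 12) ORed into
 * APIC_DM_STARTUP, and the requirement that start_eip be page-aligned
 * and below 1MB.
 */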

extern cpumask_t cpu_initialized;
static inline int alloc_cpu_id(void)
{
	cpumask_t tmp_map;
	int cpu;
	cpus_complement(tmp_map, cpu_present_map);
	cpu = first_cpu(tmp_map);
	if (cpu >= NR_CPUS)
		return -ENODEV;
	return cpu;
}

#ifdef CONFIG_HOTPLUG_CPU
static struct task_struct * __devinitdata cpu_idle_tasks[NR_CPUS];
static inline struct task_struct * alloc_idle_task(int cpu)
{
	struct task_struct *idle;

	if ((idle = cpu_idle_tasks[cpu]) != NULL) {
		/* initialize thread_struct. we really want to avoid
		 * destroying the idle thread
		 */
		idle->thread.esp = (unsigned long)task_pt_regs(idle);
		init_idle(idle, cpu);
		return idle;
	}
	idle = fork_idle(cpu);

	if (!IS_ERR(idle))
		cpu_idle_tasks[cpu] = idle;
	return idle;
}
#else
#define alloc_idle_task(cpu) fork_idle(cpu)
#endif

/* Initialize the CPU's GDT. This is either the boot CPU doing itself
   (still using the master per-cpu area), or a CPU doing it for a
   secondary which will soon come up. */
static __cpuinit void init_gdt(int cpu)
{
	struct desc_struct *gdt = get_cpu_gdt_table(cpu);

	pack_descriptor((u32 *)&gdt[GDT_ENTRY_PERCPU].a,
			(u32 *)&gdt[GDT_ENTRY_PERCPU].b,
			__per_cpu_offset[cpu], 0xFFFFF,
			0x80 | DESCTYPE_S | 0x2, 0x8);

	per_cpu(this_cpu_off, cpu) = __per_cpu_offset[cpu];
	per_cpu(cpu_number, cpu) = cpu;
}
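
/*
 * The descriptor packed above gives GDT_ENTRY_PERCPU a base of
 * __per_cpu_offset[cpu], so once %fs is loaded with __KERNEL_PERCPU
 * (see switch_to_new_gdt() below) every %fs-relative access lands in
 * this CPU's copy of the per-cpu area.  this_cpu_off and cpu_number
 * are seeded here so they are usable from the first per-cpu access.
 */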

/* Defined in head.S */
extern struct Xgt_desc_struct early_gdt_descr;

static int __cpuinit do_boot_cpu(int apicid, int cpu)
/*
 * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
 * (ie clustered apic addressing mode), this is a LOGICAL apic ID.
 * Returns zero if CPU booted OK, else error code from wakeup_secondary_cpu.
 */
{
	struct task_struct *idle;
	unsigned long boot_error;
	int timeout;
	unsigned long start_eip;
	unsigned short nmi_high = 0, nmi_low = 0;

	/*
	 * We can't use kernel_thread since we must avoid rescheduling
	 * the child.
	 */
	idle = alloc_idle_task(cpu);
	if (IS_ERR(idle))
		panic("failed fork for CPU %d", cpu);

	init_gdt(cpu);
	per_cpu(current_task, cpu) = idle;
	early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);

	idle->thread.eip = (unsigned long) start_secondary;
	/* start_eip had better be page-aligned! */
	start_eip = setup_trampoline();

	++cpucount;
	alternatives_smp_switch(1);

	/* So we see what's up */
	printk("Booting processor %d/%d eip %lx\n", cpu, apicid, start_eip);
	/* Stack for startup_32 can be just as for start_secondary onwards */
	stack_start.esp = (void *) idle->thread.esp;

	irq_ctx_init(cpu);

	x86_cpu_to_apicid[cpu] = apicid;
	/*
	 * This grunge runs the startup process for
	 * the targeted processor.
	 */

	atomic_set(&init_deasserted, 0);

	Dprintk("Setting warm reset code and vector.\n");

	store_NMI_vector(&nmi_high, &nmi_low);

	smpboot_setup_warm_reset_vector(start_eip);

	/*
	 * Starting actual IPI sequence...
	 */
	boot_error = wakeup_secondary_cpu(apicid, start_eip);

	if (!boot_error) {
		/*
		 * allow APs to start initializing.
		 */
		Dprintk("Before Callout %d.\n", cpu);
		cpu_set(cpu, cpu_callout_map);
		Dprintk("After Callout %d.\n", cpu);

		/*
		 * Wait 5s total for a response
		 */
		for (timeout = 0; timeout < 50000; timeout++) {
			if (cpu_isset(cpu, cpu_callin_map))
				break;	/* It has booted */
			udelay(100);
		}

		if (cpu_isset(cpu, cpu_callin_map)) {
			/* number CPUs logically, starting from 1 (BSP is 0) */
			Dprintk("OK.\n");
			printk("CPU%d: ", cpu);
			print_cpu_info(&cpu_data[cpu]);
			Dprintk("CPU has booted.\n");
		} else {
			boot_error = 1;
			if (*((volatile unsigned char *)trampoline_base)
					== 0xA5)
				/* trampoline started but...? */
				printk("Stuck ??\n");
			else
				/* trampoline code not run */
				printk("Not responding.\n");
			inquire_remote_apic(apicid);
		}
	}

	if (boot_error) {
		/* Try to put things back the way they were before ... */
		unmap_cpu_to_logical_apicid(cpu);
		cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */
		cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */
		cpucount--;
	} else {
		x86_cpu_to_apicid[cpu] = apicid;
		cpu_set(cpu, cpu_present_map);
	}

	/* mark "stuck" area as not stuck */
	*((volatile unsigned long *)trampoline_base) = 0;

	return boot_error;
}
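
/*
 * The 0xA5 test above works because the trampoline's first action is
 * to stamp a 0xA5A5A5A5 marker over its own start (see trampoline.S):
 * marker-but-no-callin means the AP fetched and ran the trampoline
 * yet died before smp_callin(), while no marker means the wakeup IPIs
 * never got it out of the gate.
 */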

#ifdef CONFIG_HOTPLUG_CPU
void cpu_exit_clear(void)
{
	int cpu = raw_smp_processor_id();

	idle_task_exit();

	cpucount--;
	cpu_uninit();
	irq_ctx_exit(cpu);

	cpu_clear(cpu, cpu_callout_map);
	cpu_clear(cpu, cpu_callin_map);

	cpu_clear(cpu, smp_commenced_mask);
	unmap_cpu_to_logical_apicid(cpu);
}

struct warm_boot_cpu_info {
	struct completion *complete;
	struct work_struct task;
	int apicid;
	int cpu;
};

static void __cpuinit do_warm_boot_cpu(struct work_struct *work)
{
	struct warm_boot_cpu_info *info =
		container_of(work, struct warm_boot_cpu_info, task);
	do_boot_cpu(info->apicid, info->cpu);
	complete(info->complete);
}

static int __cpuinit __smp_prepare_cpu(int cpu)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct warm_boot_cpu_info info;
	int apicid, ret;

	apicid = x86_cpu_to_apicid[cpu];
	if (apicid == BAD_APICID) {
		ret = -ENODEV;
		goto exit;
	}

	info.complete = &done;
	info.apicid = apicid;
	info.cpu = cpu;
	INIT_WORK(&info.task, do_warm_boot_cpu);

	/* init low mem mapping */
	clone_pgd_range(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS,
			min_t(unsigned long, KERNEL_PGD_PTRS, USER_PGD_PTRS));
	flush_tlb_all();
	schedule_work(&info.task);
	wait_for_completion(&done);

	zap_low_mappings();
	ret = 0;
exit:
	return ret;
}
#endif
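
/*
 * Re-onlining a CPU goes through a workqueue above, presumably so
 * that do_boot_cpu() runs from keventd's process context rather than
 * the hotplug caller's.  The identity mapping of low memory restored
 * by clone_pgd_range() is what lets the freshly kicked AP execute the
 * real-mode trampoline; zap_low_mappings() tears it down again once
 * the AP has called in.
 */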

static void smp_tune_scheduling(void)
{
	unsigned long cachesize;	/* kB */

	if (cpu_khz) {
		cachesize = boot_cpu_data.x86_cache_size;

		if (cachesize > 0)
			max_cache_size = cachesize * 1024;
	}
}

/*
 * Cycle through the processors sending APIC IPIs to boot each.
 */

static int boot_cpu_logical_apicid;
/* Where the IO area was mapped on multiquad, always 0 otherwise */
void *xquad_portio;
#ifdef CONFIG_X86_NUMAQ
EXPORT_SYMBOL(xquad_portio);
#endif

static void __init smp_boot_cpus(unsigned int max_cpus)
{
	int apicid, cpu, bit, kicked;
	unsigned long bogosum = 0;

	/*
	 * Setup boot CPU information
	 */
	smp_store_cpu_info(0); /* Final full version of the data */
	printk("CPU%d: ", 0);
	print_cpu_info(&cpu_data[0]);

	boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
	boot_cpu_logical_apicid = logical_smp_processor_id();
	x86_cpu_to_apicid[0] = boot_cpu_physical_apicid;

	current_thread_info()->cpu = 0;
	smp_tune_scheduling();

	set_cpu_sibling_map(0);

	/*
	 * If we couldn't find an SMP configuration at boot time,
	 * get out of here now!
	 */
	if (!smp_found_config && !acpi_lapic) {
		printk(KERN_NOTICE "SMP motherboard not detected.\n");
		smpboot_clear_io_apic_irqs();
		phys_cpu_present_map = physid_mask_of_physid(0);
		if (APIC_init_uniprocessor())
			printk(KERN_NOTICE "Local APIC not detected."
					   " Using dummy APIC emulation.\n");
		map_cpu_to_logical_apicid();
		cpu_set(0, cpu_sibling_map[0]);
		cpu_set(0, cpu_core_map[0]);
		return;
	}

	/*
	 * Should not be necessary because the MP table should list the boot
	 * CPU too, but we do it for the sake of robustness anyway.
	 * Makes no sense to do this check in clustered apic mode, so skip it
	 */
	if (!check_phys_apicid_present(boot_cpu_physical_apicid)) {
		printk("weird, boot CPU (#%d) not listed by the BIOS.\n",
				boot_cpu_physical_apicid);
		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
	}

	/*
	 * If we couldn't find a local APIC, then get out of here now!
	 */
	if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid]) && !cpu_has_apic) {
		printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
			boot_cpu_physical_apicid);
		printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
		smpboot_clear_io_apic_irqs();
		phys_cpu_present_map = physid_mask_of_physid(0);
		cpu_set(0, cpu_sibling_map[0]);
		cpu_set(0, cpu_core_map[0]);
		return;
	}

	verify_local_APIC();

	/*
	 * If SMP should be disabled, then really disable it!
	 */
	if (!max_cpus) {
		smp_found_config = 0;
		printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n");
		smpboot_clear_io_apic_irqs();
		phys_cpu_present_map = physid_mask_of_physid(0);
		cpu_set(0, cpu_sibling_map[0]);
		cpu_set(0, cpu_core_map[0]);
		return;
	}

	connect_bsp_APIC();
	setup_local_APIC();
	map_cpu_to_logical_apicid();


	setup_portio_remap();

	/*
	 * Scan the CPU present map and fire up the other CPUs via do_boot_cpu
	 *
	 * In clustered apic mode, phys_cpu_present_map is constructed thus:
	 * bits 0-3 are quad0, 4-7 are quad1, etc. A perverse twist on the
	 * clustered apic ID.
	 */
	Dprintk("CPU present map: %lx\n", physids_coerce(phys_cpu_present_map));

	kicked = 1;
	for (bit = 0; kicked < NR_CPUS && bit < MAX_APICS; bit++) {
		apicid = cpu_present_to_apicid(bit);
		/*
		 * Don't even attempt to start the boot CPU!
		 */
		if ((apicid == boot_cpu_apicid) || (apicid == BAD_APICID))
			continue;

		if (!check_apicid_present(bit))
			continue;
		if (max_cpus <= cpucount+1)
			continue;

		if (((cpu = alloc_cpu_id()) <= 0) || do_boot_cpu(apicid, cpu))
			printk("CPU #%d not responding - cannot use it.\n",
								apicid);
		else
			++kicked;
	}

	/*
	 * Cleanup possible dangling ends...
	 */
	smpboot_restore_warm_reset_vector();

	/*
	 * Allow the user to impress friends.
	 */
	Dprintk("Before bogomips.\n");
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_isset(cpu, cpu_callout_map))
			bogosum += cpu_data[cpu].loops_per_jiffy;
	printk(KERN_INFO
		"Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
		cpucount+1,
		bogosum/(500000/HZ),
		(bogosum/(5000/HZ))%100);

	Dprintk("Before bogocount - setting activated=1.\n");

	if (smp_b_stepping)
		printk(KERN_WARNING "WARNING: SMP operation may be unreliable with B stepping processors.\n");

	/*
	 * Don't taint if we are running SMP kernel on a single non-MP
	 * approved Athlon
	 */
	if (tainted & TAINT_UNSAFE_SMP) {
		if (cpucount)
			printk(KERN_INFO "WARNING: This combination of AMD processors is not suitable for SMP.\n");
		else
			tainted &= ~TAINT_UNSAFE_SMP;
	}

	Dprintk("Boot done.\n");

	/*
	 * construct cpu_sibling_map[], so that we can tell sibling CPUs
	 * efficiently.
	 */
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		cpus_clear(cpu_sibling_map[cpu]);
		cpus_clear(cpu_core_map[cpu]);
	}

	cpu_set(0, cpu_sibling_map[0]);
	cpu_set(0, cpu_core_map[0]);

	smpboot_setup_io_apic();

	setup_boot_clock();
}
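
/*
 * On the BogoMIPS arithmetic above: BogoMIPS is defined as
 * loops_per_jiffy * HZ / 500000, so bogosum/(500000/HZ) is the
 * integer part summed over all booted CPUs and (bogosum/(5000/HZ))%100
 * the two digits printed after the decimal point.
 */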

/* These are wrappers to interface to the new boot process. Someone
   who understands all this stuff should rewrite it properly. --RR 15/Jul/02 */
void __init native_smp_prepare_cpus(unsigned int max_cpus)
{
	smp_commenced_mask = cpumask_of_cpu(0);
	cpu_callin_map = cpumask_of_cpu(0);
	mb();
	smp_boot_cpus(max_cpus);
}

/* Current gdt points %fs at the "master" per-cpu area: after this,
 * it's on the real one. */
static inline void switch_to_new_gdt(void)
{
	struct Xgt_desc_struct gdt_descr;

	gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id());
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);
	asm("mov %0, %%fs" : : "r" (__KERNEL_PERCPU) : "memory");
}

void __init native_smp_prepare_boot_cpu(void)
{
	unsigned int cpu = smp_processor_id();

	init_gdt(cpu);
	switch_to_new_gdt();

	cpu_set(cpu, cpu_online_map);
	cpu_set(cpu, cpu_callout_map);
	cpu_set(cpu, cpu_present_map);
	cpu_set(cpu, cpu_possible_map);
	__get_cpu_var(cpu_state) = CPU_ONLINE;
}

#ifdef CONFIG_HOTPLUG_CPU
static void
remove_siblinginfo(int cpu)
{
	int sibling;
	struct cpuinfo_x86 *c = cpu_data;

	for_each_cpu_mask(sibling, cpu_core_map[cpu]) {
		cpu_clear(cpu, cpu_core_map[sibling]);
		/*
		 * last thread sibling in this cpu core going down
		 */
		if (cpus_weight(cpu_sibling_map[cpu]) == 1)
			c[sibling].booted_cores--;
	}

	for_each_cpu_mask(sibling, cpu_sibling_map[cpu])
		cpu_clear(cpu, cpu_sibling_map[sibling]);
	cpus_clear(cpu_sibling_map[cpu]);
	cpus_clear(cpu_core_map[cpu]);
	c[cpu].phys_proc_id = 0;
	c[cpu].cpu_core_id = 0;
	cpu_clear(cpu, cpu_sibling_setup_map);
}

int __cpu_disable(void)
{
	cpumask_t map = cpu_online_map;
	int cpu = smp_processor_id();

	/*
	 * Perhaps use cpufreq to drop frequency, but that could go
	 * into generic code.
	 *
	 * We won't take down the boot processor on i386 due to some
	 * interrupts only being able to be serviced by the BSP.
	 * Especially so if we're not using an IOAPIC	-zwane
	 */
	if (cpu == 0)
		return -EBUSY;
	if (nmi_watchdog == NMI_LOCAL_APIC)
		stop_apic_nmi_watchdog(NULL);
	clear_local_APIC();
	/* Allow any queued timer interrupts to get serviced */
	local_irq_enable();
	mdelay(1);
	local_irq_disable();

	remove_siblinginfo(cpu);

	cpu_clear(cpu, map);
	fixup_irqs(map);
	/* It's now safe to remove this processor from the online map */
	cpu_clear(cpu, cpu_online_map);
	return 0;
}

void __cpu_die(unsigned int cpu)
{
	/* We don't do anything here: idle task is faking death itself. */
	unsigned int i;

	for (i = 0; i < 10; i++) {
		/* They ack this in play_dead by setting CPU_DEAD */
		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
			printk("CPU %d is now offline\n", cpu);
			if (1 == num_online_cpus())
				alternatives_smp_switch(0);
			return;
		}
		msleep(100);
	}
	printk(KERN_ERR "CPU %u didn't die...\n", cpu);
}
#else /* ... !CONFIG_HOTPLUG_CPU */
int __cpu_disable(void)
{
	return -ENOSYS;
}

void __cpu_die(unsigned int cpu)
{
	/* We said "no" in __cpu_disable */
	BUG();
}
#endif /* CONFIG_HOTPLUG_CPU */

int __cpuinit native_cpu_up(unsigned int cpu)
{
	unsigned long flags;
#ifdef CONFIG_HOTPLUG_CPU
	int ret = 0;

	/*
	 * We do warm boot only on cpus that have booted earlier.
	 * Otherwise cold boot is all handled from smp_boot_cpus().
	 * cpu_callin_map is set during AP kickstart process. It's reset
	 * when a cpu is taken offline from cpu_exit_clear().
	 */
	if (!cpu_isset(cpu, cpu_callin_map))
		ret = __smp_prepare_cpu(cpu);

	if (ret)
		return -EIO;
#endif

	/* In case one didn't come up */
	if (!cpu_isset(cpu, cpu_callin_map)) {
		printk(KERN_DEBUG "skipping cpu%d, didn't come online\n", cpu);
		return -EIO;
	}

	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
	/* Unleash the CPU! */
	cpu_set(cpu, smp_commenced_mask);

	/*
	 * Check TSC synchronization with the AP (keep irqs disabled
	 * while doing so):
	 */
	local_irq_save(flags);
	check_tsc_sync_source(cpu);
	local_irq_restore(flags);

	while (!cpu_isset(cpu, cpu_online_map)) {
		cpu_relax();
		touch_nmi_watchdog();
	}

	return 0;
}
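
/*
 * Setting smp_commenced_mask above is what releases the AP from its
 * rep_nop() wait loop in start_secondary(); the TSC check that
 * follows is a rendezvous between this CPU (check_tsc_sync_source)
 * and the AP (check_tsc_sync_target), after which we simply wait for
 * the AP to mark itself online.
 */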

void __init native_smp_cpus_done(unsigned int max_cpus)
{
#ifdef CONFIG_X86_IO_APIC
	setup_ioapic_dest();
#endif
	zap_low_mappings();
#ifndef CONFIG_HOTPLUG_CPU
	/*
	 * Disable executability of the SMP trampoline:
	 */
	set_kernel_exec((unsigned long)trampoline_base, trampoline_exec);
#endif
}

void __init smp_intr_init(void)
{
	/*
	 * IRQ0 must be given a fixed assignment and initialized,
	 * because it's used before the IO-APIC is set up.
	 */
	set_intr_gate(FIRST_DEVICE_VECTOR, interrupt[0]);

	/*
	 * The reschedule interrupt is a CPU-to-CPU reschedule-helper
	 * IPI, driven by wakeup.
	 */
	set_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);

	/* IPI for invalidation */
	set_intr_gate(INVALIDATE_TLB_VECTOR, invalidate_interrupt);

	/* IPI for generic function call */
	set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
}

/*
 * If the BIOS enumerates physical processors before logical,
 * maxcpus=N at enumeration-time can be used to disable HT.
 */
static int __init parse_maxcpus(char *arg)
{
	extern unsigned int maxcpus;

	maxcpus = simple_strtoul(arg, NULL, 0);
	return 0;
}
early_param("maxcpus", parse_maxcpus);