#include <linux/init.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/bootmem.h>
#include <asm/semaphore.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/msr.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/mtrr.h>
#include <asm/mce.h>
#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <asm/apic.h>
#include <mach_apic.h>
#endif

#include "cpu.h"

DEFINE_PER_CPU(struct Xgt_desc_struct, cpu_gdt_descr);
EXPORT_PER_CPU_SYMBOL(cpu_gdt_descr);

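/*
 * Per-CPU stack for the 16-bit (ESPFIX) stack segment; cpu_init()
 * below points the GDT_ENTRY_ESPFIX_SS descriptor at this buffer.
 */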
DEFINE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]);
EXPORT_PER_CPU_SYMBOL(cpu_16bit_stack);

static int cachesize_override __cpuinitdata = -1;
static int disable_x86_fxsr __cpuinitdata;
static int disable_x86_serial_nr __cpuinitdata = 1;
static int disable_x86_sep __cpuinitdata;

struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};

extern int disable_pse;

static void default_init(struct cpuinfo_x86 * c)
{
	/* Not much we can do here... */
	/* Check if it at least has cpuid */
	if (c->cpuid_level == -1) {
		/* No cpuid. It must be an ancient CPU */
		if (c->x86 == 4)
			strcpy(c->x86_model_id, "486");
		else if (c->x86 == 3)
			strcpy(c->x86_model_id, "386");
	}
}

static struct cpu_dev default_cpu = {
	.c_init = default_init,
	.c_vendor = "Unknown",
};
static struct cpu_dev * this_cpu = &default_cpu;

static int __init cachesize_setup(char *str)
{
	get_option(&str, &cachesize_override);
	return 1;
}
__setup("cachesize=", cachesize_setup);

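/*
 * Fetch the 48-character brand string from extended CPUID leaves
 * 0x80000002..0x80000004: each leaf returns 16 bytes of the string
 * in EAX:EBX:ECX:EDX, which is why twelve u32 slots are filled below.
 */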
int __cpuinit get_model_name(struct cpuinfo_x86 *c)
{
	unsigned int *v;
	char *p, *q;

	if (cpuid_eax(0x80000000) < 0x80000004)
		return 0;

	v = (unsigned int *) c->x86_model_id;
	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
	c->x86_model_id[48] = 0;

	/* Intel chips right-justify this string for some dumb reason;
	   undo that brain damage */
	p = q = &c->x86_model_id[0];
	while (*p == ' ')
		p++;
	if (p != q) {
		while (*p)
			*q++ = *p++;
		while (q <= &c->x86_model_id[48])
			*q++ = '\0';	/* Zero-pad the rest */
	}

	return 1;
}

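/*
 * Report L1/L2 cache geometry from the extended CPUID leaves:
 * 0x80000005 describes the L1 caches (size in bits 31:24, line size
 * in bits 7:0 of ECX/EDX), 0x80000006 the L2 cache (size in KB in
 * bits 31:16 of ECX, line size in bits 7:0).
 */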
void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
{
	unsigned int n, dummy, ecx, edx, l2size;

	n = cpuid_eax(0x80000000);

	if (n >= 0x80000005) {
		cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
			edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
		c->x86_cache_size = (ecx>>24)+(edx>>24);
	}

	if (n < 0x80000006)	/* Some chips just have a large L1. */
		return;

	ecx = cpuid_ecx(0x80000006);
	l2size = ecx >> 16;

	/* do processor-specific cache resizing */
	if (this_cpu->c_size_cache)
		l2size = this_cpu->c_size_cache(c, l2size);

	/* Allow user to override all this if necessary. */
	if (cachesize_override != -1)
		l2size = cachesize_override;

	if (l2size == 0)
		return;		/* Again, no L2 cache is possible */

	c->x86_cache_size = l2size;

	printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
	       l2size, ecx & 0xFF);
}

/* Naming convention should be: <Name> [(<Codename>)] */
/* This table is only used if init_<vendor>() below doesn't set the
   model name; in particular, if CPUID levels 0x80000002..4 are
   supported, this isn't used. */

/* Look up CPU names by table lookup. */
static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
{
	struct cpu_model_info *info;

	if (c->x86_model >= 16)
		return NULL;	/* Range check */

	if (!this_cpu)
		return NULL;

	info = this_cpu->c_models;

	while (info && info->family) {
		if (info->family == c->x86)
			return info->model_names[c->x86_model];
		info++;
	}
	return NULL;		/* Not found */
}

static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
{
	char *v = c->x86_vendor_id;
	int i;
	static int printed;

	for (i = 0; i < X86_VENDOR_NUM; i++) {
		if (cpu_devs[i]) {
			if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
			    (cpu_devs[i]->c_ident[1] &&
			     !strcmp(v, cpu_devs[i]->c_ident[1]))) {
				c->x86_vendor = i;
				if (!early)
					this_cpu = cpu_devs[i];
				return;
			}
		}
	}
	if (!printed) {
		printed++;
		printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n");
		printk(KERN_ERR "CPU: Your system may be unstable.\n");
	}
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	this_cpu = &default_cpu;
}

static int __init x86_fxsr_setup(char *s)
{
	disable_x86_fxsr = 1;
	return 1;
}
__setup("nofxsr", x86_fxsr_setup);

static int __init x86_sep_setup(char *s)
{
	disable_x86_sep = 1;
	return 1;
}
__setup("nosep", x86_sep_setup);

/* Standard test to see if a specific EFLAGS bit is changeable */
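/* The bit is toggled in a pushed EFLAGS copy, written back with popfl,
   and read out again; if the read-back value differs, the CPU honours
   the bit.  Toggling the ID flag (bit 21) this way is the classic test
   for CPUID support, which have_cpuid_p() below relies on. */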
static inline int flag_is_changeable_p(u32 flag)
{
	u32 f1, f2;

	asm("pushfl\n\t"
	    "pushfl\n\t"
	    "popl %0\n\t"
	    "movl %0,%1\n\t"
	    "xorl %2,%0\n\t"
	    "pushl %0\n\t"
	    "popfl\n\t"
	    "pushfl\n\t"
	    "popl %0\n\t"
	    "popfl\n\t"
	    : "=&r" (f1), "=&r" (f2)
	    : "ir" (flag));

	return ((f1^f2) & flag) != 0;
}

/* Probe for the CPUID instruction */
static int __cpuinit have_cpuid_p(void)
{
	return flag_is_changeable_p(X86_EFLAGS_ID);
}

/* Do minimum CPU detection early.
   Fields really needed: vendor, cpuid_level, family, model, mask,
   cache alignment.  The others are not touched to avoid unwanted
   side effects.

   WARNING: this function is only called on the BP.  Don't add code
   here that is supposed to run on all CPUs. */
static void __init early_cpu_detect(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	c->x86_cache_alignment = 32;

	if (!have_cpuid_p())
		return;

	/* Get vendor name */
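	/* The 12-byte vendor string arrives in EBX, EDX, ECX order,
	   hence the 0/8/4 offsets below ("GenuineIntel" etc.). */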
	cpuid(0x00000000, &c->cpuid_level,
	      (int *)&c->x86_vendor_id[0],
	      (int *)&c->x86_vendor_id[8],
	      (int *)&c->x86_vendor_id[4]);

	get_cpu_vendor(c, 1);

	c->x86 = 4;
	if (c->cpuid_level >= 0x00000001) {
		u32 junk, tfms, cap0, misc;
		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
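		/* tfms layout: bits 3:0 stepping, 7:4 model, 11:8 family,
		   19:16 extended model, 27:20 extended family.  misc (EBX)
		   bits 15:8 give the CLFLUSH line size in 8-byte units,
		   valid when the CLFSH feature bit (cap0 bit 19) is set. */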
		c->x86 = (tfms >> 8) & 15;
		c->x86_model = (tfms >> 4) & 15;
		if (c->x86 == 0xf)
			c->x86 += (tfms >> 20) & 0xff;
		if (c->x86 >= 0x6)
			c->x86_model += ((tfms >> 16) & 0xF) << 4;
		c->x86_mask = tfms & 15;
		if (cap0 & (1<<19))
			c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8;
	}
}

void __cpuinit generic_identify(struct cpuinfo_x86 * c)
{
	u32 tfms, xlvl;
	int ebx;

	if (have_cpuid_p()) {
		/* Get vendor name */
		cpuid(0x00000000, &c->cpuid_level,
		      (int *)&c->x86_vendor_id[0],
		      (int *)&c->x86_vendor_id[8],
		      (int *)&c->x86_vendor_id[4]);

		get_cpu_vendor(c, 0);
		/* Initialize the standard set of capabilities */
		/* Note that the vendor-specific code below might override */

		/* Intel-defined flags: level 0x00000001 */
		if (c->cpuid_level >= 0x00000001) {
			u32 capability, excap;
			cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
			c->x86_capability[0] = capability;
			c->x86_capability[4] = excap;
			c->x86 = (tfms >> 8) & 15;
			c->x86_model = (tfms >> 4) & 15;
			if (c->x86 == 0xf)
				c->x86 += (tfms >> 20) & 0xff;
			if (c->x86 >= 0x6)
				c->x86_model += ((tfms >> 16) & 0xF) << 4;
			c->x86_mask = tfms & 15;
#ifdef CONFIG_SMP
			c->apicid = phys_pkg_id((ebx >> 24) & 0xFF, 0);
#else
			c->apicid = (ebx >> 24) & 0xFF;
#endif
		} else {
			/* Have CPUID level 0 only - unheard of */
			c->x86 = 4;
		}

		/* AMD-defined flags: level 0x80000001 */
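		/* Leaf 0x80000000 returns the highest extended level in
		   EAX; the 0xffff0000 mask check guards against older parts
		   that echo garbage instead of an 0x8000xxxx value. */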
		xlvl = cpuid_eax(0x80000000);
		if ((xlvl & 0xffff0000) == 0x80000000) {
			if (xlvl >= 0x80000001) {
				c->x86_capability[1] = cpuid_edx(0x80000001);
				c->x86_capability[6] = cpuid_ecx(0x80000001);
			}
			if (xlvl >= 0x80000004)
				get_model_name(c); /* Default name */
		}
	}

	early_intel_workaround(c);

#ifdef CONFIG_X86_HT
	phys_proc_id[smp_processor_id()] = (cpuid_ebx(1) >> 24) & 0xff;
#endif
}

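/*
 * The Pentium III processor serial number (PSN): setting bit 21 of
 * MSR_IA32_BBL_CR_CTL disables it until the next reset, after which
 * the PN feature bit is cleared so the PSN is no longer advertised.
 */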
static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr) {
		/* Disable processor serial number */
		unsigned long lo, hi;
		rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
		lo |= 0x200000;
		wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
		printk(KERN_NOTICE "CPU serial number disabled.\n");
		clear_bit(X86_FEATURE_PN, c->x86_capability);

		/* Disabling the serial number may affect the cpuid level */
		c->cpuid_level = cpuid_eax(0);
	}
}

static int __init x86_serial_nr_setup(char *s)
{
	disable_x86_serial_nr = 0;
	return 1;
}
__setup("serialnumber", x86_serial_nr_setup);

/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
{
	int i;

	c->loops_per_jiffy = loops_per_jiffy;
	c->x86_cache_size = -1;
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	c->cpuid_level = -1;	/* CPUID not detected */
	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
	c->x86_vendor_id[0] = '\0'; /* Unset */
	c->x86_model_id[0] = '\0';  /* Unset */
	c->x86_max_cores = 1;
	memset(&c->x86_capability, 0, sizeof c->x86_capability);

	if (!have_cpuid_p()) {
		/* First of all, decide if this is a 486 or higher */
		/* It's a 486 if we can modify the AC flag */
		if (flag_is_changeable_p(X86_EFLAGS_AC))
			c->x86 = 4;
		else
			c->x86 = 3;
	}

	generic_identify(c);

	printk(KERN_DEBUG "CPU: After generic identify, caps:");
	for (i = 0; i < NCAPINTS; i++)
		printk(" %08lx", c->x86_capability[i]);
	printk("\n");

	if (this_cpu->c_identify) {
		this_cpu->c_identify(c);

		printk(KERN_DEBUG "CPU: After vendor identify, caps:");
		for (i = 0; i < NCAPINTS; i++)
			printk(" %08lx", c->x86_capability[i]);
		printk("\n");
	}

	/*
	 * Vendor-specific initialization.  In this section we
	 * canonicalize the feature flags, meaning if there are
	 * features a certain CPU supports which CPUID doesn't
	 * tell us, if CPUID claims incorrect flags, or there are
	 * other bugs, we handle them here.
	 *
	 * At the end of this section, c->x86_capability had better
	 * indicate the features this CPU genuinely supports!
	 */
	if (this_cpu->c_init)
		this_cpu->c_init(c);

	/* Disable the PN if appropriate */
	squash_the_stupid_serial_number(c);

	/*
	 * The vendor-specific functions might have changed features.
	 * Now we do "generic changes."
	 */

	/* TSC disabled? */
	if (tsc_disable)
		clear_bit(X86_FEATURE_TSC, c->x86_capability);

	/* FXSR disabled? */
	if (disable_x86_fxsr) {
		clear_bit(X86_FEATURE_FXSR, c->x86_capability);
		clear_bit(X86_FEATURE_XMM, c->x86_capability);
	}

	/* SEP disabled? */
	if (disable_x86_sep)
		clear_bit(X86_FEATURE_SEP, c->x86_capability);

	if (disable_pse)
		clear_bit(X86_FEATURE_PSE, c->x86_capability);

	/* If the model name is still unset, do table lookup. */
	if (!c->x86_model_id[0]) {
		char *p;
		p = table_lookup_model(c);
		if (p)
			strcpy(c->x86_model_id, p);
		else
			/* Last resort... */
			sprintf(c->x86_model_id, "%02x/%02x",
				c->x86, c->x86_model);
	}

	/* Now the feature flags better reflect actual CPU features! */

	printk(KERN_DEBUG "CPU: After all inits, caps:");
	for (i = 0; i < NCAPINTS; i++)
		printk(" %08lx", c->x86_capability[i]);
	printk("\n");

	/*
	 * On SMP, boot_cpu_data holds the common feature set between
	 * all CPUs; so make sure that we indicate which features are
	 * common between the CPUs.  The first time this routine gets
	 * executed, c == &boot_cpu_data.
	 */
	if (c != &boot_cpu_data) {
		/* AND the already accumulated flags with these */
		for (i = 0; i < NCAPINTS; i++)
			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
	}

	/* Init Machine Check Exception if available. */
	mcheck_init(c);

	if (c == &boot_cpu_data)
		sysenter_setup();
	enable_sep_cpu();

	if (c == &boot_cpu_data)
		mtrr_bp_init();
	else
		mtrr_ap_init();
}

#ifdef CONFIG_X86_HT
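/*
 * Derive sibling and core topology from CPUID leaf 1: EBX bits 23:16
 * give the number of logical processors per package, and the initial
 * APIC ID in EBX bits 31:24 is decomposed via phys_pkg_id() into
 * package and core identifiers.
 */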
void __cpuinit detect_ht(struct cpuinfo_x86 *c)
{
	u32 eax, ebx, ecx, edx;
	int index_msb, core_bits;
	int cpu = smp_processor_id();

	cpuid(1, &eax, &ebx, &ecx, &edx);

	if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
		return;

	smp_num_siblings = (ebx & 0xff0000) >> 16;

	if (smp_num_siblings == 1) {
		printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
	} else if (smp_num_siblings > 1) {

		if (smp_num_siblings > NR_CPUS) {
			printk(KERN_WARNING "CPU: Unsupported number of siblings %d\n",
			       smp_num_siblings);
			smp_num_siblings = 1;
			return;
		}

		index_msb = get_count_order(smp_num_siblings);
		phys_proc_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);

		printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
		       phys_proc_id[cpu]);

		smp_num_siblings = smp_num_siblings / c->x86_max_cores;

		index_msb = get_count_order(smp_num_siblings);

		core_bits = get_count_order(c->x86_max_cores);

		cpu_core_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb) &
			((1 << core_bits) - 1);

		if (c->x86_max_cores > 1)
			printk(KERN_INFO "CPU: Processor Core ID: %d\n",
			       cpu_core_id[cpu]);
	}
}
#endif

void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
{
	char *vendor = NULL;

	if (c->x86_vendor < X86_VENDOR_NUM)
		vendor = this_cpu->c_vendor;
	else if (c->cpuid_level >= 0)
		vendor = c->x86_vendor_id;

	if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor)))
		printk("%s ", vendor);

	if (!c->x86_model_id[0])
		printk("%d86", c->x86);
	else
		printk("%s", c->x86_model_id);

	if (c->x86_mask || c->cpuid_level >= 0)
		printk(" stepping %02x\n", c->x86_mask);
	else
		printk("\n");
}

cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;

/* This is hacky. :)
 * We're emulating future behavior.
 * In the future, the cpu-specific init functions will be called
 * implicitly via the magic of initcalls.
 * They will insert themselves into the cpu_devs structure.
 * Then, when cpu_init() is called, we can just iterate over that array.
 */

extern int intel_cpu_init(void);
extern int cyrix_init_cpu(void);
extern int nsc_init_cpu(void);
extern int amd_init_cpu(void);
extern int centaur_init_cpu(void);
extern int transmeta_init_cpu(void);
extern int rise_init_cpu(void);
extern int nexgen_init_cpu(void);
extern int umc_init_cpu(void);

void __init early_cpu_init(void)
{
	intel_cpu_init();
	cyrix_init_cpu();
	nsc_init_cpu();
	amd_init_cpu();
	centaur_init_cpu();
	transmeta_init_cpu();
	rise_init_cpu();
	nexgen_init_cpu();
	umc_init_cpu();
	early_cpu_detect();

#ifdef CONFIG_DEBUG_PAGEALLOC
	/* pse is not compatible with on-the-fly unmapping,
	 * disable it even if the cpus claim to support it.
	 */
	clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
	disable_pse = 1;
#endif
}

/*
 * cpu_init() initializes state that is per-CPU.  Some data is already
 * initialized (naturally) in the bootstrap process, such as the GDT
 * and IDT.  We reload them nevertheless; this function acts as a
 * 'CPU state barrier', nothing should get across.
 */
void __cpuinit cpu_init(void)
{
	int cpu = smp_processor_id();
	struct tss_struct *t = &per_cpu(init_tss, cpu);
	struct thread_struct *thread = &current->thread;
	struct desc_struct *gdt;
	__u32 stk16_off = (__u32)&per_cpu(cpu_16bit_stack, cpu);
	struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);

	if (cpu_test_and_set(cpu, cpu_initialized)) {
		printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
		for (;;) local_irq_enable();
	}
	printk(KERN_INFO "Initializing CPU#%d\n", cpu);

	if (cpu_has_vme || cpu_has_tsc || cpu_has_de)
		clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
	if (tsc_disable && cpu_has_tsc) {
		printk(KERN_NOTICE "Disabling TSC...\n");
		/**** FIX-HPA: DOES THIS REALLY BELONG HERE? ****/
		clear_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability);
		set_in_cr4(X86_CR4_TSD);
	}

	/* The CPU hotplug case */
	if (cpu_gdt_descr->address) {
		gdt = (struct desc_struct *)cpu_gdt_descr->address;
		memset(gdt, 0, PAGE_SIZE);
		goto old_gdt;
	}
	/*
	 * This is a horrible hack to allocate the GDT.  The problem
	 * is that cpu_init() is called really early for the boot CPU
	 * (and hence needs bootmem) but much later for the secondary
	 * CPUs, when bootmem will have gone away.
	 */
	if (NODE_DATA(0)->bdata->node_bootmem_map) {
		gdt = (struct desc_struct *)alloc_bootmem_pages(PAGE_SIZE);
		/* alloc_bootmem_pages panics on failure, so no check */
		memset(gdt, 0, PAGE_SIZE);
	} else {
		gdt = (struct desc_struct *)get_zeroed_page(GFP_KERNEL);
		if (unlikely(!gdt)) {
			printk(KERN_CRIT "CPU%d failed to allocate GDT\n", cpu);
			for (;;)
				local_irq_enable();
		}
	}
old_gdt:
	/*
	 * Initialize the per-CPU GDT with the boot GDT,
	 * and set up the GDT descriptor:
	 */
	memcpy(gdt, cpu_gdt_table, GDT_SIZE);

	/* Set up GDT entry for 16bit stack */
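	/* Splice the stack address into the descriptor's split base field:
	   base bits 23:0 live in descriptor bits 39:16 and base bits 31:24
	   in bits 63:56; the low word holds the limit (stack size - 1). */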
	*(__u64 *)(&gdt[GDT_ENTRY_ESPFIX_SS]) |=
		((((__u64)stk16_off) << 16) & 0x000000ffffff0000ULL) |
		((((__u64)stk16_off) << 32) & 0xff00000000000000ULL) |
		(CPU_16BIT_STACK_SIZE - 1);

	cpu_gdt_descr->size = GDT_SIZE - 1;
	cpu_gdt_descr->address = (unsigned long)gdt;

	load_gdt(cpu_gdt_descr);
	load_idt(&idt_descr);

	/*
	 * Set up and load the per-CPU TSS and LDT
	 */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	if (current->mm)
		BUG();
	enter_lazy_tlb(&init_mm, current);

	load_esp0(t, thread);
	set_tss_desc(cpu, t);
	load_TR_desc();
	load_LDT(&init_mm.context);

#ifdef CONFIG_DOUBLEFAULT
	/* Set up doublefault TSS pointer in the GDT */
	__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
#endif

	/* Clear %fs and %gs. */
	asm volatile ("xorl %eax, %eax; movl %eax, %fs; movl %eax, %gs");

	/* Clear all 6 debug registers: */
	set_debugreg(0, 0);
	set_debugreg(0, 1);
	set_debugreg(0, 2);
	set_debugreg(0, 3);
	set_debugreg(0, 6);
	set_debugreg(0, 7);

	/*
	 * Force FPU initialization:
	 */
	current_thread_info()->status = 0;
	clear_used_math();
	mxcsr_feature_mask_init();
}

#ifdef CONFIG_HOTPLUG_CPU
void __cpuinit cpu_uninit(void)
{
	int cpu = raw_smp_processor_id();
	cpu_clear(cpu, cpu_initialized);

	/* lazy TLB state */
	per_cpu(cpu_tlbstate, cpu).state = 0;
	per_cpu(cpu_tlbstate, cpu).active_mm = &init_mm;
}
#endif