#include <linux/init.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <asm/semaphore.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/msr.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <asm/apic.h>
#include <mach_apic.h>
#endif

#include "cpu.h"

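/*
 * Per-CPU stack used by the 16-bit stack fixup (espfix): cpu_init()
 * below points the GDT_ENTRY_ESPFIX_SS descriptor at this area, so the
 * kernel has a known stack to switch to when returning to userspace on
 * a 16-bit stack segment.
 */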
DEFINE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]);
EXPORT_PER_CPU_SYMBOL(cpu_16bit_stack);

static int cachesize_override __devinitdata = -1;
static int disable_x86_fxsr __devinitdata = 0;
static int disable_x86_serial_nr __devinitdata = 1;

struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};

extern int disable_pse;

static void default_init(struct cpuinfo_x86 * c)
{
	/* Not much we can do here... */
	/* Check if at least it has cpuid */
	if (c->cpuid_level == -1) {
		/* No cpuid. It must be an ancient CPU */
		if (c->x86 == 4)
			strcpy(c->x86_model_id, "486");
		else if (c->x86 == 3)
			strcpy(c->x86_model_id, "386");
	}
}

static struct cpu_dev default_cpu = {
	.c_init	= default_init,
};
static struct cpu_dev * this_cpu = &default_cpu;

static int __init cachesize_setup(char *str)
{
	get_option (&str, &cachesize_override);
	return 1;
}
__setup("cachesize=", cachesize_setup);

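/*
 * CPUID leaves 0x80000002..0x80000004 each return 16 bytes of the brand
 * string in EAX, EBX, ECX and EDX, 48 characters in total, which is why
 * x86_model_id is filled in as twelve consecutive 32-bit words below.
 */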
int __devinit get_model_name(struct cpuinfo_x86 *c)
{
	unsigned int *v;
	char *p, *q;

	if (cpuid_eax(0x80000000) < 0x80000004)
		return 0;

	v = (unsigned int *) c->x86_model_id;
	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
	c->x86_model_id[48] = 0;

	/* Intel chips right-justify this string for some dumb reason;
	   undo that brain damage */
	p = q = &c->x86_model_id[0];
	while ( *p == ' ' )
		p++;
	if ( p != q ) {
		while ( *p )
			*q++ = *p++;
		while ( q <= &c->x86_model_id[48] )
			*q++ = '\0';	/* Zero-pad the rest */
	}

	return 1;
}


void __devinit display_cacheinfo(struct cpuinfo_x86 *c)
{
	unsigned int n, dummy, ecx, edx, l2size;

	n = cpuid_eax(0x80000000);

	if (n >= 0x80000005) {
		cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
			edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
		c->x86_cache_size=(ecx>>24)+(edx>>24);
	}

	if (n < 0x80000006)	/* Some chips just have a large L1. */
		return;

	ecx = cpuid_ecx(0x80000006);
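	/* CPUID 0x80000006: ECX bits 31-16 give the L2 size in KB,
	   bits 7-0 the line size in bytes (printed below). */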
	l2size = ecx >> 16;

	/* do processor-specific cache resizing */
	if (this_cpu->c_size_cache)
		l2size = this_cpu->c_size_cache(c,l2size);

	/* Allow user to override all this if necessary. */
	if (cachesize_override != -1)
		l2size = cachesize_override;

	if ( l2size == 0 )
		return;		/* Again, no L2 cache is possible */

	c->x86_cache_size = l2size;

	printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
	       l2size, ecx & 0xFF);
}

/* Naming convention should be: <Name> [(<Codename>)] */
/* This table is only used if init_<vendor>() below doesn't set the model name; */
/* in particular, it is not used if CPUID levels 0x80000002..4 are supported */

/* Look up CPU names by table lookup. */
static char __devinit *table_lookup_model(struct cpuinfo_x86 *c)
{
	struct cpu_model_info *info;

	if ( c->x86_model >= 16 )
		return NULL;	/* Range check */

	if (!this_cpu)
		return NULL;

	info = this_cpu->c_models;

	while (info && info->family) {
		if (info->family == c->x86)
			return info->model_names[c->x86_model];
		info++;
	}
	return NULL;		/* Not found */
}


static void __devinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
{
	char *v = c->x86_vendor_id;
	int i;

	for (i = 0; i < X86_VENDOR_NUM; i++) {
		if (cpu_devs[i]) {
			if (!strcmp(v,cpu_devs[i]->c_ident[0]) ||
			    (cpu_devs[i]->c_ident[1] &&
			     !strcmp(v,cpu_devs[i]->c_ident[1]))) {
				c->x86_vendor = i;
				if (!early)
					this_cpu = cpu_devs[i];
				break;
			}
		}
	}
}


static int __init x86_fxsr_setup(char * s)
{
	disable_x86_fxsr = 1;
	return 1;
}
__setup("nofxsr", x86_fxsr_setup);


/* Standard macro to see if a specific flag is changeable */
static inline int flag_is_changeable_p(u32 flag)
{
	u32 f1, f2;

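	/*
	 * Save EFLAGS twice, toggle the requested bit in one copy, load
	 * it back into EFLAGS and read the result: if the bit still
	 * differs from the untouched copy, the flag is changeable.
	 */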
182 | asm("pushfl\n\t" | |
183 | "pushfl\n\t" | |
184 | "popl %0\n\t" | |
185 | "movl %0,%1\n\t" | |
186 | "xorl %2,%0\n\t" | |
187 | "pushl %0\n\t" | |
188 | "popfl\n\t" | |
189 | "pushfl\n\t" | |
190 | "popl %0\n\t" | |
191 | "popfl\n\t" | |
192 | : "=&r" (f1), "=&r" (f2) | |
193 | : "ir" (flag)); | |
194 | ||
195 | return ((f1^f2) & flag) != 0; | |
196 | } | |
197 | ||
198 | ||
/* Probe for the CPUID instruction: the ID flag (bit 21) in EFLAGS can
   be toggled by software if and only if the CPU supports CPUID. */
static int __devinit have_cpuid_p(void)
{
	return flag_is_changeable_p(X86_EFLAGS_ID);
}

/* Do minimum CPU detection early.
   Fields really needed: vendor, cpuid_level, family, model, mask, cache alignment.
   The others are not touched to avoid unwanted side effects. */
static void __init early_cpu_detect(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	c->x86_cache_alignment = 32;

	if (!have_cpuid_p())
		return;

	/* Get vendor name */
	cpuid(0x00000000, &c->cpuid_level,
	      (int *)&c->x86_vendor_id[0],
	      (int *)&c->x86_vendor_id[8],
	      (int *)&c->x86_vendor_id[4]);

	get_cpu_vendor(c, 1);

	c->x86 = 4;
	if (c->cpuid_level >= 0x00000001) {
		u32 junk, tfms, cap0, misc;
		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
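		/*
		 * tfms layout: stepping in bits 3-0, model in 7-4,
		 * family in 11-8, extended model in 19-16, extended
		 * family in 27-20.  E.g. tfms == 0x00000f29 decodes to
		 * family 0xf, model 2, stepping 9.
		 */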
		c->x86 = (tfms >> 8) & 15;
		c->x86_model = (tfms >> 4) & 15;
		if (c->x86 == 0xf)
			c->x86 += (tfms >> 20) & 0xff;
		if (c->x86 >= 0x6)
			c->x86_model += ((tfms >> 16) & 0xF) << 4;
		c->x86_mask = tfms & 15;
		if (cap0 & (1<<19))
			c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8;
	}

	early_intel_workaround(c);

#ifdef CONFIG_X86_HT
	phys_proc_id[smp_processor_id()] = (cpuid_ebx(1) >> 24) & 0xff;
#endif
}

void __devinit generic_identify(struct cpuinfo_x86 * c)
{
	u32 tfms, xlvl;
	int junk;

	if (have_cpuid_p()) {
		/* Get vendor name */
		cpuid(0x00000000, &c->cpuid_level,
		      (int *)&c->x86_vendor_id[0],
		      (int *)&c->x86_vendor_id[8],
		      (int *)&c->x86_vendor_id[4]);

		get_cpu_vendor(c, 0);
		/* Initialize the standard set of capabilities */
		/* Note that the vendor-specific code below might override */

		/* Intel-defined flags: level 0x00000001 */
		if ( c->cpuid_level >= 0x00000001 ) {
			u32 capability, excap;
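			/*
			 * Feature words: CPUID 1 EDX lands in
			 * x86_capability[0] and CPUID 1 ECX in
			 * x86_capability[4]; the AMD-defined leaf
			 * 0x80000001 fills words 1 (EDX) and 6 (ECX)
			 * further down.
			 */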
			cpuid(0x00000001, &tfms, &junk, &excap, &capability);
			c->x86_capability[0] = capability;
			c->x86_capability[4] = excap;
			c->x86 = (tfms >> 8) & 15;
			c->x86_model = (tfms >> 4) & 15;
			if (c->x86 == 0xf) {
				c->x86 += (tfms >> 20) & 0xff;
				c->x86_model += ((tfms >> 16) & 0xF) << 4;
			}
			c->x86_mask = tfms & 15;
		} else {
			/* Have CPUID level 0 only - unheard of */
			c->x86 = 4;
		}

		/* AMD-defined flags: level 0x80000001 */
		xlvl = cpuid_eax(0x80000000);
		if ( (xlvl & 0xffff0000) == 0x80000000 ) {
			if ( xlvl >= 0x80000001 ) {
				c->x86_capability[1] = cpuid_edx(0x80000001);
				c->x86_capability[6] = cpuid_ecx(0x80000001);
			}
			if ( xlvl >= 0x80000004 )
				get_model_name(c); /* Default name */
		}
	}
}

static void __devinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr) {
		/* Disable processor serial number */
		unsigned long lo,hi;
		rdmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
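		/* Setting bit 21 (0x200000) of BBL_CR_CTL turns the
		   serial number feature off. */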
		lo |= 0x200000;
		wrmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
		printk(KERN_NOTICE "CPU serial number disabled.\n");
		clear_bit(X86_FEATURE_PN, c->x86_capability);

		/* Disabling the serial number may affect the cpuid level */
		c->cpuid_level = cpuid_eax(0);
	}
}

static int __init x86_serial_nr_setup(char *s)
{
	disable_x86_serial_nr = 0;
	return 1;
}
__setup("serialnumber", x86_serial_nr_setup);



/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
void __devinit identify_cpu(struct cpuinfo_x86 *c)
{
	int i;

	c->loops_per_jiffy = loops_per_jiffy;
	c->x86_cache_size = -1;
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	c->cpuid_level = -1;	/* CPUID not detected */
	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
	c->x86_vendor_id[0] = '\0'; /* Unset */
	c->x86_model_id[0] = '\0';  /* Unset */
	c->x86_max_cores = 1;
	memset(&c->x86_capability, 0, sizeof c->x86_capability);

	if (!have_cpuid_p()) {
		/* First of all, decide if this is a 486 or higher */
		/* It's a 486 if we can modify the AC flag */
		if ( flag_is_changeable_p(X86_EFLAGS_AC) )
			c->x86 = 4;
		else
			c->x86 = 3;
	}

	generic_identify(c);

	printk(KERN_DEBUG "CPU: After generic identify, caps:");
	for (i = 0; i < NCAPINTS; i++)
		printk(" %08lx", c->x86_capability[i]);
	printk("\n");

	if (this_cpu->c_identify) {
		this_cpu->c_identify(c);

		printk(KERN_DEBUG "CPU: After vendor identify, caps:");
		for (i = 0; i < NCAPINTS; i++)
			printk(" %08lx", c->x86_capability[i]);
		printk("\n");
	}

	/*
	 * Vendor-specific initialization.  In this section we
	 * canonicalize the feature flags: if there are features a
	 * certain CPU supports which CPUID doesn't tell us about, or
	 * CPUID claims flags incorrectly, or there are other bugs,
	 * we handle them here.
	 *
	 * At the end of this section, c->x86_capability better
	 * indicate the features this CPU genuinely supports!
	 */
	if (this_cpu->c_init)
		this_cpu->c_init(c);

	/* Disable the PN if appropriate */
	squash_the_stupid_serial_number(c);

	/*
	 * The vendor-specific functions might have changed features.  Now
	 * we do "generic changes."
	 */

	/* TSC disabled? */
	if ( tsc_disable )
		clear_bit(X86_FEATURE_TSC, c->x86_capability);

	/* FXSR disabled? */
	if (disable_x86_fxsr) {
		clear_bit(X86_FEATURE_FXSR, c->x86_capability);
		clear_bit(X86_FEATURE_XMM, c->x86_capability);
	}

	if (disable_pse)
		clear_bit(X86_FEATURE_PSE, c->x86_capability);

	/* If the model name is still unset, do table lookup. */
	if ( !c->x86_model_id[0] ) {
		char *p;
		p = table_lookup_model(c);
		if ( p )
			strcpy(c->x86_model_id, p);
		else
			/* Last resort... */
			sprintf(c->x86_model_id, "%02x/%02x",
				c->x86_vendor, c->x86_model);
	}

	/* Now the feature flags better reflect actual CPU features! */

	printk(KERN_DEBUG "CPU: After all inits, caps:");
	for (i = 0; i < NCAPINTS; i++)
		printk(" %08lx", c->x86_capability[i]);
	printk("\n");

	/*
	 * On SMP, boot_cpu_data holds the common feature set between
	 * all CPUs; so make sure that we indicate which features are
	 * common between the CPUs.  The first time this routine gets
	 * executed, c == &boot_cpu_data.
	 */
	if ( c != &boot_cpu_data ) {
		/* AND the already accumulated flags with these */
		for ( i = 0 ; i < NCAPINTS ; i++ )
			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
	}

	/* Init Machine Check Exception if available. */
	mcheck_init(c);

	if (c == &boot_cpu_data)
		sysenter_setup();
	enable_sep_cpu();

	if (c == &boot_cpu_data)
		mtrr_bp_init();
	else
		mtrr_ap_init();
}

#ifdef CONFIG_X86_HT
void __devinit detect_ht(struct cpuinfo_x86 *c)
{
	u32 eax, ebx, ecx, edx;
	int index_msb, core_bits;
	int cpu = smp_processor_id();

	cpuid(1, &eax, &ebx, &ecx, &edx);

	c->apicid = phys_pkg_id((ebx >> 24) & 0xFF, 0);

	if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
		return;

	smp_num_siblings = (ebx & 0xff0000) >> 16;

	if (smp_num_siblings == 1) {
		printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
	} else if (smp_num_siblings > 1) {

		if (smp_num_siblings > NR_CPUS) {
			printk(KERN_WARNING "CPU: Unsupported number of siblings %d\n", smp_num_siblings);
			smp_num_siblings = 1;
			return;
		}

		index_msb = get_count_order(smp_num_siblings);
		phys_proc_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);

		printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
		       phys_proc_id[cpu]);

		smp_num_siblings = smp_num_siblings / c->x86_max_cores;

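		/*
		 * Example: 4 logical CPUs per package with 2 cores gives
		 * 2 siblings per core, hence one thread-select bit
		 * (index_msb) and one core-select bit (core_bits) in the
		 * APIC ID below.
		 */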
		index_msb = get_count_order(smp_num_siblings);

		core_bits = get_count_order(c->x86_max_cores);

		cpu_core_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb) &
					       ((1 << core_bits) - 1);

		if (c->x86_max_cores > 1)
			printk(KERN_INFO "CPU: Processor Core ID: %d\n",
			       cpu_core_id[cpu]);
	}
}
#endif

void __devinit print_cpu_info(struct cpuinfo_x86 *c)
{
	char *vendor = NULL;

	if (c->x86_vendor < X86_VENDOR_NUM)
		vendor = this_cpu->c_vendor;
	else if (c->cpuid_level >= 0)
		vendor = c->x86_vendor_id;

	if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor)))
		printk("%s ", vendor);

	if (!c->x86_model_id[0])
		printk("%d86", c->x86);
	else
		printk("%s", c->x86_model_id);

	if (c->x86_mask || c->cpuid_level >= 0)
		printk(" stepping %02x\n", c->x86_mask);
	else
		printk("\n");
}

cpumask_t cpu_initialized __devinitdata = CPU_MASK_NONE;

/* This is hacky. :)
 * We're emulating future behavior.
 * In the future, the cpu-specific init functions will be called implicitly
 * via the magic of initcalls.
 * They will insert themselves into the cpu_devs structure.
 * Then, when cpu_init() is called, we can just iterate over that array.
 */

extern int intel_cpu_init(void);
extern int cyrix_init_cpu(void);
extern int nsc_init_cpu(void);
extern int amd_init_cpu(void);
extern int centaur_init_cpu(void);
extern int transmeta_init_cpu(void);
extern int rise_init_cpu(void);
extern int nexgen_init_cpu(void);
extern int umc_init_cpu(void);

void __init early_cpu_init(void)
{
	intel_cpu_init();
	cyrix_init_cpu();
	nsc_init_cpu();
	amd_init_cpu();
	centaur_init_cpu();
	transmeta_init_cpu();
	rise_init_cpu();
	nexgen_init_cpu();
	umc_init_cpu();
	early_cpu_detect();

#ifdef CONFIG_DEBUG_PAGEALLOC
	/* pse is not compatible with on-the-fly unmapping,
	 * disable it even if the cpus claim to support it.
	 */
	clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
	disable_pse = 1;
#endif
}
/*
 * cpu_init() initializes state that is per-CPU. Some data is already
 * initialized (naturally) in the bootstrap process, such as the GDT
 * and IDT. We reload them nevertheless, this function acts as a
 * 'CPU state barrier', nothing should get across.
 */
void __devinit cpu_init(void)
{
	int cpu = smp_processor_id();
	struct tss_struct * t = &per_cpu(init_tss, cpu);
	struct thread_struct *thread = &current->thread;
	struct desc_struct *gdt = get_cpu_gdt_table(cpu);
	__u32 stk16_off = (__u32)&per_cpu(cpu_16bit_stack, cpu);

	if (cpu_test_and_set(cpu, cpu_initialized)) {
		printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
		for (;;) local_irq_enable();
	}
	printk(KERN_INFO "Initializing CPU#%d\n", cpu);

	if (cpu_has_vme || cpu_has_tsc || cpu_has_de)
		clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
	if (tsc_disable && cpu_has_tsc) {
		printk(KERN_NOTICE "Disabling TSC...\n");
		/**** FIX-HPA: DOES THIS REALLY BELONG HERE? ****/
		clear_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability);
		set_in_cr4(X86_CR4_TSD);
	}

	/*
	 * Initialize the per-CPU GDT with the boot GDT,
	 * and set up the GDT descriptor:
	 */
	memcpy(gdt, cpu_gdt_table, GDT_SIZE);

	/* Set up GDT entry for 16bit stack */
	*(__u64 *)(&gdt[GDT_ENTRY_ESPFIX_SS]) |=
		((((__u64)stk16_off) << 16) & 0x000000ffffff0000ULL) |
		((((__u64)stk16_off) << 32) & 0xff00000000000000ULL) |
		(CPU_16BIT_STACK_SIZE - 1);
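	/*
	 * (In a segment descriptor, base bits 23-0 occupy descriptor
	 * bits 39-16 and base bits 31-24 occupy bits 63-56, which is why
	 * the stack offset is split across the two shifted masks above;
	 * the low 16 bits of the descriptor hold the segment limit.)
	 */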

	cpu_gdt_descr[cpu].size = GDT_SIZE - 1;
	cpu_gdt_descr[cpu].address = (unsigned long)gdt;

	load_gdt(&cpu_gdt_descr[cpu]);
	load_idt(&idt_descr);

	/*
	 * Set up and load the per-CPU TSS and LDT
	 */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	if (current->mm)
		BUG();
	enter_lazy_tlb(&init_mm, current);

	load_esp0(t, thread);
	set_tss_desc(cpu,t);
	load_TR_desc();
	load_LDT(&init_mm.context);

#ifdef CONFIG_DOUBLEFAULT
	/* Set up doublefault TSS pointer in the GDT */
	__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
#endif

	/* Clear %fs and %gs. */
	asm volatile ("xorl %eax, %eax; movl %eax, %fs; movl %eax, %gs");

	/* Clear all 6 debug registers: */
	set_debugreg(0, 0);
	set_debugreg(0, 1);
	set_debugreg(0, 2);
	set_debugreg(0, 3);
	set_debugreg(0, 6);
	set_debugreg(0, 7);

	/*
	 * Force FPU initialization:
	 */
	current_thread_info()->status = 0;
	clear_used_math();
	mxcsr_feature_mask_init();
}

#ifdef CONFIG_HOTPLUG_CPU
void __devinit cpu_uninit(void)
{
	int cpu = raw_smp_processor_id();
	cpu_clear(cpu, cpu_initialized);

	/* lazy TLB state */
	per_cpu(cpu_tlbstate, cpu).state = 0;
	per_cpu(cpu_tlbstate, cpu).active_mm = &init_mm;
}
#endif