#include <linux/init.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <asm/semaphore.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/msr.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <asm/apic.h>
#include <mach_apic.h>
#endif

#include "cpu.h"

DEFINE_PER_CPU(struct desc_struct, cpu_gdt_table[GDT_ENTRIES]);
EXPORT_PER_CPU_SYMBOL(cpu_gdt_table);

DEFINE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]);
EXPORT_PER_CPU_SYMBOL(cpu_16bit_stack);
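
/*
 * Note: cpu_16bit_stack backs the per-CPU ESPFIX segment set up in
 * cpu_init() below; it gives each CPU a small kernel stack whose
 * address fits the truncated ESP image seen when returning to code
 * running on a 16-bit stack segment.
 */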
26 | ||
27 | static int cachesize_override __initdata = -1; | |
28 | static int disable_x86_fxsr __initdata = 0; | |
29 | static int disable_x86_serial_nr __initdata = 1; | |
30 | ||
31 | struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {}; | |
32 | ||
33 | extern void mcheck_init(struct cpuinfo_x86 *c); | |
34 | ||
35 | extern int disable_pse; | |
36 | ||
37 | static void default_init(struct cpuinfo_x86 * c) | |
38 | { | |
39 | /* Not much we can do here... */ | |
40 | /* Check if at least it has cpuid */ | |
41 | if (c->cpuid_level == -1) { | |
42 | /* No cpuid. It must be an ancient CPU */ | |
43 | if (c->x86 == 4) | |
44 | strcpy(c->x86_model_id, "486"); | |
45 | else if (c->x86 == 3) | |
46 | strcpy(c->x86_model_id, "386"); | |
47 | } | |
48 | } | |
49 | ||
50 | static struct cpu_dev default_cpu = { | |
51 | .c_init = default_init, | |
52 | }; | |
53 | static struct cpu_dev * this_cpu = &default_cpu; | |
54 | ||
55 | static int __init cachesize_setup(char *str) | |
56 | { | |
57 | get_option (&str, &cachesize_override); | |
58 | return 1; | |
59 | } | |
60 | __setup("cachesize=", cachesize_setup); | |
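
/*
 * "cachesize=<n>" on the kernel command line forces the L2 cache size
 * (in KB, as used by display_cacheinfo() below), overriding detection.
 */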
61 | ||
62 | int __init get_model_name(struct cpuinfo_x86 *c) | |
63 | { | |
64 | unsigned int *v; | |
65 | char *p, *q; | |
66 | ||
67 | if (cpuid_eax(0x80000000) < 0x80000004) | |
68 | return 0; | |
69 | ||
70 | v = (unsigned int *) c->x86_model_id; | |
71 | cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]); | |
72 | cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]); | |
73 | cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]); | |
74 | c->x86_model_id[48] = 0; | |
75 | ||
76 | /* Intel chips right-justify this string for some dumb reason; | |
77 | undo that brain damage */ | |
78 | p = q = &c->x86_model_id[0]; | |
79 | while ( *p == ' ' ) | |
80 | p++; | |
81 | if ( p != q ) { | |
82 | while ( *p ) | |
83 | *q++ = *p++; | |
84 | while ( q <= &c->x86_model_id[48] ) | |
85 | *q++ = '\0'; /* Zero-pad the rest */ | |
86 | } | |
87 | ||
88 | return 1; | |
89 | } | |
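
/*
 * Layout note: CPUID leaves 0x80000002..0x80000004 each return 16
 * bytes of the processor brand string in EAX, EBX, ECX and EDX,
 * 48 bytes in total -- hence the twelve u32 slots filled above and
 * the terminating NUL written at x86_model_id[48].
 */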
90 | ||
91 | ||
92 | void __init display_cacheinfo(struct cpuinfo_x86 *c) | |
93 | { | |
94 | unsigned int n, dummy, ecx, edx, l2size; | |
95 | ||
96 | n = cpuid_eax(0x80000000); | |
97 | ||
98 | if (n >= 0x80000005) { | |
99 | cpuid(0x80000005, &dummy, &dummy, &ecx, &edx); | |
100 | printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n", | |
101 | edx>>24, edx&0xFF, ecx>>24, ecx&0xFF); | |
102 | c->x86_cache_size=(ecx>>24)+(edx>>24); | |
103 | } | |
104 | ||
105 | if (n < 0x80000006) /* Some chips just has a large L1. */ | |
106 | return; | |
107 | ||
108 | ecx = cpuid_ecx(0x80000006); | |
109 | l2size = ecx >> 16; | |
110 | ||
111 | /* do processor-specific cache resizing */ | |
112 | if (this_cpu->c_size_cache) | |
113 | l2size = this_cpu->c_size_cache(c,l2size); | |
114 | ||
115 | /* Allow user to override all this if necessary. */ | |
116 | if (cachesize_override != -1) | |
117 | l2size = cachesize_override; | |
118 | ||
119 | if ( l2size == 0 ) | |
120 | return; /* Again, no L2 cache is possible */ | |
121 | ||
122 | c->x86_cache_size = l2size; | |
123 | ||
124 | printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n", | |
125 | l2size, ecx & 0xFF); | |
126 | } | |
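
/*
 * Register layout assumed above (the AMD-defined cache leaves):
 * leaf 0x80000005 reports the L1 data cache in ECX and the L1
 * instruction cache in EDX, with the size in KB in bits 31-24 and
 * the line size in bytes in bits 7-0; leaf 0x80000006 reports the
 * L2 size in KB in ECX bits 31-16 and its line size in bits 7-0.
 */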
127 | ||
128 | /* Naming convention should be: <Name> [(<Codename>)] */ | |
129 | /* This table only is used unless init_<vendor>() below doesn't set it; */ | |
130 | /* in particular, if CPUID levels 0x80000002..4 are supported, this isn't used */ | |
131 | ||
132 | /* Look up CPU names by table lookup. */ | |
133 | static char __init *table_lookup_model(struct cpuinfo_x86 *c) | |
134 | { | |
135 | struct cpu_model_info *info; | |
136 | ||
137 | if ( c->x86_model >= 16 ) | |
138 | return NULL; /* Range check */ | |
139 | ||
140 | if (!this_cpu) | |
141 | return NULL; | |
142 | ||
143 | info = this_cpu->c_models; | |
144 | ||
145 | while (info && info->family) { | |
146 | if (info->family == c->x86) | |
147 | return info->model_names[c->x86_model]; | |
148 | info++; | |
149 | } | |
150 | return NULL; /* Not found */ | |
151 | } | |
152 | ||
153 | ||
154 | void __init get_cpu_vendor(struct cpuinfo_x86 *c, int early) | |
155 | { | |
156 | char *v = c->x86_vendor_id; | |
157 | int i; | |
158 | ||
159 | for (i = 0; i < X86_VENDOR_NUM; i++) { | |
160 | if (cpu_devs[i]) { | |
161 | if (!strcmp(v,cpu_devs[i]->c_ident[0]) || | |
162 | (cpu_devs[i]->c_ident[1] && | |
163 | !strcmp(v,cpu_devs[i]->c_ident[1]))) { | |
164 | c->x86_vendor = i; | |
165 | if (!early) | |
166 | this_cpu = cpu_devs[i]; | |
167 | break; | |
168 | } | |
169 | } | |
170 | } | |
171 | } | |
172 | ||
173 | ||
174 | static int __init x86_fxsr_setup(char * s) | |
175 | { | |
176 | disable_x86_fxsr = 1; | |
177 | return 1; | |
178 | } | |
179 | __setup("nofxsr", x86_fxsr_setup); | |
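
/*
 * "nofxsr" on the command line makes identify_cpu() below clear both
 * X86_FEATURE_FXSR and X86_FEATURE_XMM: SSE state can only be saved
 * via fxsave, so disabling fxsr necessarily disables SSE as well.
 */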
180 | ||
181 | ||
182 | /* Standard macro to see if a specific flag is changeable */ | |
183 | static inline int flag_is_changeable_p(u32 flag) | |
184 | { | |
185 | u32 f1, f2; | |
186 | ||
187 | asm("pushfl\n\t" | |
188 | "pushfl\n\t" | |
189 | "popl %0\n\t" | |
190 | "movl %0,%1\n\t" | |
191 | "xorl %2,%0\n\t" | |
192 | "pushl %0\n\t" | |
193 | "popfl\n\t" | |
194 | "pushfl\n\t" | |
195 | "popl %0\n\t" | |
196 | "popfl\n\t" | |
197 | : "=&r" (f1), "=&r" (f2) | |
198 | : "ir" (flag)); | |
199 | ||
200 | return ((f1^f2) & flag) != 0; | |
201 | } | |
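
/*
 * The trick above: EFLAGS.ID (and EFLAGS.AC, used later) can only be
 * toggled by software if the CPU actually implements the flag.  We
 * flip the bit, read EFLAGS back, and compare against the saved copy;
 * if the bit stuck, the flag is changeable.  A CPU that lets us flip
 * the ID flag is one that implements the CPUID instruction.
 */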
202 | ||
203 | ||
204 | /* Probe for the CPUID instruction */ | |
205 | static int __init have_cpuid_p(void) | |
206 | { | |
207 | return flag_is_changeable_p(X86_EFLAGS_ID); | |
208 | } | |
209 | ||
210 | /* Do minimum CPU detection early. | |
211 | Fields really needed: vendor, cpuid_level, family, model, mask, cache alignment. | |
212 | The others are not touched to avoid unwanted side effects. */ | |
213 | static void __init early_cpu_detect(void) | |
214 | { | |
215 | struct cpuinfo_x86 *c = &boot_cpu_data; | |
216 | ||
217 | c->x86_cache_alignment = 32; | |
218 | ||
219 | if (!have_cpuid_p()) | |
220 | return; | |
221 | ||
222 | /* Get vendor name */ | |
223 | cpuid(0x00000000, &c->cpuid_level, | |
224 | (int *)&c->x86_vendor_id[0], | |
225 | (int *)&c->x86_vendor_id[8], | |
226 | (int *)&c->x86_vendor_id[4]); | |
227 | ||
228 | get_cpu_vendor(c, 1); | |
229 | ||
230 | c->x86 = 4; | |
231 | if (c->cpuid_level >= 0x00000001) { | |
232 | u32 junk, tfms, cap0, misc; | |
233 | cpuid(0x00000001, &tfms, &misc, &junk, &cap0); | |
234 | c->x86 = (tfms >> 8) & 15; | |
235 | c->x86_model = (tfms >> 4) & 15; | |
236 | if (c->x86 == 0xf) { | |
237 | c->x86 += (tfms >> 20) & 0xff; | |
238 | c->x86_model += ((tfms >> 16) & 0xF) << 4; | |
239 | } | |
240 | c->x86_mask = tfms & 15; | |
241 | if (cap0 & (1<<19)) | |
242 | c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8; | |
243 | } | |
244 | ||
245 | early_intel_workaround(c); | |
246 | } | |
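
/*
 * Decoding of the family/model/stepping word (tfms) above: stepping
 * sits in bits 3-0, model in bits 7-4 and family in bits 11-8.  Only
 * when the base family is 0xf do the extended family (bits 27-20)
 * and extended model (bits 19-16) fields kick in.  For example,
 * tfms == 0x00000f29 decodes as family 0xf, model 2, stepping 9.
 * Bit 19 of cap0 is the CLFLUSH feature bit; when set, EBX bits
 * 15-8 give the CLFLUSH line size in 8-byte units.
 */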
247 | ||
248 | void __init generic_identify(struct cpuinfo_x86 * c) | |
249 | { | |
250 | u32 tfms, xlvl; | |
251 | int junk; | |
252 | ||
253 | if (have_cpuid_p()) { | |
254 | /* Get vendor name */ | |
255 | cpuid(0x00000000, &c->cpuid_level, | |
256 | (int *)&c->x86_vendor_id[0], | |
257 | (int *)&c->x86_vendor_id[8], | |
258 | (int *)&c->x86_vendor_id[4]); | |
259 | ||
260 | get_cpu_vendor(c, 0); | |
261 | /* Initialize the standard set of capabilities */ | |
262 | /* Note that the vendor-specific code below might override */ | |
263 | ||
264 | /* Intel-defined flags: level 0x00000001 */ | |
265 | if ( c->cpuid_level >= 0x00000001 ) { | |
266 | u32 capability, excap; | |
267 | cpuid(0x00000001, &tfms, &junk, &excap, &capability); | |
268 | c->x86_capability[0] = capability; | |
269 | c->x86_capability[4] = excap; | |
270 | c->x86 = (tfms >> 8) & 15; | |
271 | c->x86_model = (tfms >> 4) & 15; | |
272 | if (c->x86 == 0xf) { | |
273 | c->x86 += (tfms >> 20) & 0xff; | |
274 | c->x86_model += ((tfms >> 16) & 0xF) << 4; | |
275 | } | |
276 | c->x86_mask = tfms & 15; | |
277 | } else { | |
278 | /* Have CPUID level 0 only - unheard of */ | |
279 | c->x86 = 4; | |
280 | } | |
281 | ||
282 | /* AMD-defined flags: level 0x80000001 */ | |
283 | xlvl = cpuid_eax(0x80000000); | |
284 | if ( (xlvl & 0xffff0000) == 0x80000000 ) { | |
285 | if ( xlvl >= 0x80000001 ) { | |
286 | c->x86_capability[1] = cpuid_edx(0x80000001); | |
287 | c->x86_capability[6] = cpuid_ecx(0x80000001); | |
288 | } | |
289 | if ( xlvl >= 0x80000004 ) | |
290 | get_model_name(c); /* Default name */ | |
291 | } | |
292 | } | |
293 | } | |
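
/*
 * x86_capability word assignments used above: word 0 holds EDX of
 * CPUID leaf 1 (the Intel-defined flags), word 4 holds ECX of leaf 1,
 * word 1 holds EDX of leaf 0x80000001 and word 6 holds ECX of leaf
 * 0x80000001 (the AMD-defined flags).  The X86_FEATURE_* bit numbers
 * in cpufeature.h are keyed to this layout.
 */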
294 | ||
295 | static void __init squash_the_stupid_serial_number(struct cpuinfo_x86 *c) | |
296 | { | |
297 | if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr ) { | |
298 | /* Disable processor serial number */ | |
299 | unsigned long lo,hi; | |
300 | rdmsr(MSR_IA32_BBL_CR_CTL,lo,hi); | |
301 | lo |= 0x200000; | |
302 | wrmsr(MSR_IA32_BBL_CR_CTL,lo,hi); | |
303 | printk(KERN_NOTICE "CPU serial number disabled.\n"); | |
304 | clear_bit(X86_FEATURE_PN, c->x86_capability); | |
305 | ||
306 | /* Disabling the serial number may affect the cpuid level */ | |
307 | c->cpuid_level = cpuid_eax(0); | |
308 | } | |
309 | } | |
310 | ||
311 | static int __init x86_serial_nr_setup(char *s) | |
312 | { | |
313 | disable_x86_serial_nr = 0; | |
314 | return 1; | |
315 | } | |
316 | __setup("serialnumber", x86_serial_nr_setup); | |
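
/*
 * The serial number is squashed by default (disable_x86_serial_nr is
 * initialized to 1); booting with "serialnumber" on the command line
 * clears the flag and leaves the PSN feature enabled.
 */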
317 | ||
318 | ||
319 | ||
320 | /* | |
321 | * This does the hard work of actually picking apart the CPU stuff... | |
322 | */ | |
323 | void __init identify_cpu(struct cpuinfo_x86 *c) | |
324 | { | |
325 | int i; | |
326 | ||
327 | c->loops_per_jiffy = loops_per_jiffy; | |
328 | c->x86_cache_size = -1; | |
329 | c->x86_vendor = X86_VENDOR_UNKNOWN; | |
330 | c->cpuid_level = -1; /* CPUID not detected */ | |
331 | c->x86_model = c->x86_mask = 0; /* So far unknown... */ | |
332 | c->x86_vendor_id[0] = '\0'; /* Unset */ | |
333 | c->x86_model_id[0] = '\0'; /* Unset */ | |
334 | c->x86_num_cores = 1; | |
335 | memset(&c->x86_capability, 0, sizeof c->x86_capability); | |
336 | ||
337 | if (!have_cpuid_p()) { | |
338 | /* First of all, decide if this is a 486 or higher */ | |
339 | /* It's a 486 if we can modify the AC flag */ | |
340 | if ( flag_is_changeable_p(X86_EFLAGS_AC) ) | |
341 | c->x86 = 4; | |
342 | else | |
343 | c->x86 = 3; | |
344 | } | |
345 | ||
346 | generic_identify(c); | |
347 | ||
348 | printk(KERN_DEBUG "CPU: After generic identify, caps:"); | |
349 | for (i = 0; i < NCAPINTS; i++) | |
350 | printk(" %08lx", c->x86_capability[i]); | |
351 | printk("\n"); | |
352 | ||
353 | if (this_cpu->c_identify) { | |
354 | this_cpu->c_identify(c); | |
355 | ||
356 | printk(KERN_DEBUG "CPU: After vendor identify, caps:"); | |
357 | for (i = 0; i < NCAPINTS; i++) | |
358 | printk(" %08lx", c->x86_capability[i]); | |
359 | printk("\n"); | |
360 | } | |
361 | ||
362 | /* | |
363 | * Vendor-specific initialization. In this section we | |
364 | * canonicalize the feature flags, meaning if there are | |
365 | * features a certain CPU supports which CPUID doesn't | |
366 | * tell us, CPUID claiming incorrect flags, or other bugs, | |
367 | * we handle them here. | |
368 | * | |
369 | * At the end of this section, c->x86_capability better | |
370 | * indicate the features this CPU genuinely supports! | |
371 | */ | |
372 | if (this_cpu->c_init) | |
373 | this_cpu->c_init(c); | |
374 | ||
375 | /* Disable the PN if appropriate */ | |
376 | squash_the_stupid_serial_number(c); | |
377 | ||
378 | /* | |
379 | * The vendor-specific functions might have changed features. Now | |
380 | * we do "generic changes." | |
381 | */ | |
382 | ||
383 | /* TSC disabled? */ | |
384 | if ( tsc_disable ) | |
385 | clear_bit(X86_FEATURE_TSC, c->x86_capability); | |
386 | ||
387 | /* FXSR disabled? */ | |
388 | if (disable_x86_fxsr) { | |
389 | clear_bit(X86_FEATURE_FXSR, c->x86_capability); | |
390 | clear_bit(X86_FEATURE_XMM, c->x86_capability); | |
391 | } | |
392 | ||
393 | if (disable_pse) | |
394 | clear_bit(X86_FEATURE_PSE, c->x86_capability); | |
395 | ||
396 | /* If the model name is still unset, do table lookup. */ | |
397 | if ( !c->x86_model_id[0] ) { | |
398 | char *p; | |
399 | p = table_lookup_model(c); | |
400 | if ( p ) | |
401 | strcpy(c->x86_model_id, p); | |
402 | else | |
403 | /* Last resort... */ | |
404 | sprintf(c->x86_model_id, "%02x/%02x", | |
405 | c->x86_vendor, c->x86_model); | |
406 | } | |
407 | ||
408 | /* Now the feature flags better reflect actual CPU features! */ | |
409 | ||
410 | printk(KERN_DEBUG "CPU: After all inits, caps:"); | |
411 | for (i = 0; i < NCAPINTS; i++) | |
412 | printk(" %08lx", c->x86_capability[i]); | |
413 | printk("\n"); | |
414 | ||
415 | /* | |
416 | * On SMP, boot_cpu_data holds the common feature set between | |
417 | * all CPUs; so make sure that we indicate which features are | |
418 | * common between the CPUs. The first time this routine gets | |
419 | * executed, c == &boot_cpu_data. | |
420 | */ | |
421 | if ( c != &boot_cpu_data ) { | |
422 | /* AND the already accumulated flags with these */ | |
423 | for ( i = 0 ; i < NCAPINTS ; i++ ) | |
424 | boot_cpu_data.x86_capability[i] &= c->x86_capability[i]; | |
425 | } | |
426 | ||
427 | /* Init Machine Check Exception if available. */ | |
428 | #ifdef CONFIG_X86_MCE | |
429 | mcheck_init(c); | |
430 | #endif | |
431 | } | |
432 | ||
433 | #ifdef CONFIG_X86_HT | |
434 | void __init detect_ht(struct cpuinfo_x86 *c) | |
435 | { | |
436 | u32 eax, ebx, ecx, edx; | |
3dd9d514 | 437 | int index_msb, tmp; |
1da177e4 LT |
438 | int cpu = smp_processor_id(); |
439 | ||
63518644 | 440 | if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY)) |
1da177e4 LT |
441 | return; |
442 | ||
443 | cpuid(1, &eax, &ebx, &ecx, &edx); | |
444 | smp_num_siblings = (ebx & 0xff0000) >> 16; | |
445 | ||
446 | if (smp_num_siblings == 1) { | |
447 | printk(KERN_INFO "CPU: Hyper-Threading is disabled\n"); | |
448 | } else if (smp_num_siblings > 1 ) { | |
1da177e4 LT |
449 | index_msb = 31; |
450 | ||
451 | if (smp_num_siblings > NR_CPUS) { | |
452 | printk(KERN_WARNING "CPU: Unsupported number of the siblings %d", smp_num_siblings); | |
453 | smp_num_siblings = 1; | |
454 | return; | |
455 | } | |
456 | tmp = smp_num_siblings; | |
1da177e4 LT |
457 | while ((tmp & 0x80000000 ) == 0) { |
458 | tmp <<=1 ; | |
459 | index_msb--; | |
460 | } | |
3dd9d514 | 461 | if (smp_num_siblings & (smp_num_siblings - 1)) |
1da177e4 LT |
462 | index_msb++; |
463 | phys_proc_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb); | |
464 | ||
465 | printk(KERN_INFO "CPU: Physical Processor ID: %d\n", | |
466 | phys_proc_id[cpu]); | |
3dd9d514 AK |
467 | |
468 | smp_num_siblings = smp_num_siblings / c->x86_num_cores; | |
469 | ||
470 | tmp = smp_num_siblings; | |
471 | index_msb = 31; | |
472 | while ((tmp & 0x80000000) == 0) { | |
473 | tmp <<=1 ; | |
474 | index_msb--; | |
475 | } | |
476 | ||
477 | if (smp_num_siblings & (smp_num_siblings - 1)) | |
478 | index_msb++; | |
479 | ||
480 | cpu_core_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb); | |
481 | ||
482 | if (c->x86_num_cores > 1) | |
483 | printk(KERN_INFO "CPU: Processor Core ID: %d\n", | |
484 | cpu_core_id[cpu]); | |
1da177e4 LT |
485 | } |
486 | } | |
487 | #endif | |
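
/*
 * The index_msb loops above compute ceil(log2(smp_num_siblings)):
 * scan for the most significant set bit, then round up when the
 * sibling count isn't a power of two.  That is the number of low
 * APIC ID bits that phys_pkg_id() shifts away to recover the
 * physical package ID (and, after dividing by x86_num_cores, the
 * core ID).  E.g. 2 siblings -> 1 bit, 3 or 4 siblings -> 2 bits.
 */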
488 | ||
489 | void __init print_cpu_info(struct cpuinfo_x86 *c) | |
490 | { | |
491 | char *vendor = NULL; | |
492 | ||
493 | if (c->x86_vendor < X86_VENDOR_NUM) | |
494 | vendor = this_cpu->c_vendor; | |
495 | else if (c->cpuid_level >= 0) | |
496 | vendor = c->x86_vendor_id; | |
497 | ||
498 | if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor))) | |
499 | printk("%s ", vendor); | |
500 | ||
501 | if (!c->x86_model_id[0]) | |
502 | printk("%d86", c->x86); | |
503 | else | |
504 | printk("%s", c->x86_model_id); | |
505 | ||
506 | if (c->x86_mask || c->cpuid_level >= 0) | |
507 | printk(" stepping %02x\n", c->x86_mask); | |
508 | else | |
509 | printk("\n"); | |
510 | } | |
511 | ||
512 | cpumask_t cpu_initialized __initdata = CPU_MASK_NONE; | |
513 | ||
514 | /* This is hacky. :) | |
515 | * We're emulating future behavior. | |
516 | * In the future, the cpu-specific init functions will be called implicitly | |
517 | * via the magic of initcalls. | |
518 | * They will insert themselves into the cpu_devs structure. | |
519 | * Then, when cpu_init() is called, we can just iterate over that array. | |
520 | */ | |
521 | ||
522 | extern int intel_cpu_init(void); | |
523 | extern int cyrix_init_cpu(void); | |
524 | extern int nsc_init_cpu(void); | |
525 | extern int amd_init_cpu(void); | |
526 | extern int centaur_init_cpu(void); | |
527 | extern int transmeta_init_cpu(void); | |
528 | extern int rise_init_cpu(void); | |
529 | extern int nexgen_init_cpu(void); | |
530 | extern int umc_init_cpu(void); | |
531 | ||
532 | void __init early_cpu_init(void) | |
533 | { | |
534 | intel_cpu_init(); | |
535 | cyrix_init_cpu(); | |
536 | nsc_init_cpu(); | |
537 | amd_init_cpu(); | |
538 | centaur_init_cpu(); | |
539 | transmeta_init_cpu(); | |
540 | rise_init_cpu(); | |
541 | nexgen_init_cpu(); | |
542 | umc_init_cpu(); | |
543 | early_cpu_detect(); | |
544 | ||
545 | #ifdef CONFIG_DEBUG_PAGEALLOC | |
546 | /* pse is not compatible with on-the-fly unmapping, | |
547 | * disable it even if the cpus claim to support it. | |
548 | */ | |
549 | clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability); | |
550 | disable_pse = 1; | |
551 | #endif | |
552 | } | |

/*
 * cpu_init() initializes state that is per-CPU.  Some data is already
 * initialized (naturally) in the bootstrap process, such as the GDT
 * and IDT.  We reload them nevertheless; this function acts as a
 * 'CPU state barrier': nothing should get across.
 */
void __init cpu_init(void)
{
        int cpu = smp_processor_id();
        struct tss_struct *t = &per_cpu(init_tss, cpu);
        struct thread_struct *thread = &current->thread;
        __u32 stk16_off = (__u32)&per_cpu(cpu_16bit_stack, cpu);

        if (cpu_test_and_set(cpu, cpu_initialized)) {
                printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
                for (;;) local_irq_enable();
        }
        printk(KERN_INFO "Initializing CPU#%d\n", cpu);

        if (cpu_has_vme || cpu_has_tsc || cpu_has_de)
                clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
        if (tsc_disable && cpu_has_tsc) {
                printk(KERN_NOTICE "Disabling TSC...\n");
                /**** FIX-HPA: DOES THIS REALLY BELONG HERE? ****/
                clear_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability);
                set_in_cr4(X86_CR4_TSD);
        }

        /*
         * Initialize the per-CPU GDT with the boot GDT,
         * and set up the GDT descriptor:
         */
        memcpy(&per_cpu(cpu_gdt_table, cpu), cpu_gdt_table,
               GDT_SIZE);

        /* Set up the GDT entry for the 16-bit stack. */
        *(__u64 *)&(per_cpu(cpu_gdt_table, cpu)[GDT_ENTRY_ESPFIX_SS]) |=
                ((((__u64)stk16_off) << 16) & 0x000000ffffff0000ULL) |
                ((((__u64)stk16_off) << 32) & 0xff00000000000000ULL) |
                (CPU_16BIT_STACK_SIZE - 1);
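        /*
         * Descriptor plumbing note: in an x86 segment descriptor the
         * 32-bit base is split across descriptor bits 16-39 (base
         * bits 0-23) and bits 56-63 (base bits 24-31), with the limit
         * in the low bits.  The two shift-and-mask terms above drop
         * stk16_off into those fields, and CPU_16BIT_STACK_SIZE - 1
         * supplies the limit.
         */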
593 | ||
594 | cpu_gdt_descr[cpu].size = GDT_SIZE - 1; | |
595 | cpu_gdt_descr[cpu].address = | |
596 | (unsigned long)&per_cpu(cpu_gdt_table, cpu); | |
597 | ||
598 | /* | |
599 | * Set up the per-thread TLS descriptor cache: | |
600 | */ | |
601 | memcpy(thread->tls_array, &per_cpu(cpu_gdt_table, cpu), | |
602 | GDT_ENTRY_TLS_ENTRIES * 8); | |
603 | ||
604 | __asm__ __volatile__("lgdt %0" : : "m" (cpu_gdt_descr[cpu])); | |
605 | __asm__ __volatile__("lidt %0" : : "m" (idt_descr)); | |
606 | ||
607 | /* | |
608 | * Delete NT | |
609 | */ | |
610 | __asm__("pushfl ; andl $0xffffbfff,(%esp) ; popfl"); | |
611 | ||
612 | /* | |
613 | * Set up and load the per-CPU TSS and LDT | |
614 | */ | |
615 | atomic_inc(&init_mm.mm_count); | |
616 | current->active_mm = &init_mm; | |
617 | if (current->mm) | |
618 | BUG(); | |
619 | enter_lazy_tlb(&init_mm, current); | |
620 | ||
621 | load_esp0(t, thread); | |
622 | set_tss_desc(cpu,t); | |
623 | load_TR_desc(); | |
624 | load_LDT(&init_mm.context); | |
625 | ||
626 | /* Set up doublefault TSS pointer in the GDT */ | |
627 | __set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss); | |
628 | ||
629 | /* Clear %fs and %gs. */ | |
630 | asm volatile ("xorl %eax, %eax; movl %eax, %fs; movl %eax, %gs"); | |
631 | ||
632 | /* Clear all 6 debug registers: */ | |
633 | ||
634 | #define CD(register) __asm__("movl %0,%%db" #register ::"r"(0) ); | |
635 | ||
636 | CD(0); CD(1); CD(2); CD(3); /* no db4 and db5 */; CD(6); CD(7); | |
637 | ||
638 | #undef CD | |
639 | ||
640 | /* | |
641 | * Force FPU initialization: | |
642 | */ | |
643 | current_thread_info()->status = 0; | |
644 | clear_used_math(); | |
645 | mxcsr_feature_mask_init(); | |
646 | } |