#include <linux/init.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/bootmem.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/msr.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/mtrr.h>
#include <asm/mce.h>
#include <asm/pat.h>
#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <asm/apic.h>
#include <mach_apic.h>
#endif

#include "cpu.h"

DEFINE_PER_CPU(struct gdt_page, gdt_page) = { .gdt = {
        [GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00cf9a00 } } },
        [GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9200 } } },
        [GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00cffa00 } } },
        [GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff200 } } },
        /*
         * Segments used for calling PnP BIOS have byte granularity.
         * The code and data segments have fixed 64k limits; the
         * transfer segment sizes are set at run time.
         */
        /* 32-bit code */
        [GDT_ENTRY_PNPBIOS_CS32] = { { { 0x0000ffff, 0x00409a00 } } },
        /* 16-bit code */
        [GDT_ENTRY_PNPBIOS_CS16] = { { { 0x0000ffff, 0x00009a00 } } },
        /* 16-bit data */
        [GDT_ENTRY_PNPBIOS_DS] = { { { 0x0000ffff, 0x00009200 } } },
        /* 16-bit data */
        [GDT_ENTRY_PNPBIOS_TS1] = { { { 0x00000000, 0x00009200 } } },
        /* 16-bit data */
        [GDT_ENTRY_PNPBIOS_TS2] = { { { 0x00000000, 0x00009200 } } },
        /*
         * The APM segments have byte granularity and their bases
         * are set at run time.  All have 64k limits.
         */
        /* 32-bit code */
        [GDT_ENTRY_APMBIOS_BASE] = { { { 0x0000ffff, 0x00409a00 } } },
        /* 16-bit code */
        [GDT_ENTRY_APMBIOS_BASE+1] = { { { 0x0000ffff, 0x00009a00 } } },
        /* data */
        [GDT_ENTRY_APMBIOS_BASE+2] = { { { 0x0000ffff, 0x00409200 } } },

        [GDT_ENTRY_ESPFIX_SS] = { { { 0x00000000, 0x00c09200 } } },
        [GDT_ENTRY_PERCPU] = { { { 0x00000000, 0x00000000 } } },
} };
EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);

__u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;

static int cachesize_override __cpuinitdata = -1;
static int disable_x86_serial_nr __cpuinitdata = 1;

struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};

static void __cpuinit default_init(struct cpuinfo_x86 *c)
{
        /* Not much we can do here... */
        /* Check if at least it has cpuid */
        if (c->cpuid_level == -1) {
                /* No cpuid.  It must be an ancient CPU */
                if (c->x86 == 4)
                        strcpy(c->x86_model_id, "486");
                else if (c->x86 == 3)
                        strcpy(c->x86_model_id, "386");
        }
}

static struct cpu_dev __cpuinitdata default_cpu = {
        .c_init = default_init,
        .c_vendor = "Unknown",
};
static struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;

static int __init cachesize_setup(char *str)
{
        get_option(&str, &cachesize_override);
        return 1;
}
__setup("cachesize=", cachesize_setup);

int __cpuinit get_model_name(struct cpuinfo_x86 *c)
{
        unsigned int *v;
        char *p, *q;

        if (cpuid_eax(0x80000000) < 0x80000004)
                return 0;

        v = (unsigned int *) c->x86_model_id;
        cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
        cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
        cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
        c->x86_model_id[48] = 0;

        /* Intel chips right-justify this string for some dumb reason;
           undo that brain damage */
        p = q = &c->x86_model_id[0];
        while (*p == ' ')
                p++;
        if (p != q) {
                while (*p)
                        *q++ = *p++;
                while (q <= &c->x86_model_id[48])
                        *q++ = '\0';    /* Zero-pad the rest */
        }

        return 1;
}


void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
{
        unsigned int n, dummy, ecx, edx, l2size;

        n = cpuid_eax(0x80000000);

        if (n >= 0x80000005) {
                cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
                printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
                       edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
                c->x86_cache_size = (ecx>>24)+(edx>>24);
        }

        if (n < 0x80000006)     /* Some chips just have a large L1. */
                return;

        ecx = cpuid_ecx(0x80000006);
        l2size = ecx >> 16;

        /* do processor-specific cache resizing */
        if (this_cpu->c_size_cache)
                l2size = this_cpu->c_size_cache(c, l2size);

        /* Allow user to override all this if necessary. */
        if (cachesize_override != -1)
                l2size = cachesize_override;

        if (l2size == 0)
                return;         /* Again, no L2 cache is possible */

        c->x86_cache_size = l2size;

        printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
               l2size, ecx & 0xFF);
}

/*
 * Naming convention should be: <Name> [(<Codename>)]
 * This table is only used if init_<vendor>() below doesn't set the
 * model name; in particular, if CPUID levels 0x80000002..4 are
 * supported, it isn't used.
 */
165 | ||
166 | /* Look up CPU names by table lookup. */ | |
167 | static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c) | |
168 | { | |
169 | struct cpu_model_info *info; | |
170 | ||
171 | if (c->x86_model >= 16) | |
172 | return NULL; /* Range check */ | |
173 | ||
174 | if (!this_cpu) | |
175 | return NULL; | |
176 | ||
177 | info = this_cpu->c_models; | |
178 | ||
179 | while (info && info->family) { | |
180 | if (info->family == c->x86) | |
181 | return info->model_names[c->x86_model]; | |
182 | info++; | |
183 | } | |
184 | return NULL; /* Not found */ | |
185 | } | |
186 | ||
187 | ||
188 | static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c, int early) | |
189 | { | |
190 | char *v = c->x86_vendor_id; | |
191 | int i; | |
192 | static int printed; | |
193 | ||
194 | for (i = 0; i < X86_VENDOR_NUM; i++) { | |
195 | if (cpu_devs[i]) { | |
196 | if (!strcmp(v, cpu_devs[i]->c_ident[0]) || | |
197 | (cpu_devs[i]->c_ident[1] && | |
198 | !strcmp(v, cpu_devs[i]->c_ident[1]))) { | |
199 | c->x86_vendor = i; | |
200 | if (!early) | |
201 | this_cpu = cpu_devs[i]; | |
202 | return; | |
203 | } | |
204 | } | |
205 | } | |
206 | if (!printed) { | |
207 | printed++; | |
208 | printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n"); | |
209 | printk(KERN_ERR "CPU: Your system may be unstable.\n"); | |
210 | } | |
211 | c->x86_vendor = X86_VENDOR_UNKNOWN; | |
212 | this_cpu = &default_cpu; | |
213 | } | |
214 | ||
215 | ||
216 | static int __init x86_fxsr_setup(char *s) | |
217 | { | |
218 | setup_clear_cpu_cap(X86_FEATURE_FXSR); | |
219 | setup_clear_cpu_cap(X86_FEATURE_XMM); | |
220 | return 1; | |
221 | } | |
222 | __setup("nofxsr", x86_fxsr_setup); | |
223 | ||
224 | ||
225 | static int __init x86_sep_setup(char *s) | |
226 | { | |
227 | setup_clear_cpu_cap(X86_FEATURE_SEP); | |
228 | return 1; | |
229 | } | |
230 | __setup("nosep", x86_sep_setup); | |
231 | ||
232 | ||
233 | /* Standard macro to see if a specific flag is changeable */ | |
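/*
 * Push EFLAGS with the given bit flipped, pop it back into EFLAGS,
 * then read EFLAGS again: if the bit stayed flipped, it is changeable.
 * EFLAGS.ID (bit 21) is only changeable on CPUs that implement the
 * CPUID instruction.
 */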
static inline int flag_is_changeable_p(u32 flag)
{
        u32 f1, f2;

        asm("pushfl\n\t"
            "pushfl\n\t"
            "popl %0\n\t"
            "movl %0,%1\n\t"
            "xorl %2,%0\n\t"
            "pushl %0\n\t"
            "popfl\n\t"
            "pushfl\n\t"
            "popl %0\n\t"
            "popfl\n\t"
            : "=&r" (f1), "=&r" (f2)
            : "ir" (flag));

        return ((f1^f2) & flag) != 0;
}


/* Probe for the CPUID instruction */
static int __cpuinit have_cpuid_p(void)
{
        return flag_is_changeable_p(X86_EFLAGS_ID);
}

void __init cpu_detect(struct cpuinfo_x86 *c)
{
        /* Get vendor name */
        cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
              (unsigned int *)&c->x86_vendor_id[0],
              (unsigned int *)&c->x86_vendor_id[8],
              (unsigned int *)&c->x86_vendor_id[4]);

        c->x86 = 4;
        if (c->cpuid_level >= 0x00000001) {
                u32 junk, tfms, cap0, misc;
                cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
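                /*
                 * Leaf 1 EAX: stepping in bits 3:0, model in 7:4,
                 * family in 11:8, extended model in 19:16, extended
                 * family in 27:20.  The extended family only counts
                 * for family 0xf, the extended model from family 6 up.
                 * EDX bit 19 (cap0) signals CLFLUSH support; the line
                 * size sits in EBX bits 15:8 in units of 8 bytes.
                 */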
                c->x86 = (tfms >> 8) & 15;
                c->x86_model = (tfms >> 4) & 15;
                if (c->x86 == 0xf)
                        c->x86 += (tfms >> 20) & 0xff;
                if (c->x86 >= 0x6)
                        c->x86_model += ((tfms >> 16) & 0xF) << 4;
                c->x86_mask = tfms & 15;
                if (cap0 & (1<<19)) {
                        c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8;
                        c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
                }
        }
}

static void __cpuinit early_get_cap(struct cpuinfo_x86 *c)
{
        u32 tfms, xlvl;
        unsigned int ebx;

        memset(&c->x86_capability, 0, sizeof c->x86_capability);
        if (have_cpuid_p()) {
                /* Intel-defined flags: level 0x00000001 */
                if (c->cpuid_level >= 0x00000001) {
                        u32 capability, excap;
                        cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
                        c->x86_capability[0] = capability;
                        c->x86_capability[4] = excap;
                }

                /* AMD-defined flags: level 0x80000001 */
                xlvl = cpuid_eax(0x80000000);
                if ((xlvl & 0xffff0000) == 0x80000000) {
                        if (xlvl >= 0x80000001) {
                                c->x86_capability[1] = cpuid_edx(0x80000001);
                                c->x86_capability[6] = cpuid_ecx(0x80000001);
                        }
                }
        }
}

/*
 * Do minimum CPU detection early.
 * Fields really needed: vendor, cpuid_level, family, model, mask,
 * cache alignment.
 * The others are not touched to avoid unwanted side effects.
 *
 * WARNING: this function is only called on the BP.  Don't add code here
 * that is supposed to run on all CPUs.
 */
static void __init early_cpu_detect(void)
{
        struct cpuinfo_x86 *c = &boot_cpu_data;

        c->x86_cache_alignment = 32;
        c->x86_clflush_size = 32;

        if (!have_cpuid_p())
                return;

        cpu_detect(c);

        get_cpu_vendor(c, 1);

        if (c->x86_vendor != X86_VENDOR_UNKNOWN &&
            cpu_devs[c->x86_vendor]->c_early_init)
                cpu_devs[c->x86_vendor]->c_early_init(c);

        early_get_cap(c);
}
343 | ||
344 | static void __cpuinit generic_identify(struct cpuinfo_x86 *c) | |
345 | { | |
346 | u32 tfms, xlvl; | |
347 | unsigned int ebx; | |
348 | ||
349 | if (have_cpuid_p()) { | |
350 | /* Get vendor name */ | |
351 | cpuid(0x00000000, (unsigned int *)&c->cpuid_level, | |
352 | (unsigned int *)&c->x86_vendor_id[0], | |
353 | (unsigned int *)&c->x86_vendor_id[8], | |
354 | (unsigned int *)&c->x86_vendor_id[4]); | |
355 | ||
356 | get_cpu_vendor(c, 0); | |
357 | /* Initialize the standard set of capabilities */ | |
358 | /* Note that the vendor-specific code below might override */ | |
359 | /* Intel-defined flags: level 0x00000001 */ | |
360 | if (c->cpuid_level >= 0x00000001) { | |
361 | u32 capability, excap; | |
362 | cpuid(0x00000001, &tfms, &ebx, &excap, &capability); | |
363 | c->x86_capability[0] = capability; | |
364 | c->x86_capability[4] = excap; | |
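                        /* Decode the signature exactly as in cpu_detect() */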
                        c->x86 = (tfms >> 8) & 15;
                        c->x86_model = (tfms >> 4) & 15;
                        if (c->x86 == 0xf)
                                c->x86 += (tfms >> 20) & 0xff;
                        if (c->x86 >= 0x6)
                                c->x86_model += ((tfms >> 16) & 0xF) << 4;
                        c->x86_mask = tfms & 15;
                        c->initial_apicid = (ebx >> 24) & 0xFF;
#ifdef CONFIG_X86_HT
                        c->apicid = phys_pkg_id(c->initial_apicid, 0);
                        c->phys_proc_id = c->initial_apicid;
#else
                        c->apicid = c->initial_apicid;
#endif
                        if (test_cpu_cap(c, X86_FEATURE_CLFLSH))
                                c->x86_clflush_size = ((ebx >> 8) & 0xff) * 8;
                } else {
                        /* Have CPUID level 0 only - unheard of */
                        c->x86 = 4;
                }

                /* AMD-defined flags: level 0x80000001 */
                xlvl = cpuid_eax(0x80000000);
                if ((xlvl & 0xffff0000) == 0x80000000) {
                        if (xlvl >= 0x80000001) {
                                c->x86_capability[1] = cpuid_edx(0x80000001);
                                c->x86_capability[6] = cpuid_ecx(0x80000001);
                        }
                        if (xlvl >= 0x80000004)
                                get_model_name(c); /* Default name */
                }

                init_scattered_cpuid_features(c);
        }
}

static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
        if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr) {
                /* Disable processor serial number */
                unsigned long lo, hi;
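                /*
                 * Setting bit 21 of MSR_IA32_BBL_CR_CTL disables the
                 * Pentium III processor serial number; CPUID stops
                 * advertising the feature once the bit is set.
                 */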
                rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
                lo |= 0x200000;
                wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
                printk(KERN_NOTICE "CPU serial number disabled.\n");
                clear_cpu_cap(c, X86_FEATURE_PN);

                /* Disabling the serial number may affect the cpuid level */
                c->cpuid_level = cpuid_eax(0);
        }
}

static int __init x86_serial_nr_setup(char *s)
{
        disable_x86_serial_nr = 0;
        return 1;
}
__setup("serialnumber", x86_serial_nr_setup);


/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
{
        int i;

        c->loops_per_jiffy = loops_per_jiffy;
        c->x86_cache_size = -1;
        c->x86_vendor = X86_VENDOR_UNKNOWN;
        c->cpuid_level = -1;    /* CPUID not detected */
        c->x86_model = c->x86_mask = 0; /* So far unknown... */
        c->x86_vendor_id[0] = '\0'; /* Unset */
        c->x86_model_id[0] = '\0';  /* Unset */
        c->x86_max_cores = 1;
        c->x86_clflush_size = 32;
        memset(&c->x86_capability, 0, sizeof c->x86_capability);

        if (!have_cpuid_p()) {
                /*
                 * First of all, decide if this is a 486 or higher.
                 * It's a 486 if we can modify the AC flag.
                 */
                if (flag_is_changeable_p(X86_EFLAGS_AC))
                        c->x86 = 4;
                else
                        c->x86 = 3;
        }

        generic_identify(c);

        if (this_cpu->c_identify)
                this_cpu->c_identify(c);

        /*
         * Vendor-specific initialization.  In this section we
         * canonicalize the feature flags, meaning if there are
         * features a certain CPU supports which CPUID doesn't
         * tell us, CPUID claiming incorrect flags, or other bugs,
         * we handle them here.
         *
         * At the end of this section, c->x86_capability better
         * indicate the features this CPU genuinely supports!
         */
        if (this_cpu->c_init)
                this_cpu->c_init(c);

        /* Disable the PN if appropriate */
        squash_the_stupid_serial_number(c);

        /*
         * The vendor-specific functions might have changed features.  Now
         * we do "generic changes."
         */

        /* If the model name is still unset, do table lookup. */
        if (!c->x86_model_id[0]) {
                char *p;
                p = table_lookup_model(c);
                if (p)
                        strcpy(c->x86_model_id, p);
                else
                        /* Last resort... */
                        sprintf(c->x86_model_id, "%02x/%02x",
                                c->x86, c->x86_model);
        }

        /*
         * On SMP, boot_cpu_data holds the common feature set between
         * all CPUs; so make sure that we indicate which features are
         * common between the CPUs.  The first time this routine gets
         * executed, c == &boot_cpu_data.
         */
        if (c != &boot_cpu_data) {
                /* AND the already accumulated flags with these */
                for (i = 0 ; i < NCAPINTS ; i++)
                        boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
        }

        /* Clear all flags overridden by options */
        for (i = 0; i < NCAPINTS; i++)
                c->x86_capability[i] &= ~cleared_cpu_caps[i];

        /* Init Machine Check Exception if available. */
        mcheck_init(c);

        select_idle_routine(c);
}

void __init identify_boot_cpu(void)
{
        identify_cpu(&boot_cpu_data);
        sysenter_setup();
        enable_sep_cpu();
}

void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
{
        BUG_ON(c == &boot_cpu_data);
        identify_cpu(c);
        enable_sep_cpu();
        mtrr_ap_init();
}

#ifdef CONFIG_X86_HT
void __cpuinit detect_ht(struct cpuinfo_x86 *c)
{
        u32 eax, ebx, ecx, edx;
        int index_msb, core_bits;

        cpuid(1, &eax, &ebx, &ecx, &edx);

        if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
                return;

        smp_num_siblings = (ebx & 0xff0000) >> 16;

        if (smp_num_siblings == 1) {
                printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
        } else if (smp_num_siblings > 1) {

                if (smp_num_siblings > NR_CPUS) {
                        printk(KERN_WARNING "CPU: Unsupported number of "
                               "siblings %d\n", smp_num_siblings);
                        smp_num_siblings = 1;
                        return;
                }

                index_msb = get_count_order(smp_num_siblings);
                c->phys_proc_id = phys_pkg_id(c->initial_apicid, index_msb);

                printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
                       c->phys_proc_id);

                smp_num_siblings = smp_num_siblings / c->x86_max_cores;

                index_msb = get_count_order(smp_num_siblings);

                core_bits = get_count_order(c->x86_max_cores);

                c->cpu_core_id = phys_pkg_id(c->initial_apicid, index_msb) &
                                 ((1 << core_bits) - 1);

                if (c->x86_max_cores > 1)
                        printk(KERN_INFO "CPU: Processor Core ID: %d\n",
                               c->cpu_core_id);
        }
}
#endif

static __init int setup_noclflush(char *arg)
{
        setup_clear_cpu_cap(X86_FEATURE_CLFLSH);
        return 1;
}
__setup("noclflush", setup_noclflush);

void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
{
        char *vendor = NULL;

        if (c->x86_vendor < X86_VENDOR_NUM)
                vendor = this_cpu->c_vendor;
        else if (c->cpuid_level >= 0)
                vendor = c->x86_vendor_id;

        if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor)))
                printk("%s ", vendor);

        if (!c->x86_model_id[0])
                printk("%d86", c->x86);
        else
                printk("%s", c->x86_model_id);

        if (c->x86_mask || c->cpuid_level >= 0)
                printk(" stepping %02x\n", c->x86_mask);
        else
                printk("\n");
}

static __init int setup_disablecpuid(char *arg)
{
        int bit;
        if (get_option(&arg, &bit) && bit < NCAPINTS*32)
                setup_clear_cpu_cap(bit);
        else
                return 0;
        return 1;
}
__setup("clearcpuid=", setup_disablecpuid);

cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;

void __init early_cpu_init(void)
{
        struct cpu_vendor_dev *cvdev;

        for (cvdev = __x86cpuvendor_start;
             cvdev < __x86cpuvendor_end;
             cvdev++)
                cpu_devs[cvdev->vendor] = cvdev->cpu_dev;

        early_cpu_detect();
        validate_pat_support(&boot_cpu_data);
}

/* Make sure %fs is initialized properly in idle threads */
struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
{
        memset(regs, 0, sizeof(struct pt_regs));
        regs->fs = __KERNEL_PERCPU;
        return regs;
}

/* Current gdt points %fs at the "master" per-cpu area: after this,
 * it's on the real one. */
void switch_to_new_gdt(void)
{
        struct desc_ptr gdt_descr;

        gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id());
        gdt_descr.size = GDT_SIZE - 1;
        load_gdt(&gdt_descr);
        asm("mov %0, %%fs" : : "r" (__KERNEL_PERCPU) : "memory");
}

/*
 * cpu_init() initializes state that is per-CPU.  Some data is already
 * initialized (naturally) in the bootstrap process, such as the GDT
 * and IDT.  We reload them nevertheless, this function acts as a
 * 'CPU state barrier', nothing should get across.
 */
void __cpuinit cpu_init(void)
{
        int cpu = smp_processor_id();
        struct task_struct *curr = current;
        struct tss_struct *t = &per_cpu(init_tss, cpu);
        struct thread_struct *thread = &curr->thread;

        if (cpu_test_and_set(cpu, cpu_initialized)) {
                printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
                for (;;)
                        local_irq_enable();
        }

        printk(KERN_INFO "Initializing CPU#%d\n", cpu);

        if (cpu_has_vme || cpu_has_tsc || cpu_has_de)
                clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);

        load_idt(&idt_descr);
        switch_to_new_gdt();

        /*
         * Set up and load the per-CPU TSS and LDT
         */
        atomic_inc(&init_mm.mm_count);
        curr->active_mm = &init_mm;
        if (curr->mm)
                BUG();
        enter_lazy_tlb(&init_mm, curr);

        load_sp0(t, thread);
        set_tss_desc(cpu, t);
        load_TR_desc();
        load_LDT(&init_mm.context);

#ifdef CONFIG_DOUBLEFAULT
        /* Set up doublefault TSS pointer in the GDT */
        __set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
#endif

        /* Clear %gs. */
        asm volatile ("mov %0, %%gs" : : "r" (0));

        /* Clear all 6 debug registers: */
        set_debugreg(0, 0);
        set_debugreg(0, 1);
        set_debugreg(0, 2);
        set_debugreg(0, 3);
        set_debugreg(0, 6);
        set_debugreg(0, 7);

        /*
         * Force FPU initialization:
         */
        current_thread_info()->status = 0;
        clear_used_math();
        mxcsr_feature_mask_init();
}

#ifdef CONFIG_HOTPLUG_CPU
void __cpuinit cpu_uninit(void)
{
        int cpu = raw_smp_processor_id();
        cpu_clear(cpu, cpu_initialized);

        /* lazy TLB state */
        per_cpu(cpu_tlbstate, cpu).state = 0;
        per_cpu(cpu_tlbstate, cpu).active_mm = &init_mm;
}
#endif