#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/bootmem.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/kgdb.h>
#include <linux/topology.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <asm/i387.h>
#include <asm/msr.h>
#include <asm/io.h>
#include <asm/linkage.h>
#include <asm/mmu_context.h>
#include <asm/mtrr.h>
#include <asm/mce.h>
#include <asm/pat.h>
#include <asm/asm.h>
#include <asm/numa.h>
#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <asm/apic.h>
#include <mach_apic.h>
#include <asm/genapic.h>
#endif

#include <asm/pda.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/desc.h>
#include <asm/atomic.h>
#include <asm/proto.h>
#include <asm/sections.h>
#include <asm/setup.h>

#include "cpu.h"

static struct cpu_dev *this_cpu __cpuinitdata;

#ifdef CONFIG_X86_64
/*
 * We need valid kernel segments for data and code in long mode too;
 * IRET will check the segment types.  kkeil 2000/10/28
 * Also, sysret mandates a special GDT layout.
 */
/* The TLS descriptors are currently at a different place compared to i386.
   Hopefully nobody expects them at a fixed place (Wine?) */
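/*
 * Each initializer below packs a segment descriptor as two 32-bit
 * words { low, high }: 0x0000ffff sets base = 0 and limit[15:0] =
 * 0xffff, while the high word carries limit[19:16], the access byte
 * and the flags.  E.g. 0x00af9b00 is a present ring-0 code segment
 * with L=1 (64-bit) and a 4 GB limit; 0x00cf9300 is the 32-bit
 * ring-0 data equivalent (G=1, D/B=1).
 */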
DEFINE_PER_CPU(struct gdt_page, gdt_page) = { .gdt = {
	[GDT_ENTRY_KERNEL32_CS] = { { { 0x0000ffff, 0x00cf9b00 } } },
	[GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00af9b00 } } },
	[GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9300 } } },
	[GDT_ENTRY_DEFAULT_USER32_CS] = { { { 0x0000ffff, 0x00cffb00 } } },
	[GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff300 } } },
	[GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00affb00 } } },
} };
#else
DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
	[GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00cf9a00 } } },
	[GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9200 } } },
	[GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00cffa00 } } },
	[GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff200 } } },
	/*
	 * Segments used for calling PnP BIOS have byte granularity.
	 * The code and data segments have fixed 64k limits;
	 * the transfer segment sizes are set at run time.
	 */
	/* 32-bit code */
	[GDT_ENTRY_PNPBIOS_CS32] = { { { 0x0000ffff, 0x00409a00 } } },
	/* 16-bit code */
	[GDT_ENTRY_PNPBIOS_CS16] = { { { 0x0000ffff, 0x00009a00 } } },
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_DS] = { { { 0x0000ffff, 0x00009200 } } },
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_TS1] = { { { 0x00000000, 0x00009200 } } },
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_TS2] = { { { 0x00000000, 0x00009200 } } },
	/*
	 * The APM segments have byte granularity and their bases
	 * are set at run time.  All have 64k limits.
	 */
	/* 32-bit code */
	[GDT_ENTRY_APMBIOS_BASE] = { { { 0x0000ffff, 0x00409a00 } } },
	/* 16-bit code */
	[GDT_ENTRY_APMBIOS_BASE+1] = { { { 0x0000ffff, 0x00009a00 } } },
	/* data */
	[GDT_ENTRY_APMBIOS_BASE+2] = { { { 0x0000ffff, 0x00409200 } } },

	[GDT_ENTRY_ESPFIX_SS] = { { { 0x00000000, 0x00c09200 } } },
	[GDT_ENTRY_PERCPU] = { { { 0x00000000, 0x00000000 } } },
} };
#endif
EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);

#ifdef CONFIG_X86_32
static int cachesize_override __cpuinitdata = -1;
static int disable_x86_serial_nr __cpuinitdata = 1;

static int __init cachesize_setup(char *str)
{
	get_option(&str, &cachesize_override);
	return 1;
}
__setup("cachesize=", cachesize_setup);

static int __init x86_fxsr_setup(char *s)
{
	setup_clear_cpu_cap(X86_FEATURE_FXSR);
	setup_clear_cpu_cap(X86_FEATURE_XMM);
	return 1;
}
__setup("nofxsr", x86_fxsr_setup);

static int __init x86_sep_setup(char *s)
{
	setup_clear_cpu_cap(X86_FEATURE_SEP);
	return 1;
}
__setup("nosep", x86_sep_setup);

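/*
 * A flag is "changeable" if flipping it in EFLAGS sticks.  EFLAGS.ID
 * (bit 21) is only writable when the CPUID instruction is implemented,
 * which is what have_cpuid_p() below relies on.
 */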
/* Standard macro to see if a specific flag is changeable */
static inline int flag_is_changeable_p(u32 flag)
{
	u32 f1, f2;

	asm("pushfl\n\t"		/* save EFLAGS */
	    "pushfl\n\t"
	    "popl %0\n\t"		/* f1 = EFLAGS */
	    "movl %0,%1\n\t"		/* f2 = f1 */
	    "xorl %2,%0\n\t"		/* toggle the flag under test */
	    "pushl %0\n\t"
	    "popfl\n\t"			/* write it back ... */
	    "pushfl\n\t"
	    "popl %0\n\t"		/* ... and read the result */
	    "popfl\n\t"			/* restore the saved EFLAGS */
	    : "=&r" (f1), "=&r" (f2)
	    : "ir" (flag));

	return ((f1^f2) & flag) != 0;
}

/* Probe for the CPUID instruction */
static int __cpuinit have_cpuid_p(void)
{
	return flag_is_changeable_p(X86_EFLAGS_ID);
}

static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr) {
		/* Disable processor serial number */
		unsigned long lo, hi;
		rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
		lo |= 0x200000;
		wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
		printk(KERN_NOTICE "CPU serial number disabled.\n");
		clear_cpu_cap(c, X86_FEATURE_PN);

		/* Disabling the serial number may affect the cpuid level */
		c->cpuid_level = cpuid_eax(0);
	}
}

static int __init x86_serial_nr_setup(char *s)
{
	disable_x86_serial_nr = 0;
	return 1;
}
__setup("serialnumber", x86_serial_nr_setup);
#else
static inline int flag_is_changeable_p(u32 flag)
{
	return 1;
}
/* Probe for the CPUID instruction */
static inline int have_cpuid_p(void)
{
	return 1;
}
static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
}
#endif

/*
 * Naming convention should be: <Name> [(<Codename>)]
 * This table is only used if init_<vendor>() below doesn't set the
 * model name; in particular, it isn't used when CPUID levels
 * 0x80000002..4 are supported.
 */

/* Look up CPU model names by table lookup. */
static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
{
	struct cpu_model_info *info;

	if (c->x86_model >= 16)
		return NULL;	/* Range check */

	if (!this_cpu)
		return NULL;

	info = this_cpu->c_models;

	while (info && info->family) {
		if (info->family == c->x86)
			return info->model_names[c->x86_model];
		info++;
	}
	return NULL;		/* Not found */
}

__u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;

/* Current gdt points %fs at the "master" per-cpu area: after this,
 * it's on the real one. */
void switch_to_new_gdt(void)
{
	struct desc_ptr gdt_descr;

	gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id());
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);
#ifdef CONFIG_X86_32
	asm("mov %0, %%fs" : : "r" (__KERNEL_PERCPU) : "memory");
#endif
}

static struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};

static void __cpuinit default_init(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
	display_cacheinfo(c);
#else
	/* Not much we can do here... */
	/* Check if at least it has cpuid */
	if (c->cpuid_level == -1) {
		/* No cpuid. It must be an ancient CPU */
		if (c->x86 == 4)
			strcpy(c->x86_model_id, "486");
		else if (c->x86 == 3)
			strcpy(c->x86_model_id, "386");
	}
#endif
}

static struct cpu_dev __cpuinitdata default_cpu = {
	.c_init		= default_init,
	.c_vendor	= "Unknown",
	.c_x86_vendor	= X86_VENDOR_UNKNOWN,
};

static void __cpuinit get_model_name(struct cpuinfo_x86 *c)
{
	unsigned int *v;
	char *p, *q;

	if (c->extended_cpuid_level < 0x80000004)
		return;

	v = (unsigned int *) c->x86_model_id;
	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
	c->x86_model_id[48] = 0;

	/* Intel chips right-justify this string for some dumb reason;
	   undo that brain damage */
	p = q = &c->x86_model_id[0];
	while (*p == ' ')
		p++;
	if (p != q) {
		while (*p)
			*q++ = *p++;
		while (q <= &c->x86_model_id[48])
			*q++ = '\0';	/* Zero-pad the rest */
	}
}

void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
{
	unsigned int n, dummy, ebx, ecx, edx, l2size;

	n = c->extended_cpuid_level;

	if (n >= 0x80000005) {
		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
				edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
		c->x86_cache_size = (ecx>>24) + (edx>>24);
#ifdef CONFIG_X86_64
		/* On K8 L1 TLB is inclusive, so don't count it */
		c->x86_tlbsize = 0;
#endif
	}

	if (n < 0x80000006)	/* Some chips just have a large L1. */
		return;

	cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
	l2size = ecx >> 16;

#ifdef CONFIG_X86_64
	c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
#else
	/* do processor-specific cache resizing */
	if (this_cpu->c_size_cache)
		l2size = this_cpu->c_size_cache(c, l2size);

	/* Allow user to override all this if necessary. */
	if (cachesize_override != -1)
		l2size = cachesize_override;

	if (l2size == 0)
		return;		/* Again, no L2 cache is possible */
#endif

	c->x86_cache_size = l2size;

	printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
			l2size, ecx & 0xFF);
}

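/*
 * CPUID leaf 1, EBX[23:16] reports the number of logical processors
 * per physical package.  The low get_count_order(siblings) bits of
 * the initial APIC ID select the thread and core within the package;
 * the remaining high bits identify the package itself.
 */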
void __cpuinit detect_ht(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_HT
	u32 eax, ebx, ecx, edx;
	int index_msb, core_bits;

	if (!cpu_has(c, X86_FEATURE_HT))
		return;

	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
		goto out;

	if (cpu_has(c, X86_FEATURE_XTOPOLOGY))
		return;

	cpuid(1, &eax, &ebx, &ecx, &edx);

	smp_num_siblings = (ebx & 0xff0000) >> 16;

	if (smp_num_siblings == 1) {
		printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
	} else if (smp_num_siblings > 1) {

		if (smp_num_siblings > NR_CPUS) {
			printk(KERN_WARNING "CPU: Unsupported number of siblings %d",
					smp_num_siblings);
			smp_num_siblings = 1;
			return;
		}

		index_msb = get_count_order(smp_num_siblings);
#ifdef CONFIG_X86_64
		c->phys_proc_id = phys_pkg_id(index_msb);
#else
		c->phys_proc_id = phys_pkg_id(c->initial_apicid, index_msb);
#endif

		smp_num_siblings = smp_num_siblings / c->x86_max_cores;

		index_msb = get_count_order(smp_num_siblings);

		core_bits = get_count_order(c->x86_max_cores);

#ifdef CONFIG_X86_64
		c->cpu_core_id = phys_pkg_id(index_msb) &
					((1 << core_bits) - 1);
#else
		c->cpu_core_id = phys_pkg_id(c->initial_apicid, index_msb) &
					((1 << core_bits) - 1);
#endif
	}

out:
	if ((c->x86_max_cores * smp_num_siblings) > 1) {
		printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
				c->phys_proc_id);
		printk(KERN_INFO "CPU: Processor Core ID: %d\n",
				c->cpu_core_id);
	}
#endif
}

static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
{
	char *v = c->x86_vendor_id;
	int i;
	static int printed;

	for (i = 0; i < X86_VENDOR_NUM; i++) {
		if (!cpu_devs[i])
			break;

		if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
		    (cpu_devs[i]->c_ident[1] &&
		     !strcmp(v, cpu_devs[i]->c_ident[1]))) {
			this_cpu = cpu_devs[i];
			c->x86_vendor = this_cpu->c_x86_vendor;
			return;
		}
	}

	if (!printed) {
		printed++;
		printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n");
		printk(KERN_ERR "CPU: Your system may be unstable.\n");
	}

	c->x86_vendor = X86_VENDOR_UNKNOWN;
	this_cpu = &default_cpu;
}

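/*
 * CPUID leaf 0 returns the vendor string in EBX, EDX, ECX order,
 * which is why the output registers below land at offsets 0, 8 and 4
 * of x86_vendor_id respectively.
 */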
void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
{
	/* Get vendor name */
	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
	      (unsigned int *)&c->x86_vendor_id[0],
	      (unsigned int *)&c->x86_vendor_id[8],
	      (unsigned int *)&c->x86_vendor_id[4]);

	c->x86 = 4;
	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		u32 junk, tfms, cap0, misc;
		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
		c->x86 = (tfms >> 8) & 0xf;
		c->x86_model = (tfms >> 4) & 0xf;
		c->x86_mask = tfms & 0xf;
		if (c->x86 == 0xf)
			c->x86 += (tfms >> 20) & 0xff;
		if (c->x86 >= 0x6)
			c->x86_model += ((tfms >> 16) & 0xf) << 4;
		if (cap0 & (1<<19)) {
			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
			c->x86_cache_alignment = c->x86_clflush_size;
		}
	}
}

static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
{
	u32 tfms, xlvl;
	u32 ebx;

	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		u32 capability, excap;
		cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
		c->x86_capability[0] = capability;
		c->x86_capability[4] = excap;
	}

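	/*
	 * cpuid_eax(0x80000000) returns the highest supported extended
	 * leaf; the 0xffff0000 mask check below guards against old
	 * CPUs that return garbage for unimplemented leaves.
	 */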
	/* AMD-defined flags: level 0x80000001 */
	xlvl = cpuid_eax(0x80000000);
	c->extended_cpuid_level = xlvl;
	if ((xlvl & 0xffff0000) == 0x80000000) {
		if (xlvl >= 0x80000001) {
			c->x86_capability[1] = cpuid_edx(0x80000001);
			c->x86_capability[6] = cpuid_ecx(0x80000001);
		}
	}

#ifdef CONFIG_X86_64
	/* Transmeta-defined flags: level 0x80860001 */
	xlvl = cpuid_eax(0x80860000);
	if ((xlvl & 0xffff0000) == 0x80860000) {
		/* Don't set x86_cpuid_level here for now to not confuse. */
		if (xlvl >= 0x80860001)
			c->x86_capability[2] = cpuid_edx(0x80860001);
	}

	if (c->extended_cpuid_level >= 0x80000008) {
		u32 eax = cpuid_eax(0x80000008);

		c->x86_virt_bits = (eax >> 8) & 0xff;
		c->x86_phys_bits = eax & 0xff;
	}
#endif

	if (c->extended_cpuid_level >= 0x80000007)
		c->x86_power = cpuid_edx(0x80000007);
}

/*
 * Do minimum CPU detection early.
 * Fields really needed: vendor, cpuid_level, family, model, mask,
 * cache alignment.
 * The others are not touched to avoid unwanted side effects.
 *
 * WARNING: this function is only called on the BP.  Don't add code
 * here that is supposed to run on all CPUs.
 */
static void __init early_identify_cpu(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
	c->x86_clflush_size = 64;
#else
	c->x86_clflush_size = 32;
#endif
	c->x86_cache_alignment = c->x86_clflush_size;

	if (!have_cpuid_p())
		return;

	memset(&c->x86_capability, 0, sizeof c->x86_capability);

	c->extended_cpuid_level = 0;

	cpu_detect(c);

	get_cpu_vendor(c);

	get_cpu_cap(c);

	if (this_cpu->c_early_init)
		this_cpu->c_early_init(c);

	validate_pat_support(c);
}

void __init early_cpu_init(void)
{
	struct cpu_dev **cdev;
	int count = 0;

	printk("KERNEL supported cpus:\n");
	for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
		struct cpu_dev *cpudev = *cdev;
		unsigned int j;

		if (count >= X86_VENDOR_NUM)
			break;
		cpu_devs[count] = cpudev;
		count++;

		for (j = 0; j < 2; j++) {
			if (!cpudev->c_ident[j])
				continue;
			printk("  %s %s\n", cpudev->c_vendor,
				cpudev->c_ident[j]);
		}
	}

	early_identify_cpu(&boot_cpu_data);
}

/*
 * The NOPL instruction is supposed to exist on all CPUs of family >= 6;
 * unfortunately, that's not true in practice because of early VIA
 * chips and (more importantly) broken virtualizers that are not easy
 * to detect.  Hence, probe for it based on first principles.
 *
 * Note: no 64-bit chip is known to lack these, but put the code here
 * for consistency with 32 bits, and to make it utterly trivial to
 * diagnose the problem should it ever surface.
 */
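/*
 * If the CPU lacks NOPL, executing it raises #UD; the exception-table
 * fixup at label 3 then zeroes the register, so has_nopl no longer
 * matches the signature and the feature bit stays cleared.
 */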
static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
{
	const u32 nopl_signature = 0x888c53b1; /* Random number */
	u32 has_nopl = nopl_signature;

	clear_cpu_cap(c, X86_FEATURE_NOPL);
	if (c->x86 >= 6) {
		asm volatile("\n"
			     "1:	.byte 0x0f,0x1f,0xc0\n" /* nopl %eax */
			     "2:\n"
			     "	.section .fixup,\"ax\"\n"
			     "3:	xor %0,%0\n"
			     "	jmp 2b\n"
			     "	.previous\n"
			     _ASM_EXTABLE(1b,3b)
			     : "+a" (has_nopl));

		if (has_nopl == nopl_signature)
			set_cpu_cap(c, X86_FEATURE_NOPL);
	}
}

static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
{
	if (!have_cpuid_p())
		return;

	c->extended_cpuid_level = 0;

	cpu_detect(c);

	get_cpu_vendor(c);

	get_cpu_cap(c);

	if (c->cpuid_level >= 0x00000001) {
		c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
#ifdef CONFIG_X86_32
# ifdef CONFIG_X86_HT
		c->apicid = phys_pkg_id(c->initial_apicid, 0);
# else
		c->apicid = c->initial_apicid;
# endif
#endif

#ifdef CONFIG_X86_HT
		c->phys_proc_id = c->initial_apicid;
#endif
	}

	get_model_name(c); /* Default name */

	init_scattered_cpuid_features(c);
	detect_nopl(c);
}

/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
{
	int i;

	c->loops_per_jiffy = loops_per_jiffy;
	c->x86_cache_size = -1;
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
	c->x86_vendor_id[0] = '\0'; /* Unset */
	c->x86_model_id[0] = '\0';  /* Unset */
	c->x86_max_cores = 1;
#ifdef CONFIG_X86_64
	c->x86_coreid_bits = 0;
	c->x86_clflush_size = 64;
#else
	c->cpuid_level = -1;	/* CPUID not detected */
	c->x86_clflush_size = 32;
#endif
	c->x86_cache_alignment = c->x86_clflush_size;
	memset(&c->x86_capability, 0, sizeof c->x86_capability);

	if (!have_cpuid_p()) {
		/*
		 * First of all, decide if this is a 486 or higher.
		 * It's a 486 if we can modify the AC flag.
		 */
		if (flag_is_changeable_p(X86_EFLAGS_AC))
			c->x86 = 4;
		else
			c->x86 = 3;
	}

	generic_identify(c);

	if (this_cpu->c_identify)
		this_cpu->c_identify(c);

#ifdef CONFIG_X86_64
	c->apicid = phys_pkg_id(0);
#endif

	/*
	 * Vendor-specific initialization.  In this section we
	 * canonicalize the feature flags, meaning if there are
	 * features a certain CPU supports which CPUID doesn't
	 * tell us, CPUID claiming incorrect flags, or other bugs,
	 * we handle them here.
	 *
	 * At the end of this section, c->x86_capability better
	 * indicate the features this CPU genuinely supports!
	 */
	if (this_cpu->c_init)
		this_cpu->c_init(c);

	/* Disable the PN if appropriate */
	squash_the_stupid_serial_number(c);

	/*
	 * The vendor-specific functions might have changed features.
	 * Now we do "generic changes."
	 */

	/* If the model name is still unset, do table lookup. */
	if (!c->x86_model_id[0]) {
		char *p;
		p = table_lookup_model(c);
		if (p)
			strcpy(c->x86_model_id, p);
		else
			/* Last resort... */
			sprintf(c->x86_model_id, "%02x/%02x",
				c->x86, c->x86_model);
	}

#ifdef CONFIG_X86_64
	detect_ht(c);
#endif

	/*
	 * On SMP, boot_cpu_data holds the common feature set between
	 * all CPUs; so make sure that we indicate which features are
	 * common between the CPUs.  The first time this routine gets
	 * executed, c == &boot_cpu_data.
	 */
	if (c != &boot_cpu_data) {
		/* AND the already accumulated flags with these */
		for (i = 0; i < NCAPINTS; i++)
			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
	}

	/* Clear all flags overridden by options */
	for (i = 0; i < NCAPINTS; i++)
		c->x86_capability[i] &= ~cleared_cpu_caps[i];

#ifdef CONFIG_X86_MCE
	/* Init Machine Check Exception if available. */
	mcheck_init(c);
#endif

	select_idle_routine(c);

#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
	numa_add_cpu(smp_processor_id());
#endif
}

void __init identify_boot_cpu(void)
{
	identify_cpu(&boot_cpu_data);
#ifdef CONFIG_X86_32
	sysenter_setup();
	enable_sep_cpu();
#endif
}

void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
{
	BUG_ON(c == &boot_cpu_data);
	identify_cpu(c);
#ifdef CONFIG_X86_32
	enable_sep_cpu();
#endif
	mtrr_ap_init();
}

struct msr_range {
	unsigned	min;
	unsigned	max;
};

static struct msr_range msr_range_array[] __cpuinitdata = {
	{ 0x00000000, 0x00000418},
	{ 0xc0000000, 0xc000040b},
	{ 0xc0010000, 0xc0010142},
	{ 0xc0011000, 0xc001103b},
};

static void __cpuinit print_cpu_msr(void)
{
	unsigned index;
	u64 val;
	int i;
	unsigned index_min, index_max;

	for (i = 0; i < ARRAY_SIZE(msr_range_array); i++) {
		index_min = msr_range_array[i].min;
		index_max = msr_range_array[i].max;
		for (index = index_min; index < index_max; index++) {
			if (rdmsrl_amd_safe(index, &val))
				continue;
			printk(KERN_INFO " MSR%08x: %016llx\n", index, val);
		}
	}
}

/* "show_msr=N" dumps the MSR ranges above for the first N CPUs */
static int show_msr __cpuinitdata;
static __init int setup_show_msr(char *arg)
{
	int num;

	get_option(&arg, &num);

	if (num > 0)
		show_msr = num;
	return 1;
}
__setup("show_msr=", setup_show_msr);

static __init int setup_noclflush(char *arg)
{
	setup_clear_cpu_cap(X86_FEATURE_CLFLSH);
	return 1;
}
__setup("noclflush", setup_noclflush);

void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
{
	char *vendor = NULL;

	if (c->x86_vendor < X86_VENDOR_NUM)
		vendor = this_cpu->c_vendor;
	else if (c->cpuid_level >= 0)
		vendor = c->x86_vendor_id;

	if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor)))
		printk(KERN_CONT "%s ", vendor);

	if (c->x86_model_id[0])
		printk(KERN_CONT "%s", c->x86_model_id);
	else
		printk(KERN_CONT "%d86", c->x86);

	if (c->x86_mask || c->cpuid_level >= 0)
		printk(KERN_CONT " stepping %02x\n", c->x86_mask);
	else
		printk(KERN_CONT "\n");

#ifdef CONFIG_SMP
	if (c->cpu_index < show_msr)
		print_cpu_msr();
#else
	if (show_msr)
		print_cpu_msr();
#endif
}

static __init int setup_disablecpuid(char *arg)
{
	int bit;
	if (get_option(&arg, &bit) && bit < NCAPINTS*32)
		setup_clear_cpu_cap(bit);
	else
		return 0;
	return 1;
}
__setup("clearcpuid=", setup_disablecpuid);

cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;

#ifdef CONFIG_X86_64
struct x8664_pda **_cpu_pda __read_mostly;
EXPORT_SYMBOL(_cpu_pda);

struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };

char boot_cpu_stack[IRQSTACKSIZE] __page_aligned_bss;

void __cpuinit pda_init(int cpu)
{
	struct x8664_pda *pda = cpu_pda(cpu);

	/* Set up data that may be needed in __get_free_pages early */
	loadsegment(fs, 0);
	loadsegment(gs, 0);
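	/*
	 * %fs/%gs are zeroed here; the PDA base is installed directly
	 * into the hidden GS base via the MSR_GS_BASE write below,
	 * independent of the selector value.
	 */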
	/* Memory clobbers used to order PDA accesses */
	mb();
	wrmsrl(MSR_GS_BASE, pda);
	mb();

	pda->cpunumber = cpu;
	pda->irqcount = -1;
	pda->kernelstack = (unsigned long)stack_thread_info() -
				 PDA_STACKOFFSET + THREAD_SIZE;
	pda->active_mm = &init_mm;
	pda->mmu_state = 0;

	if (cpu == 0) {
		/* others are initialized in smpboot.c */
		pda->pcurrent = &init_task;
		pda->irqstackptr = boot_cpu_stack;
		pda->irqstackptr += IRQSTACKSIZE - 64;
	} else {
		if (!pda->irqstackptr) {
			pda->irqstackptr = (char *)
				__get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER);
			if (!pda->irqstackptr)
				panic("cannot allocate irqstack for cpu %d",
				      cpu);
			pda->irqstackptr += IRQSTACKSIZE - 64;
		}

		if (pda->nodenumber == 0 && cpu_to_node(cpu) != NUMA_NO_NODE)
			pda->nodenumber = cpu_to_node(cpu);
	}
}

char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ +
			   DEBUG_STKSZ] __page_aligned_bss;

extern asmlinkage void ignore_sysret(void);

/* May not be marked __init: used by software suspend */
void syscall_init(void)
{
	/*
	 * LSTAR and STAR live in a somewhat strange symbiosis: they
	 * both write to the same internal register.  STAR allows
	 * setting CS/SS, but only for a 32-bit target; LSTAR sets
	 * the 64-bit RIP.
	 */
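	/*
	 * On SYSCALL, CS is loaded from STAR[47:32] and SS from
	 * STAR[47:32] + 8; SYSRET takes user CS from STAR[63:48]
	 * (+16 for a 64-bit return) and SS from STAR[63:48] + 8,
	 * hence __USER32_CS in the top word below.
	 */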
	wrmsrl(MSR_STAR,  ((u64)__USER32_CS)<<48  | ((u64)__KERNEL_CS)<<32);
	wrmsrl(MSR_LSTAR, system_call);
	wrmsrl(MSR_CSTAR, ignore_sysret);

#ifdef CONFIG_IA32_EMULATION
	syscall32_cpu_init();
#endif

	/* Flags to clear on syscall */
	wrmsrl(MSR_SYSCALL_MASK,
	       X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|X86_EFLAGS_IOPL);
}

unsigned long kernel_eflags;

/*
 * Copies of the original ist values from the tss are only accessed during
 * debugging, no special alignment required.
 */
DEFINE_PER_CPU(struct orig_ist, orig_ist);

#else

/* Make sure %fs is initialized properly in idle threads */
struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
{
	memset(regs, 0, sizeof(struct pt_regs));
	regs->fs = __KERNEL_PERCPU;
	return regs;
}
#endif

/*
 * cpu_init() initializes state that is per-CPU.  Some data is already
 * initialized (naturally) in the bootstrap process, such as the GDT
 * and IDT.  We reload them nevertheless; this function acts as a
 * 'CPU state barrier': nothing should get across.
 * On 64-bit, a lot of state is already set up in pda_init().
 */
#ifdef CONFIG_X86_64
void __cpuinit cpu_init(void)
{
	int cpu = stack_smp_processor_id();
	struct tss_struct *t = &per_cpu(init_tss, cpu);
	struct orig_ist *orig_ist = &per_cpu(orig_ist, cpu);
	unsigned long v;
	char *estacks = NULL;
	struct task_struct *me;
	int i;

	/* CPU 0 is initialised in head64.c */
	if (cpu != 0)
		pda_init(cpu);
	else
		estacks = boot_exception_stacks;

	me = current;

	if (cpu_test_and_set(cpu, cpu_initialized))
		panic("CPU#%d already initialized!\n", cpu);

	printk(KERN_INFO "Initializing CPU#%d\n", cpu);

	clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);

	/*
	 * Initialize the per-CPU GDT with the boot GDT,
	 * and set up the GDT descriptor:
	 */

	switch_to_new_gdt();
	load_idt((const struct desc_ptr *)&idt_descr);

	memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
	syscall_init();

	wrmsrl(MSR_FS_BASE, 0);
	wrmsrl(MSR_KERNEL_GS_BASE, 0);
	barrier();

	check_efer();
	if (cpu != 0 && x2apic)
		enable_x2apic();

	/*
	 * set up and load the per-CPU TSS
	 */
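	/*
	 * The exception stacks grow down, so each IST entry is set to
	 * the end of its allocation; DEBUG_STACK uses its own order,
	 * the other entries each get EXCEPTION_STACK_ORDER pages.
	 */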
	if (!orig_ist->ist[0]) {
		static const unsigned int order[N_EXCEPTION_STACKS] = {
			[0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STACK_ORDER,
			[DEBUG_STACK - 1] = DEBUG_STACK_ORDER
		};
		for (v = 0; v < N_EXCEPTION_STACKS; v++) {
			if (cpu) {
				estacks = (char *)__get_free_pages(GFP_ATOMIC, order[v]);
				if (!estacks)
					panic("Cannot allocate exception "
					      "stack %ld %d\n", v, cpu);
			}
			estacks += PAGE_SIZE << order[v];
			orig_ist->ist[v] = t->x86_tss.ist[v] =
					(unsigned long)estacks;
		}
	}

	t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
	/*
	 * <= is required because the CPU will access up to
	 * 8 bits beyond the end of the IO permission bitmap.
	 */
	for (i = 0; i <= IO_BITMAP_LONGS; i++)
		t->io_bitmap[i] = ~0UL;

	atomic_inc(&init_mm.mm_count);
	me->active_mm = &init_mm;
	if (me->mm)
		BUG();
	enter_lazy_tlb(&init_mm, me);

	load_sp0(t, &current->thread);
	set_tss_desc(cpu, t);
	load_TR_desc();
	load_LDT(&init_mm.context);

#ifdef CONFIG_KGDB
	/*
	 * If the kgdb is connected no debug regs should be altered.  This
	 * is only applicable when KGDB and a KGDB I/O module are built
	 * into the kernel and you are using early debugging with
	 * kgdbwait.  KGDB will control the kernel HW breakpoint registers.
	 */
	if (kgdb_connected && arch_kgdb_ops.correct_hw_break)
		arch_kgdb_ops.correct_hw_break();
	else {
#endif
	/*
	 * Clear all 6 debug registers:
	 */

	set_debugreg(0UL, 0);
	set_debugreg(0UL, 1);
	set_debugreg(0UL, 2);
	set_debugreg(0UL, 3);
	set_debugreg(0UL, 6);
	set_debugreg(0UL, 7);
#ifdef CONFIG_KGDB
	/* If the kgdb is connected no debug regs should be altered. */
	}
#endif

	fpu_init();

	raw_local_save_flags(kernel_eflags);

	if (is_uv_system())
		uv_cpu_init();
}

#else

void __cpuinit cpu_init(void)
{
	int cpu = smp_processor_id();
	struct task_struct *curr = current;
	struct tss_struct *t = &per_cpu(init_tss, cpu);
	struct thread_struct *thread = &curr->thread;

	if (cpu_test_and_set(cpu, cpu_initialized)) {
		printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
		for (;;) local_irq_enable();
	}

	printk(KERN_INFO "Initializing CPU#%d\n", cpu);

	if (cpu_has_vme || cpu_has_tsc || cpu_has_de)
		clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);

	load_idt(&idt_descr);
	switch_to_new_gdt();

	/*
	 * Set up and load the per-CPU TSS and LDT
	 */
	atomic_inc(&init_mm.mm_count);
	curr->active_mm = &init_mm;
	if (curr->mm)
		BUG();
	enter_lazy_tlb(&init_mm, curr);

	load_sp0(t, thread);
	set_tss_desc(cpu, t);
	load_TR_desc();
	load_LDT(&init_mm.context);

#ifdef CONFIG_DOUBLEFAULT
	/* Set up doublefault TSS pointer in the GDT */
	__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
#endif

	/* Clear %gs. */
	asm volatile ("mov %0, %%gs" : : "r" (0));

	/* Clear all 6 debug registers: */
	set_debugreg(0, 0);
	set_debugreg(0, 1);
	set_debugreg(0, 2);
	set_debugreg(0, 3);
	set_debugreg(0, 6);
	set_debugreg(0, 7);

	/*
	 * Force FPU initialization:
	 */
	if (cpu_has_xsave)
		current_thread_info()->status = TS_XSAVE;
	else
		current_thread_info()->status = 0;
	clear_used_math();
	mxcsr_feature_mask_init();

	/*
	 * Boot processor to setup the FP and extended state context info.
	 */
	if (!smp_processor_id())
		init_thread_xstate();

	xsave_init();
}

#ifdef CONFIG_HOTPLUG_CPU
void __cpuinit cpu_uninit(void)
{
	int cpu = raw_smp_processor_id();
	cpu_clear(cpu, cpu_initialized);

	/* lazy TLB state */
	per_cpu(cpu_tlbstate, cpu).state = 0;
	per_cpu(cpu_tlbstate, cpu).active_mm = &init_mm;
}
#endif

#endif