#include <linux/bootmem.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kgdb.h>
#include <linux/smp.h>
#include <linux/io.h>

#include <asm/stackprotector.h>
#include <asm/perf_event.h>
#include <asm/mmu_context.h>
#include <asm/hypervisor.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <linux/topology.h>
#include <linux/cpumask.h>
#include <asm/pgtable.h>
#include <asm/atomic.h>
#include <asm/proto.h>
#include <asm/setup.h>
#include <asm/apic.h>
#include <asm/desc.h>
#include <asm/i387.h>
#include <asm/mtrr.h>
#include <linux/numa.h>
#include <asm/asm.h>
#include <asm/cpu.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/pat.h>

#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/uv/uv.h>
#endif

#include "cpu.h"

/* all of these masks are initialized in setup_cpu_local_masks() */
cpumask_var_t cpu_initialized_mask;
cpumask_var_t cpu_callout_mask;
cpumask_var_t cpu_callin_mask;

/* representing cpus for which sibling maps can be computed */
cpumask_var_t cpu_sibling_setup_mask;

/* correctly size the local cpu masks */
void __init setup_cpu_local_masks(void)
{
        alloc_bootmem_cpumask_var(&cpu_initialized_mask);
        alloc_bootmem_cpumask_var(&cpu_callin_mask);
        alloc_bootmem_cpumask_var(&cpu_callout_mask);
        alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
}

static void __cpuinit default_init(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
        cpu_detect_cache_sizes(c);
#else
        /* Not much we can do here... */
        /* Check if at least it has cpuid */
        if (c->cpuid_level == -1) {
                /* No cpuid. It must be an ancient CPU */
                if (c->x86 == 4)
                        strcpy(c->x86_model_id, "486");
                else if (c->x86 == 3)
                        strcpy(c->x86_model_id, "386");
        }
#endif
}

static const struct cpu_dev __cpuinitconst default_cpu = {
        .c_init         = default_init,
        .c_vendor       = "Unknown",
        .c_x86_vendor   = X86_VENDOR_UNKNOWN,
};

static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;

DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
#ifdef CONFIG_X86_64
        /*
         * We need valid kernel segments for data and code in long mode too
         * IRET will check the segment types  kkeil 2000/10/28
         * Also sysret mandates a special GDT layout
         *
         * TLS descriptors are currently at a different place compared to i386.
         * Hopefully nobody expects them at a fixed place (Wine?)
         */
        [GDT_ENTRY_KERNEL32_CS]         = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
        [GDT_ENTRY_KERNEL_CS]           = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
        [GDT_ENTRY_KERNEL_DS]           = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
        [GDT_ENTRY_DEFAULT_USER32_CS]   = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
        [GDT_ENTRY_DEFAULT_USER_DS]     = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
        [GDT_ENTRY_DEFAULT_USER_CS]     = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
#else
        [GDT_ENTRY_KERNEL_CS]           = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
        [GDT_ENTRY_KERNEL_DS]           = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
        [GDT_ENTRY_DEFAULT_USER_CS]     = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
        [GDT_ENTRY_DEFAULT_USER_DS]     = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
        /*
         * Segments used for calling PnP BIOS have byte granularity.
         * The code segments and data segments have fixed 64k limits;
         * the transfer segment sizes are set at run time.
         */
        /* 32-bit code */
        [GDT_ENTRY_PNPBIOS_CS32]        = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
        /* 16-bit code */
        [GDT_ENTRY_PNPBIOS_CS16]        = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
        /* 16-bit data */
        [GDT_ENTRY_PNPBIOS_DS]          = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
        /* 16-bit data */
        [GDT_ENTRY_PNPBIOS_TS1]         = GDT_ENTRY_INIT(0x0092, 0, 0),
        /* 16-bit data */
        [GDT_ENTRY_PNPBIOS_TS2]         = GDT_ENTRY_INIT(0x0092, 0, 0),
        /*
         * The APM segments have byte granularity and their bases
         * are set at run time.  All have 64k limits.
         */
        /* 32-bit code */
        [GDT_ENTRY_APMBIOS_BASE]        = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
        /* 16-bit code */
        [GDT_ENTRY_APMBIOS_BASE+1]      = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
        /* data */
        [GDT_ENTRY_APMBIOS_BASE+2]      = GDT_ENTRY_INIT(0x4092, 0, 0xffff),

        [GDT_ENTRY_ESPFIX_SS]           = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
        [GDT_ENTRY_PERCPU]              = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
        GDT_STACK_CANARY_INIT
#endif
} };
EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
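
/*
 * For reference: GDT_ENTRY_INIT(flags, base, limit) packs a segment
 * descriptor from a 16-bit flags value whose low byte is the access byte
 * and whose high nibble holds the granularity/size bits. So 0xc09b is
 * access byte 0x9b (present, DPL 0, execute/read code, accessed) plus
 * nibble 0xc (4K granularity, 32-bit), while 0xa09b uses nibble 0xa
 * (4K granularity, L=1, i.e. 64-bit code).
 */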

static int __init x86_xsave_setup(char *s)
{
        setup_clear_cpu_cap(X86_FEATURE_XSAVE);
        return 1;
}
__setup("noxsave", x86_xsave_setup);

#ifdef CONFIG_X86_32
static int cachesize_override __cpuinitdata = -1;
static int disable_x86_serial_nr __cpuinitdata = 1;

static int __init cachesize_setup(char *str)
{
        get_option(&str, &cachesize_override);
        return 1;
}
__setup("cachesize=", cachesize_setup);

static int __init x86_fxsr_setup(char *s)
{
        setup_clear_cpu_cap(X86_FEATURE_FXSR);
        setup_clear_cpu_cap(X86_FEATURE_XMM);
        return 1;
}
__setup("nofxsr", x86_fxsr_setup);

static int __init x86_sep_setup(char *s)
{
        setup_clear_cpu_cap(X86_FEATURE_SEP);
        return 1;
}
__setup("nosep", x86_sep_setup);

/* Standard macro to see if a specific flag is changeable */
static inline int flag_is_changeable_p(u32 flag)
{
        u32 f1, f2;

        /*
         * Cyrix and IDT cpus allow disabling of CPUID
         * so the code below may return different results
         * when it is executed before and after enabling
         * the CPUID. Add "volatile" to not allow gcc to
         * optimize the subsequent calls to this function.
         */
        asm volatile ("pushfl           \n\t"
                      "pushfl           \n\t"
                      "popl %0          \n\t"
                      "movl %0, %1      \n\t"
                      "xorl %2, %0      \n\t"
                      "pushl %0         \n\t"
                      "popfl            \n\t"
                      "pushfl           \n\t"
                      "popl %0          \n\t"
                      "popfl            \n\t"

                      : "=&r" (f1), "=&r" (f2)
                      : "ir" (flag));

        return ((f1^f2) & flag) != 0;
}
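
/*
 * The asm sequence above saves EFLAGS, XORs the requested bit into a
 * pushed copy, loads that copy back into EFLAGS, then re-reads EFLAGS:
 * the bit is changeable iff the re-read value differs from the original.
 * With X86_EFLAGS_ID (bit 21) this is the classic pre-CPUID detection
 * test used by have_cpuid_p() below.
 */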

/* Probe for the CPUID instruction */
static int __cpuinit have_cpuid_p(void)
{
        return flag_is_changeable_p(X86_EFLAGS_ID);
}

static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
        unsigned long lo, hi;

        if (!cpu_has(c, X86_FEATURE_PN) || !disable_x86_serial_nr)
                return;

        /* Disable processor serial number: */

        rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
        lo |= 0x200000;
        wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);

        printk(KERN_NOTICE "CPU serial number disabled.\n");
        clear_cpu_cap(c, X86_FEATURE_PN);

        /* Disabling the serial number may affect the cpuid level */
        c->cpuid_level = cpuid_eax(0);
}

static int __init x86_serial_nr_setup(char *s)
{
        disable_x86_serial_nr = 0;
        return 1;
}
__setup("serialnumber", x86_serial_nr_setup);
#else
static inline int flag_is_changeable_p(u32 flag)
{
        return 1;
}
/* Probe for the CPUID instruction */
static inline int have_cpuid_p(void)
{
        return 1;
}
static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
}
#endif

/*
 * Some CPU features depend on higher CPUID levels, which may not always
 * be available due to CPUID level capping or broken virtualization
 * software.  Add those features to this table to auto-disable them.
 */
struct cpuid_dependent_feature {
        u32 feature;
        u32 level;
};

static const struct cpuid_dependent_feature __cpuinitconst
cpuid_dependent_features[] = {
        { X86_FEATURE_MWAIT,            0x00000005 },
        { X86_FEATURE_DCA,              0x00000009 },
        { X86_FEATURE_XSAVE,            0x0000000d },
        { 0, 0 }
};

static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
{
        const struct cpuid_dependent_feature *df;

        for (df = cpuid_dependent_features; df->feature; df++) {

                if (!cpu_has(c, df->feature))
                        continue;
                /*
                 * Note: cpuid_level is set to -1 if unavailable, but
                 * extended_cpuid_level is set to 0 if unavailable
                 * and the legitimate extended levels are all negative
                 * when signed; hence the weird messing around with
                 * signs here...
                 */
                if (!((s32)df->level < 0 ?
                     (u32)df->level > (u32)c->extended_cpuid_level :
                     (s32)df->level > (s32)c->cpuid_level))
                        continue;

                clear_cpu_cap(c, df->feature);
                if (!warn)
                        continue;

                printk(KERN_WARNING
                       "CPU: CPU feature %s disabled, no CPUID level 0x%x\n",
                       x86_cap_flags[df->feature], df->level);
        }
}
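
/*
 * Illustration: X86_FEATURE_XSAVE has level 0x0000000d, positive as an
 * s32, so it is checked against the basic cpuid_level. A hypothetical
 * extended-range entry such as 0x80000008 would be negative as an s32
 * and would instead be compared, unsigned, against extended_cpuid_level.
 */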

/*
 * Naming convention should be: <Name> [(<Codename>)]
 * This table is only used if init_<vendor>() below doesn't set the name;
 * in particular, if CPUID levels 0x80000002..4 are supported, this
 * isn't used.
 */

/* Look up CPU names by table lookup. */
static const char *__cpuinit table_lookup_model(struct cpuinfo_x86 *c)
{
        const struct cpu_model_info *info;

        if (c->x86_model >= 16)
                return NULL;    /* Range check */

        if (!this_cpu)
                return NULL;

        info = this_cpu->c_models;

        while (info && info->family) {
                if (info->family == c->x86)
                        return info->model_names[c->x86_model];
                info++;
        }
        return NULL;            /* Not found */
}

__u32 cpu_caps_cleared[NCAPINTS] __cpuinitdata;
__u32 cpu_caps_set[NCAPINTS] __cpuinitdata;

void load_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
        loadsegment(fs, __KERNEL_PERCPU);
#else
        loadsegment(gs, 0);
        wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu));
#endif
        load_stack_canary_segment();
}

/*
 * Current gdt points %fs at the "master" per-cpu area: after this,
 * it's on the real one.
 */
void switch_to_new_gdt(int cpu)
{
        struct desc_ptr gdt_descr;

        gdt_descr.address = (long)get_cpu_gdt_table(cpu);
        gdt_descr.size = GDT_SIZE - 1;
        load_gdt(&gdt_descr);

        /* Reload the per-cpu base */
        load_percpu_segment(cpu);
}

static const struct cpu_dev *__cpuinitdata cpu_devs[X86_VENDOR_NUM] = {};

static void __cpuinit get_model_name(struct cpuinfo_x86 *c)
{
        unsigned int *v;
        char *p, *q;

        if (c->extended_cpuid_level < 0x80000004)
                return;

        v = (unsigned int *)c->x86_model_id;
        cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
        cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
        cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
        c->x86_model_id[48] = 0;

        /*
         * Intel chips right-justify this string for some dumb reason;
         * undo that brain damage:
         */
        p = q = &c->x86_model_id[0];
        while (*p == ' ')
                p++;
        if (p != q) {
                while (*p)
                        *q++ = *p++;
                while (q <= &c->x86_model_id[48])
                        *q++ = '\0';    /* Zero-pad the rest */
        }
}
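
/*
 * Background: CPUID leaves 0x80000002..0x80000004 each return 16 bytes
 * of the 48-byte processor brand string in EAX..EDX, which is why v[]
 * simply overlays x86_model_id and is filled by three cpuid() calls.
 */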

void __cpuinit cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
{
        unsigned int n, dummy, ebx, ecx, edx, l2size;

        n = c->extended_cpuid_level;

        if (n >= 0x80000005) {
                cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
                c->x86_cache_size = (ecx>>24) + (edx>>24);
#ifdef CONFIG_X86_64
                /* On K8 L1 TLB is inclusive, so don't count it */
                c->x86_tlbsize = 0;
#endif
        }

        if (n < 0x80000006)     /* Some chips just have a large L1. */
                return;

        cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
        l2size = ecx >> 16;

#ifdef CONFIG_X86_64
        c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
#else
        /* do processor-specific cache resizing */
        if (this_cpu->c_size_cache)
                l2size = this_cpu->c_size_cache(c, l2size);

        /* Allow user to override all this if necessary. */
        if (cachesize_override != -1)
                l2size = cachesize_override;

        if (l2size == 0)
                return;         /* Again, no L2 cache is possible */
#endif

        c->x86_cache_size = l2size;
}

void __cpuinit detect_ht(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_HT
        u32 eax, ebx, ecx, edx;
        int index_msb, core_bits;

        if (!cpu_has(c, X86_FEATURE_HT))
                return;

        if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
                goto out;

        if (cpu_has(c, X86_FEATURE_XTOPOLOGY))
                return;

        cpuid(1, &eax, &ebx, &ecx, &edx);

        smp_num_siblings = (ebx & 0xff0000) >> 16;

        if (smp_num_siblings == 1) {
                printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
                goto out;
        }

        if (smp_num_siblings <= 1)
                goto out;

        if (smp_num_siblings > nr_cpu_ids) {
                pr_warning("CPU: Unsupported number of siblings %d",
                           smp_num_siblings);
                smp_num_siblings = 1;
                return;
        }

        index_msb = get_count_order(smp_num_siblings);
        c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);

        smp_num_siblings = smp_num_siblings / c->x86_max_cores;

        index_msb = get_count_order(smp_num_siblings);

        core_bits = get_count_order(c->x86_max_cores);

        c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
                                       ((1 << core_bits) - 1);

out:
        if ((c->x86_max_cores * smp_num_siblings) > 1) {
                printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
                       c->phys_proc_id);
                printk(KERN_INFO "CPU: Processor Core ID: %d\n",
                       c->cpu_core_id);
        }
#endif
}
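
/*
 * Worked example (illustrative values): a package with 2 cores x 2
 * threads reports 4 siblings in CPUID.1:EBX[23:16]. index_msb is then 2,
 * so phys_proc_id is the APIC ID shifted right by 2. After dividing by
 * x86_max_cores (2), threads-per-core is 2, giving index_msb = 1 and
 * core_bits = 1; cpu_core_id is then just bit 1 of the APIC ID.
 */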

static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
{
        char *v = c->x86_vendor_id;
        int i;

        for (i = 0; i < X86_VENDOR_NUM; i++) {
                if (!cpu_devs[i])
                        break;

                if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
                    (cpu_devs[i]->c_ident[1] &&
                     !strcmp(v, cpu_devs[i]->c_ident[1]))) {

                        this_cpu = cpu_devs[i];
                        c->x86_vendor = this_cpu->c_x86_vendor;
                        return;
                }
        }

        printk_once(KERN_ERR
                        "CPU: vendor_id '%s' unknown, using generic init.\n" \
                        "CPU: Your system may be unstable.\n", v);

        c->x86_vendor = X86_VENDOR_UNKNOWN;
        this_cpu = &default_cpu;
}

void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
{
        /* Get vendor name */
        cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
              (unsigned int *)&c->x86_vendor_id[0],
              (unsigned int *)&c->x86_vendor_id[8],
              (unsigned int *)&c->x86_vendor_id[4]);

        c->x86 = 4;
        /* Intel-defined flags: level 0x00000001 */
        if (c->cpuid_level >= 0x00000001) {
                u32 junk, tfms, cap0, misc;

                cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
                c->x86 = (tfms >> 8) & 0xf;
                c->x86_model = (tfms >> 4) & 0xf;
                c->x86_mask = tfms & 0xf;

                if (c->x86 == 0xf)
                        c->x86 += (tfms >> 20) & 0xff;
                if (c->x86 >= 0x6)
                        c->x86_model += ((tfms >> 16) & 0xf) << 4;

                if (cap0 & (1<<19)) {
                        c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
                        c->x86_cache_alignment = c->x86_clflush_size;
                }
        }
}
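
/*
 * Worked example: CPUID.1 EAX = 0x000306a9 gives base family 6, base
 * model 0xa, stepping 9; since family >= 6 the extended model field
 * (here 3) is shifted in, yielding x86_model 0x3a. The extended family
 * field is only added in when the base family is 0xf.
 */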

static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
{
        u32 tfms, xlvl;
        u32 ebx;

        /* Intel-defined flags: level 0x00000001 */
        if (c->cpuid_level >= 0x00000001) {
                u32 capability, excap;

                cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
                c->x86_capability[0] = capability;
                c->x86_capability[4] = excap;
        }

        /* AMD-defined flags: level 0x80000001 */
        xlvl = cpuid_eax(0x80000000);
        c->extended_cpuid_level = xlvl;

        if ((xlvl & 0xffff0000) == 0x80000000) {
                if (xlvl >= 0x80000001) {
                        c->x86_capability[1] = cpuid_edx(0x80000001);
                        c->x86_capability[6] = cpuid_ecx(0x80000001);
                }
        }

        if (c->extended_cpuid_level >= 0x80000008) {
                u32 eax = cpuid_eax(0x80000008);

                c->x86_virt_bits = (eax >> 8) & 0xff;
                c->x86_phys_bits = eax & 0xff;
        }
#ifdef CONFIG_X86_32
        else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
                c->x86_phys_bits = 36;
#endif

        if (c->extended_cpuid_level >= 0x80000007)
                c->x86_power = cpuid_edx(0x80000007);
}
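
/*
 * Note on the indexing above: x86_capability[] is an array of 32-bit
 * feature words. Word 0 holds CPUID.1:EDX, word 4 CPUID.1:ECX, word 1
 * CPUID.0x80000001:EDX and word 6 CPUID.0x80000001:ECX, matching the
 * X86_FEATURE_* bit definitions in <asm/cpufeature.h>.
 */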

static void __cpuinit identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
        int i;

        /*
         * First of all, decide if this is a 486 or higher
         * It's a 486 if we can modify the AC flag
         */
        if (flag_is_changeable_p(X86_EFLAGS_AC))
                c->x86 = 4;
        else
                c->x86 = 3;

        for (i = 0; i < X86_VENDOR_NUM; i++)
                if (cpu_devs[i] && cpu_devs[i]->c_identify) {
                        c->x86_vendor_id[0] = 0;
                        cpu_devs[i]->c_identify(c);
                        if (c->x86_vendor_id[0]) {
                                get_cpu_vendor(c);
                                break;
                        }
                }
#endif
}

/*
 * Do minimum CPU detection early.
 * Fields really needed: vendor, cpuid_level, family, model, mask,
 * cache alignment.
 * The others are not touched to avoid unwanted side effects.
 *
 * WARNING: this function is only called on the BP.  Don't add code here
 * that is supposed to run on all CPUs.
 */
static void __init early_identify_cpu(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
        c->x86_clflush_size = 64;
        c->x86_phys_bits = 36;
        c->x86_virt_bits = 48;
#else
        c->x86_clflush_size = 32;
        c->x86_phys_bits = 32;
        c->x86_virt_bits = 32;
#endif
        c->x86_cache_alignment = c->x86_clflush_size;

        memset(&c->x86_capability, 0, sizeof c->x86_capability);
        c->extended_cpuid_level = 0;

        if (!have_cpuid_p())
                identify_cpu_without_cpuid(c);

        /* cyrix could have cpuid enabled via c_identify() */
        if (!have_cpuid_p())
                return;

        cpu_detect(c);

        get_cpu_vendor(c);

        get_cpu_cap(c);

        if (this_cpu->c_early_init)
                this_cpu->c_early_init(c);

#ifdef CONFIG_SMP
        c->cpu_index = boot_cpu_id;
#endif
        filter_cpuid_features(c, false);
}

void __init early_cpu_init(void)
{
        const struct cpu_dev *const *cdev;
        int count = 0;

#ifdef PROCESSOR_SELECT
        printk(KERN_INFO "KERNEL supported cpus:\n");
#endif

        for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
                const struct cpu_dev *cpudev = *cdev;

                if (count >= X86_VENDOR_NUM)
                        break;
                cpu_devs[count] = cpudev;
                count++;

#ifdef PROCESSOR_SELECT
                {
                        unsigned int j;

                        for (j = 0; j < 2; j++) {
                                if (!cpudev->c_ident[j])
                                        continue;
                                printk(KERN_INFO "  %s %s\n", cpudev->c_vendor,
                                        cpudev->c_ident[j]);
                        }
                }
#endif
        }
        early_identify_cpu(&boot_cpu_data);
}

/*
 * The NOPL instruction is supposed to exist on all CPUs with
 * family >= 6; unfortunately, that's not true in practice because
 * of early VIA chips and (more importantly) broken virtualizers that
 * are not easy to detect. In the latter case it doesn't even *fail*
 * reliably, so probing for it doesn't even work.  Disable it completely
 * unless we can find a reliable way to detect all the broken cases.
 */
static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
{
        clear_cpu_cap(c, X86_FEATURE_NOPL);
}

static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
{
        c->extended_cpuid_level = 0;

        if (!have_cpuid_p())
                identify_cpu_without_cpuid(c);

        /* cyrix could have cpuid enabled via c_identify() */
        if (!have_cpuid_p())
                return;

        cpu_detect(c);

        get_cpu_vendor(c);

        get_cpu_cap(c);

        if (c->cpuid_level >= 0x00000001) {
                c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
#ifdef CONFIG_X86_32
# ifdef CONFIG_X86_HT
                c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
# else
                c->apicid = c->initial_apicid;
# endif
#endif

#ifdef CONFIG_X86_HT
                c->phys_proc_id = c->initial_apicid;
#endif
        }

        get_model_name(c); /* Default name */

        init_scattered_cpuid_features(c);
        detect_nopl(c);
}

/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
{
        int i;

        c->loops_per_jiffy = loops_per_jiffy;
        c->x86_cache_size = -1;
        c->x86_vendor = X86_VENDOR_UNKNOWN;
        c->x86_model = c->x86_mask = 0; /* So far unknown... */
        c->x86_vendor_id[0] = '\0'; /* Unset */
        c->x86_model_id[0] = '\0';  /* Unset */
        c->x86_max_cores = 1;
        c->x86_coreid_bits = 0;
#ifdef CONFIG_X86_64
        c->x86_clflush_size = 64;
        c->x86_phys_bits = 36;
        c->x86_virt_bits = 48;
#else
        c->cpuid_level = -1;    /* CPUID not detected */
        c->x86_clflush_size = 32;
        c->x86_phys_bits = 32;
        c->x86_virt_bits = 32;
#endif
        c->x86_cache_alignment = c->x86_clflush_size;
        memset(&c->x86_capability, 0, sizeof c->x86_capability);

        generic_identify(c);

        if (this_cpu->c_identify)
                this_cpu->c_identify(c);

        /* Clear/Set all flags overridden by options, after probe */
        for (i = 0; i < NCAPINTS; i++) {
                c->x86_capability[i] &= ~cpu_caps_cleared[i];
                c->x86_capability[i] |= cpu_caps_set[i];
        }

#ifdef CONFIG_X86_64
        c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
#endif

        /*
         * Vendor-specific initialization.  In this section we
         * canonicalize the feature flags, meaning if there are
         * features a certain CPU supports which CPUID doesn't
         * tell us, CPUID claiming incorrect flags, or other bugs,
         * we handle them here.
         *
         * At the end of this section, c->x86_capability better
         * indicate the features this CPU genuinely supports!
         */
        if (this_cpu->c_init)
                this_cpu->c_init(c);

        /* Disable the PN if appropriate */
        squash_the_stupid_serial_number(c);

        /*
         * The vendor-specific functions might have changed features.
         * Now we do "generic changes."
         */

        /* Filter out anything that depends on CPUID levels we don't have */
        filter_cpuid_features(c, true);

        /* If the model name is still unset, do table lookup. */
        if (!c->x86_model_id[0]) {
                const char *p;
                p = table_lookup_model(c);
                if (p)
                        strcpy(c->x86_model_id, p);
                else
                        /* Last resort... */
                        sprintf(c->x86_model_id, "%02x/%02x",
                                c->x86, c->x86_model);
        }

#ifdef CONFIG_X86_64
        detect_ht(c);
#endif

        init_hypervisor(c);

        /*
         * Clear/Set all flags overridden by options again; this needs
         * to happen before the capabilities of all CPUs are ANDed into
         * boot_cpu_data below.
         */
        for (i = 0; i < NCAPINTS; i++) {
                c->x86_capability[i] &= ~cpu_caps_cleared[i];
                c->x86_capability[i] |= cpu_caps_set[i];
        }

        /*
         * On SMP, boot_cpu_data holds the common feature set between
         * all CPUs; so make sure that we indicate which features are
         * common between the CPUs.  The first time this routine gets
         * executed, c == &boot_cpu_data.
         */
        if (c != &boot_cpu_data) {
                /* AND the already accumulated flags with these */
                for (i = 0; i < NCAPINTS; i++)
                        boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
        }

        /* Init Machine Check Exception if available. */
        mcheck_cpu_init(c);

        select_idle_routine(c);

#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
        numa_add_cpu(smp_processor_id());
#endif
}

#ifdef CONFIG_X86_64
static void vgetcpu_set_mode(void)
{
        if (cpu_has(&boot_cpu_data, X86_FEATURE_RDTSCP))
                vgetcpu_mode = VGETCPU_RDTSCP;
        else
                vgetcpu_mode = VGETCPU_LSL;
}
#endif

void __init identify_boot_cpu(void)
{
        identify_cpu(&boot_cpu_data);
        init_c1e_mask();
#ifdef CONFIG_X86_32
        sysenter_setup();
        enable_sep_cpu();
#else
        vgetcpu_set_mode();
#endif
        init_hw_perf_events();
}

void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
{
        BUG_ON(c == &boot_cpu_data);
        identify_cpu(c);
#ifdef CONFIG_X86_32
        enable_sep_cpu();
#endif
        mtrr_ap_init();
}

struct msr_range {
        unsigned        min;
        unsigned        max;
};

static const struct msr_range msr_range_array[] __cpuinitconst = {
        { 0x00000000, 0x00000418 },
        { 0xc0000000, 0xc000040b },
        { 0xc0010000, 0xc0010142 },
        { 0xc0011000, 0xc001103b },
};
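
/*
 * The 0xc0000000- and 0xc0010000-based ranges above are AMD MSR spaces;
 * print_cpu_msr() below probes all ranges with rdmsrl_amd_safe(), which
 * traps the #GP a non-existent MSR would raise, so scanning blindly
 * across holes in the ranges is safe.
 */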

static void __cpuinit print_cpu_msr(void)
{
        unsigned index_min, index_max;
        unsigned index;
        u64 val;
        int i;

        for (i = 0; i < ARRAY_SIZE(msr_range_array); i++) {
                index_min = msr_range_array[i].min;
                index_max = msr_range_array[i].max;

                for (index = index_min; index < index_max; index++) {
                        if (rdmsrl_amd_safe(index, &val))
                                continue;
                        printk(KERN_INFO " MSR%08x: %016llx\n", index, val);
                }
        }
}

static int show_msr __cpuinitdata;

static __init int setup_show_msr(char *arg)
{
        int num;

        get_option(&arg, &num);

        if (num > 0)
                show_msr = num;
        return 1;
}
__setup("show_msr=", setup_show_msr);

static __init int setup_noclflush(char *arg)
{
        setup_clear_cpu_cap(X86_FEATURE_CLFLSH);
        return 1;
}
__setup("noclflush", setup_noclflush);

void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
{
        const char *vendor = NULL;

        if (c->x86_vendor < X86_VENDOR_NUM) {
                vendor = this_cpu->c_vendor;
        } else {
                if (c->cpuid_level >= 0)
                        vendor = c->x86_vendor_id;
        }

        if (vendor && !strstr(c->x86_model_id, vendor))
                printk(KERN_CONT "%s ", vendor);

        if (c->x86_model_id[0])
                printk(KERN_CONT "%s", c->x86_model_id);
        else
                printk(KERN_CONT "%d86", c->x86);

        if (c->x86_mask || c->cpuid_level >= 0)
                printk(KERN_CONT " stepping %02x\n", c->x86_mask);
        else
                printk(KERN_CONT "\n");

#ifdef CONFIG_SMP
        if (c->cpu_index < show_msr)
                print_cpu_msr();
#else
        if (show_msr)
                print_cpu_msr();
#endif
}

static __init int setup_disablecpuid(char *arg)
{
        int bit;

        if (get_option(&arg, &bit) && bit < NCAPINTS*32)
                setup_clear_cpu_cap(bit);
        else
                return 0;

        return 1;
}
__setup("clearcpuid=", setup_disablecpuid);

#ifdef CONFIG_X86_64
struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };

DEFINE_PER_CPU_FIRST(union irq_stack_union,
                     irq_stack_union) __aligned(PAGE_SIZE);

/*
 * The following four percpu variables are hot.  Align current_task to
 * cacheline size such that all four fall in the same cacheline.
 */
DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
        &init_task;
EXPORT_PER_CPU_SYMBOL(current_task);

DEFINE_PER_CPU(unsigned long, kernel_stack) =
        (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
EXPORT_PER_CPU_SYMBOL(kernel_stack);

DEFINE_PER_CPU(char *, irq_stack_ptr) =
        init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64;

DEFINE_PER_CPU(unsigned int, irq_count) = -1;

/*
 * Special IST stacks which the CPU switches to when it calls
 * an IST-marked descriptor entry.  Up to 7 stacks (hardware
 * limit), all of them are 4K, except the debug stack which
 * is 8K.
 */
static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
          [0 ... N_EXCEPTION_STACKS - 1]        = EXCEPTION_STKSZ,
          [DEBUG_STACK - 1]                     = DEBUG_STKSZ
};

static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
        [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);

/* May not be marked __init: used by software suspend */
void syscall_init(void)
{
        /*
         * LSTAR and STAR live in a bit strange symbiosis.
         * They both write to the same internal register.  STAR allows to
         * set CS/DS but only a 32bit target.  LSTAR sets the 64bit rip.
         */
        wrmsrl(MSR_STAR,  ((u64)__USER32_CS)<<48  | ((u64)__KERNEL_CS)<<32);
        wrmsrl(MSR_LSTAR, system_call);
        wrmsrl(MSR_CSTAR, ignore_sysret);

#ifdef CONFIG_IA32_EMULATION
        syscall32_cpu_init();
#endif

        /* Flags to clear on syscall */
        wrmsrl(MSR_SYSCALL_MASK,
               X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|X86_EFLAGS_IOPL);
}
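
/*
 * Layout note: in MSR_STAR, bits 47:32 hold the selector loaded as the
 * kernel CS on syscall, and bits 63:48 the selector base from which
 * sysret derives the user CS/SS; hence __USER32_CS shifted into the
 * top word and __KERNEL_CS into bits 47:32 above.
 */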

unsigned long kernel_eflags;

/*
 * Copies of the original ist values from the tss are only accessed during
 * debugging, no special alignment required.
 */
DEFINE_PER_CPU(struct orig_ist, orig_ist);

#else   /* CONFIG_X86_64 */

DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
EXPORT_PER_CPU_SYMBOL(current_task);

#ifdef CONFIG_CC_STACKPROTECTOR
DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
#endif

/* Make sure %fs and %gs are initialized properly in idle threads */
struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
{
        memset(regs, 0, sizeof(struct pt_regs));
        regs->fs = __KERNEL_PERCPU;
        regs->gs = __KERNEL_STACK_CANARY;

        return regs;
}
#endif  /* CONFIG_X86_64 */

/*
 * Clear all 6 debug registers:
 */
static void clear_all_debug_regs(void)
{
        int i;

        for (i = 0; i < 8; i++) {
                /* Ignore db4, db5 */
                if ((i == 4) || (i == 5))
                        continue;

                set_debugreg(0, i);
        }
}
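
/*
 * DR4 and DR5 are skipped above because they are not real registers:
 * they alias DR6/DR7 when CR4.DE is clear and raise #UD when it is set,
 * so only the six actual debug registers (DR0-DR3, DR6, DR7) get cleared.
 */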

/*
 * cpu_init() initializes state that is per-CPU. Some data is already
 * initialized (naturally) in the bootstrap process, such as the GDT
 * and IDT. We reload them nevertheless, this function acts as a
 * 'CPU state barrier', nothing should get across.
 * A lot of state is already set up in PDA init for 64 bit
 */
#ifdef CONFIG_X86_64

void __cpuinit cpu_init(void)
{
        struct orig_ist *orig_ist;
        struct task_struct *me;
        struct tss_struct *t;
        unsigned long v;
        int cpu;
        int i;

        cpu = stack_smp_processor_id();
        t = &per_cpu(init_tss, cpu);
        orig_ist = &per_cpu(orig_ist, cpu);

#ifdef CONFIG_NUMA
        if (cpu != 0 && percpu_read(node_number) == 0 &&
            cpu_to_node(cpu) != NUMA_NO_NODE)
                percpu_write(node_number, cpu_to_node(cpu));
#endif

        me = current;

        if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask))
                panic("CPU#%d already initialized!\n", cpu);

        printk(KERN_INFO "Initializing CPU#%d\n", cpu);

        clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);

        /*
         * Initialize the per-CPU GDT with the boot GDT,
         * and set up the GDT descriptor:
         */

        switch_to_new_gdt(cpu);
        loadsegment(fs, 0);

        load_idt((const struct desc_ptr *)&idt_descr);

        memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
        syscall_init();

        wrmsrl(MSR_FS_BASE, 0);
        wrmsrl(MSR_KERNEL_GS_BASE, 0);
        barrier();

        x86_configure_nx();
        if (cpu != 0)
                enable_x2apic();

        /*
         * set up and load the per-CPU TSS
         */
        if (!orig_ist->ist[0]) {
                char *estacks = per_cpu(exception_stacks, cpu);

                for (v = 0; v < N_EXCEPTION_STACKS; v++) {
                        estacks += exception_stack_sizes[v];
                        orig_ist->ist[v] = t->x86_tss.ist[v] =
                                        (unsigned long)estacks;
                }
        }

        t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);

        /*
         * <= is required because the CPU will access up to
         * 8 bits beyond the end of the IO permission bitmap.
         */
        for (i = 0; i <= IO_BITMAP_LONGS; i++)
                t->io_bitmap[i] = ~0UL;

        atomic_inc(&init_mm.mm_count);
        me->active_mm = &init_mm;
        BUG_ON(me->mm);
        enter_lazy_tlb(&init_mm, me);

        load_sp0(t, &current->thread);
        set_tss_desc(cpu, t);
        load_TR_desc();
        load_LDT(&init_mm.context);

#ifdef CONFIG_KGDB
        /*
         * If the kgdb is connected no debug regs should be altered.  This
         * is only applicable when KGDB and a KGDB I/O module are built
         * into the kernel and you are using early debugging with
         * kgdbwait. KGDB will control the kernel HW breakpoint registers.
         */
        if (kgdb_connected && arch_kgdb_ops.correct_hw_break)
                arch_kgdb_ops.correct_hw_break();
        else
#endif
                clear_all_debug_regs();

        fpu_init();

        raw_local_save_flags(kernel_eflags);

        if (is_uv_system())
                uv_cpu_init();
}

#else

void __cpuinit cpu_init(void)
{
        int cpu = smp_processor_id();
        struct task_struct *curr = current;
        struct tss_struct *t = &per_cpu(init_tss, cpu);
        struct thread_struct *thread = &curr->thread;

        if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
                printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
                for (;;)
                        local_irq_enable();
        }

        printk(KERN_INFO "Initializing CPU#%d\n", cpu);

        if (cpu_has_vme || cpu_has_tsc || cpu_has_de)
                clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);

        load_idt(&idt_descr);
        switch_to_new_gdt(cpu);

        /*
         * Set up and load the per-CPU TSS and LDT
         */
        atomic_inc(&init_mm.mm_count);
        curr->active_mm = &init_mm;
        BUG_ON(curr->mm);
        enter_lazy_tlb(&init_mm, curr);

        load_sp0(t, thread);
        set_tss_desc(cpu, t);
        load_TR_desc();
        load_LDT(&init_mm.context);

        t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);

#ifdef CONFIG_DOUBLEFAULT
        /* Set up doublefault TSS pointer in the GDT */
        __set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
#endif

        clear_all_debug_regs();

        /*
         * Force FPU initialization:
         */
        if (cpu_has_xsave)
                current_thread_info()->status = TS_XSAVE;
        else
                current_thread_info()->status = 0;
        clear_used_math();
        mxcsr_feature_mask_init();

        /*
         * Boot processor to setup the FP and extended state context info.
         */
        if (smp_processor_id() == boot_cpu_id)
                init_thread_xstate();

        xsave_init();
}
#endif