#include <linux/bootmem.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kgdb.h>
#include <linux/smp.h>
#include <linux/io.h>

#include <asm/stackprotector.h>
#include <asm/perf_counter.h>
#include <asm/mmu_context.h>
#include <asm/hypervisor.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/topology.h>
#include <asm/cpumask.h>
#include <asm/pgtable.h>
#include <asm/atomic.h>
#include <asm/proto.h>
#include <asm/setup.h>
#include <asm/apic.h>
#include <asm/desc.h>
#include <asm/i387.h>
#include <asm/mtrr.h>
#include <asm/numa.h>
#include <asm/asm.h>
#include <asm/cpu.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/pat.h>
#include <asm/smp.h>

#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/uv/uv.h>
#endif

#include "cpu.h"

/* all of these masks are initialized in setup_cpu_local_masks() */
cpumask_var_t cpu_initialized_mask;
cpumask_var_t cpu_callout_mask;
cpumask_var_t cpu_callin_mask;

/* representing cpus for which sibling maps can be computed */
cpumask_var_t cpu_sibling_setup_mask;

/* correctly size the local cpu masks */
void __init setup_cpu_local_masks(void)
{
	alloc_bootmem_cpumask_var(&cpu_initialized_mask);
	alloc_bootmem_cpumask_var(&cpu_callin_mask);
	alloc_bootmem_cpumask_var(&cpu_callout_mask);
	alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
}

static const struct cpu_dev *this_cpu __cpuinitdata;

DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
#ifdef CONFIG_X86_64
	/*
	 * We need valid kernel segments for data and code in long mode too
	 * IRET will check the segment types  kkeil 2000/10/28
	 * Also sysret mandates a special GDT layout
	 *
	 * TLS descriptors are currently at a different place compared to i386.
	 * Hopefully nobody expects them at a fixed place (Wine?)
	 */
	[GDT_ENTRY_KERNEL32_CS]		= { { { 0x0000ffff, 0x00cf9b00 } } },
	[GDT_ENTRY_KERNEL_CS]		= { { { 0x0000ffff, 0x00af9b00 } } },
	[GDT_ENTRY_KERNEL_DS]		= { { { 0x0000ffff, 0x00cf9300 } } },
	[GDT_ENTRY_DEFAULT_USER32_CS]	= { { { 0x0000ffff, 0x00cffb00 } } },
	[GDT_ENTRY_DEFAULT_USER_DS]	= { { { 0x0000ffff, 0x00cff300 } } },
	[GDT_ENTRY_DEFAULT_USER_CS]	= { { { 0x0000ffff, 0x00affb00 } } },
#else
	[GDT_ENTRY_KERNEL_CS]		= { { { 0x0000ffff, 0x00cf9a00 } } },
	[GDT_ENTRY_KERNEL_DS]		= { { { 0x0000ffff, 0x00cf9200 } } },
	[GDT_ENTRY_DEFAULT_USER_CS]	= { { { 0x0000ffff, 0x00cffa00 } } },
	[GDT_ENTRY_DEFAULT_USER_DS]	= { { { 0x0000ffff, 0x00cff200 } } },
	/*
	 * Segments used for calling PnP BIOS have byte granularity.
	 * Their code and data segments have fixed 64k limits;
	 * the transfer segment sizes are set at run time.
	 */
	/* 32-bit code */
	[GDT_ENTRY_PNPBIOS_CS32]	= { { { 0x0000ffff, 0x00409a00 } } },
	/* 16-bit code */
	[GDT_ENTRY_PNPBIOS_CS16]	= { { { 0x0000ffff, 0x00009a00 } } },
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_DS]		= { { { 0x0000ffff, 0x00009200 } } },
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_TS1]		= { { { 0x00000000, 0x00009200 } } },
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_TS2]		= { { { 0x00000000, 0x00009200 } } },
	/*
	 * The APM segments have byte granularity and their bases
	 * are set at run time.  All have 64k limits.
	 */
	/* 32-bit code */
	[GDT_ENTRY_APMBIOS_BASE]	= { { { 0x0000ffff, 0x00409a00 } } },
	/* 16-bit code */
	[GDT_ENTRY_APMBIOS_BASE+1]	= { { { 0x0000ffff, 0x00009a00 } } },
	/* data */
	[GDT_ENTRY_APMBIOS_BASE+2]	= { { { 0x0000ffff, 0x00409200 } } },

	[GDT_ENTRY_ESPFIX_SS]		= { { { 0x0000ffff, 0x00cf9200 } } },
	[GDT_ENTRY_PERCPU]		= { { { 0x0000ffff, 0x00cf9200 } } },
	GDT_STACK_CANARY_INIT
#endif
} };
EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);

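/*
 * For reference, each GDT entry above is written as the low and high
 * 32-bit halves of a segment descriptor.  Taking 0x00af9b00 (the 64-bit
 * GDT_ENTRY_KERNEL_CS) as an example: P=1, DPL=0, S=1, type=0xb
 * (execute/read, accessed), L=1 (long mode), G=1.  The compatibility
 * GDT_ENTRY_KERNEL32_CS uses 0x00cf9b00 instead, i.e. D/B=1 and L=0.
 */
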
static int __init x86_xsave_setup(char *s)
{
	setup_clear_cpu_cap(X86_FEATURE_XSAVE);
	return 1;
}
__setup("noxsave", x86_xsave_setup);

#ifdef CONFIG_X86_32
static int cachesize_override __cpuinitdata = -1;
static int disable_x86_serial_nr __cpuinitdata = 1;

static int __init cachesize_setup(char *str)
{
	get_option(&str, &cachesize_override);
	return 1;
}
__setup("cachesize=", cachesize_setup);

static int __init x86_fxsr_setup(char *s)
{
	setup_clear_cpu_cap(X86_FEATURE_FXSR);
	setup_clear_cpu_cap(X86_FEATURE_XMM);
	return 1;
}
__setup("nofxsr", x86_fxsr_setup);

static int __init x86_sep_setup(char *s)
{
	setup_clear_cpu_cap(X86_FEATURE_SEP);
	return 1;
}
__setup("nosep", x86_sep_setup);

/* Standard macro to see if a specific flag is changeable */
static inline int flag_is_changeable_p(u32 flag)
{
	u32 f1, f2;

	/*
	 * Cyrix and IDT cpus allow disabling of CPUID
	 * so the code below may return different results
	 * when it is executed before and after enabling
	 * the CPUID. Add "volatile" to not allow gcc to
	 * optimize the subsequent calls to this function.
	 */
	asm volatile ("pushfl		\n\t"
		      "pushfl		\n\t"
		      "popl %0		\n\t"
		      "movl %0, %1	\n\t"
		      "xorl %2, %0	\n\t"
		      "pushl %0		\n\t"
		      "popfl		\n\t"
		      "pushfl		\n\t"
		      "popl %0		\n\t"
		      "popfl		\n\t"

		      : "=&r" (f1), "=&r" (f2)
		      : "ir" (flag));

	return ((f1^f2) & flag) != 0;
}

/* Probe for the CPUID instruction */
static int __cpuinit have_cpuid_p(void)
{
	return flag_is_changeable_p(X86_EFLAGS_ID);
}
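/*
 * X86_EFLAGS_ID is bit 21 of EFLAGS; being able to flip it with the
 * pushf/popf sequence above is the architectural way of telling that
 * the CPUID instruction is available (i486 and later).
 */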

static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
	unsigned long lo, hi;

	if (!cpu_has(c, X86_FEATURE_PN) || !disable_x86_serial_nr)
		return;

	/* Disable processor serial number: */

	rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
	lo |= 0x200000;
	wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);

	printk(KERN_NOTICE "CPU serial number disabled.\n");
	clear_cpu_cap(c, X86_FEATURE_PN);

	/* Disabling the serial number may affect the cpuid level */
	c->cpuid_level = cpuid_eax(0);
}
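/*
 * 0x200000 above is bit 21 of MSR_IA32_BBL_CR_CTL, which on Pentium
 * III-era parts disables reporting of the Processor Serial Number.
 */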

static int __init x86_serial_nr_setup(char *s)
{
	disable_x86_serial_nr = 0;
	return 1;
}
__setup("serialnumber", x86_serial_nr_setup);
#else
static inline int flag_is_changeable_p(u32 flag)
{
	return 1;
}
/* Probe for the CPUID instruction */
static inline int have_cpuid_p(void)
{
	return 1;
}
static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
}
#endif

/*
 * Some CPU features depend on higher CPUID levels, which may not always
 * be available due to CPUID level capping or broken virtualization
 * software.  Add those features to this table to auto-disable them.
 */
struct cpuid_dependent_feature {
	u32 feature;
	u32 level;
};

static const struct cpuid_dependent_feature __cpuinitconst
cpuid_dependent_features[] = {
	{ X86_FEATURE_MWAIT,		0x00000005 },
	{ X86_FEATURE_DCA,		0x00000009 },
	{ X86_FEATURE_XSAVE,		0x0000000d },
	{ 0, 0 }
};

static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
{
	const struct cpuid_dependent_feature *df;

	for (df = cpuid_dependent_features; df->feature; df++) {

		if (!cpu_has(c, df->feature))
			continue;
		/*
		 * Note: cpuid_level is set to -1 if unavailable, but
		 * extended_cpuid_level is set to 0 if unavailable
		 * and the legitimate extended levels are all negative
		 * when signed; hence the weird messing around with
		 * signs here...
		 */
		if (!((s32)df->level < 0 ?
		     (u32)df->level > (u32)c->extended_cpuid_level :
		     (s32)df->level > (s32)c->cpuid_level))
			continue;

		clear_cpu_cap(c, df->feature);
		if (!warn)
			continue;

		printk(KERN_WARNING
		       "CPU: CPU feature %s disabled, no CPUID level 0x%x\n",
		       x86_cap_flags[df->feature], df->level);
	}
}
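/*
 * Example: X86_FEATURE_MWAIT needs CPUID leaf 0x00000005.  If a
 * hypervisor caps the maximum basic leaf at 0x00000004 while still
 * advertising the MONITOR/MWAIT capability bit, the loop above clears
 * the feature (and warns about it when called with warn == true).
 */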

/*
 * Naming convention should be: <Name> [(<Codename>)]
 * This table is only used if init_<vendor>() below doesn't set the
 * model name; in particular, if CPUID levels 0x80000002..4 are
 * supported, it isn't used.
 */

/* Look up CPU names by table lookup. */
static const char *__cpuinit table_lookup_model(struct cpuinfo_x86 *c)
{
	const struct cpu_model_info *info;

	if (c->x86_model >= 16)
		return NULL;	/* Range check */

	if (!this_cpu)
		return NULL;

	info = this_cpu->c_models;

	while (info && info->family) {
		if (info->family == c->x86)
			return info->model_names[c->x86_model];
		info++;
	}
	return NULL;		/* Not found */
}

__u32 cpu_caps_cleared[NCAPINTS] __cpuinitdata;
__u32 cpu_caps_set[NCAPINTS] __cpuinitdata;

void load_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
	loadsegment(fs, __KERNEL_PERCPU);
#else
	loadsegment(gs, 0);
	wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu));
#endif
	load_stack_canary_segment();
}
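/*
 * Per-cpu data is reached through a segment register: on 32-bit via
 * %fs and the GDT_ENTRY_PERCPU descriptor, on 64-bit via %gs whose
 * base is programmed directly through MSR_GS_BASE above.
 */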

/*
 * Current gdt points %fs at the "master" per-cpu area: after this,
 * it's on the real one.
 */
void switch_to_new_gdt(int cpu)
{
	struct desc_ptr gdt_descr;

	gdt_descr.address = (long)get_cpu_gdt_table(cpu);
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);
	/* Reload the per-cpu base */

	load_percpu_segment(cpu);
}

static const struct cpu_dev *__cpuinitdata cpu_devs[X86_VENDOR_NUM] = {};

static void __cpuinit default_init(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
	display_cacheinfo(c);
#else
	/* Not much we can do here... */
	/* Check if at least it has cpuid */
	if (c->cpuid_level == -1) {
		/* No cpuid. It must be an ancient CPU */
		if (c->x86 == 4)
			strcpy(c->x86_model_id, "486");
		else if (c->x86 == 3)
			strcpy(c->x86_model_id, "386");
	}
#endif
}

static const struct cpu_dev __cpuinitconst default_cpu = {
	.c_init		= default_init,
	.c_vendor	= "Unknown",
	.c_x86_vendor	= X86_VENDOR_UNKNOWN,
};

static void __cpuinit get_model_name(struct cpuinfo_x86 *c)
{
	unsigned int *v;
	char *p, *q;

	if (c->extended_cpuid_level < 0x80000004)
		return;

	v = (unsigned int *)c->x86_model_id;
	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
	c->x86_model_id[48] = 0;

	/*
	 * Intel chips right-justify this string for some dumb reason;
	 * undo that brain damage:
	 */
	p = q = &c->x86_model_id[0];
	while (*p == ' ')
		p++;
	if (p != q) {
		while (*p)
			*q++ = *p++;
		while (q <= &c->x86_model_id[48])
			*q++ = '\0';	/* Zero-pad the rest */
	}
}

void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
{
	unsigned int n, dummy, ebx, ecx, edx, l2size;

	n = c->extended_cpuid_level;

	if (n >= 0x80000005) {
		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
				edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
		c->x86_cache_size = (ecx>>24) + (edx>>24);
#ifdef CONFIG_X86_64
		/* On K8 L1 TLB is inclusive, so don't count it */
		c->x86_tlbsize = 0;
#endif
	}

	if (n < 0x80000006)	/* Some chips just have a large L1. */
		return;

	cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
	l2size = ecx >> 16;

#ifdef CONFIG_X86_64
	c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
#else
	/* do processor-specific cache resizing */
	if (this_cpu->c_size_cache)
		l2size = this_cpu->c_size_cache(c, l2size);

	/* Allow user to override all this if necessary. */
	if (cachesize_override != -1)
		l2size = cachesize_override;

	if (l2size == 0)
		return;		/* Again, no L2 cache is possible */
#endif

	c->x86_cache_size = l2size;

	printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
			l2size, ecx & 0xFF);
}

void __cpuinit detect_ht(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_HT
	u32 eax, ebx, ecx, edx;
	int index_msb, core_bits;

	if (!cpu_has(c, X86_FEATURE_HT))
		return;

	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
		goto out;

	if (cpu_has(c, X86_FEATURE_XTOPOLOGY))
		return;

	cpuid(1, &eax, &ebx, &ecx, &edx);

	smp_num_siblings = (ebx & 0xff0000) >> 16;

	if (smp_num_siblings == 1) {
		printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
		goto out;
	}

	if (smp_num_siblings <= 1)
		goto out;

	if (smp_num_siblings > nr_cpu_ids) {
		pr_warning("CPU: Unsupported number of siblings %d",
			   smp_num_siblings);
		smp_num_siblings = 1;
		return;
	}

	index_msb = get_count_order(smp_num_siblings);
	c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);

	smp_num_siblings = smp_num_siblings / c->x86_max_cores;

	index_msb = get_count_order(smp_num_siblings);

	core_bits = get_count_order(c->x86_max_cores);

	c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
				       ((1 << core_bits) - 1);

out:
	if ((c->x86_max_cores * smp_num_siblings) > 1) {
		printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
		       c->phys_proc_id);
		printk(KERN_INFO "CPU: Processor Core ID: %d\n",
		       c->cpu_core_id);
	}
#endif
}
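/*
 * The initial APIC ID is effectively [package | core | thread] bit
 * fields.  With, say, 4 cores of 2 threads each (8 siblings), a
 * hypothetical APIC ID of 0b1101 decodes above to phys_proc_id 1
 * (id >> 3), cpu_core_id 2 ((id >> 1) & 3) and SMT thread 1.
 */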

static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
{
	char *v = c->x86_vendor_id;
	int i;

	for (i = 0; i < X86_VENDOR_NUM; i++) {
		if (!cpu_devs[i])
			break;

		if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
		    (cpu_devs[i]->c_ident[1] &&
		     !strcmp(v, cpu_devs[i]->c_ident[1]))) {

			this_cpu = cpu_devs[i];
			c->x86_vendor = this_cpu->c_x86_vendor;
			return;
		}
	}

	printk_once(KERN_ERR
			"CPU: vendor_id '%s' unknown, using generic init.\n" \
			"CPU: Your system may be unstable.\n", v);

	c->x86_vendor = X86_VENDOR_UNKNOWN;
	this_cpu = &default_cpu;
}

void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
{
	/* Get vendor name */
	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
	      (unsigned int *)&c->x86_vendor_id[0],
	      (unsigned int *)&c->x86_vendor_id[8],
	      (unsigned int *)&c->x86_vendor_id[4]);

	c->x86 = 4;
	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		u32 junk, tfms, cap0, misc;

		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
		c->x86 = (tfms >> 8) & 0xf;
		c->x86_model = (tfms >> 4) & 0xf;
		c->x86_mask = tfms & 0xf;

		if (c->x86 == 0xf)
			c->x86 += (tfms >> 20) & 0xff;
		if (c->x86 >= 0x6)
			c->x86_model += ((tfms >> 16) & 0xf) << 4;

		if (cap0 & (1<<19)) {
			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
			c->x86_cache_alignment = c->x86_clflush_size;
		}
	}
}
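/*
 * Worked example: a hypothetical EAX of 0x000106a5 from CPUID leaf 1
 * yields family 0x6, stepping 0x5 and base model 0xa; since the family
 * is >= 6, the extended model field (bits 19:16, here 0x1) is folded
 * in above, giving c->x86_model = 0x1a.
 */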

static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
{
	u32 tfms, xlvl;
	u32 ebx;

	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		u32 capability, excap;

		cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
		c->x86_capability[0] = capability;
		c->x86_capability[4] = excap;
	}

	/* AMD-defined flags: level 0x80000001 */
	xlvl = cpuid_eax(0x80000000);
	c->extended_cpuid_level = xlvl;

	if ((xlvl & 0xffff0000) == 0x80000000) {
		if (xlvl >= 0x80000001) {
			c->x86_capability[1] = cpuid_edx(0x80000001);
			c->x86_capability[6] = cpuid_ecx(0x80000001);
		}
	}

	if (c->extended_cpuid_level >= 0x80000008) {
		u32 eax = cpuid_eax(0x80000008);

		c->x86_virt_bits = (eax >> 8) & 0xff;
		c->x86_phys_bits = eax & 0xff;
	}
#ifdef CONFIG_X86_32
	else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
		c->x86_phys_bits = 36;
#endif

	if (c->extended_cpuid_level >= 0x80000007)
		c->x86_power = cpuid_edx(0x80000007);

}

static void __cpuinit identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	int i;

	/*
	 * First of all, decide if this is a 486 or higher
	 * It's a 486 if we can modify the AC flag
	 */
	if (flag_is_changeable_p(X86_EFLAGS_AC))
		c->x86 = 4;
	else
		c->x86 = 3;

	for (i = 0; i < X86_VENDOR_NUM; i++)
		if (cpu_devs[i] && cpu_devs[i]->c_identify) {
			c->x86_vendor_id[0] = 0;
			cpu_devs[i]->c_identify(c);
			if (c->x86_vendor_id[0]) {
				get_cpu_vendor(c);
				break;
			}
		}
#endif
}

/*
 * Do minimum CPU detection early.
 * Fields really needed: vendor, cpuid_level, family, model, mask,
 * cache alignment.
 * The others are not touched to avoid unwanted side effects.
 *
 * WARNING: this function is only called on the BP.  Don't add code here
 * that is supposed to run on all CPUs.
 */
static void __init early_identify_cpu(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
	c->x86_clflush_size = 64;
	c->x86_phys_bits = 36;
	c->x86_virt_bits = 48;
#else
	c->x86_clflush_size = 32;
	c->x86_phys_bits = 32;
	c->x86_virt_bits = 32;
#endif
	c->x86_cache_alignment = c->x86_clflush_size;

	memset(&c->x86_capability, 0, sizeof c->x86_capability);
	c->extended_cpuid_level = 0;

	if (!have_cpuid_p())
		identify_cpu_without_cpuid(c);

	/* Cyrix could have cpuid enabled via c_identify() */
	if (!have_cpuid_p())
		return;

	cpu_detect(c);

	get_cpu_vendor(c);

	get_cpu_cap(c);

	if (this_cpu->c_early_init)
		this_cpu->c_early_init(c);

#ifdef CONFIG_SMP
	c->cpu_index = boot_cpu_id;
#endif
	filter_cpuid_features(c, false);
}

void __init early_cpu_init(void)
{
	const struct cpu_dev *const *cdev;
	int count = 0;

	printk(KERN_INFO "KERNEL supported cpus:\n");
	for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
		const struct cpu_dev *cpudev = *cdev;
		unsigned int j;

		if (count >= X86_VENDOR_NUM)
			break;
		cpu_devs[count] = cpudev;
		count++;

		for (j = 0; j < 2; j++) {
			if (!cpudev->c_ident[j])
				continue;
			printk(KERN_INFO "  %s %s\n", cpudev->c_vendor,
				cpudev->c_ident[j]);
		}
	}

	early_identify_cpu(&boot_cpu_data);
}

/*
 * The NOPL instruction is supposed to exist on all CPUs with
 * family >= 6; unfortunately, that's not true in practice because
 * of early VIA chips and (more importantly) broken virtualizers that
 * are not easy to detect.  In the latter case it doesn't even *fail*
 * reliably, so probing for it doesn't even work.  Disable it completely
 * unless we can find a reliable way to detect all the broken cases.
 */
static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
{
	clear_cpu_cap(c, X86_FEATURE_NOPL);
}

static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
{
	c->extended_cpuid_level = 0;

	if (!have_cpuid_p())
		identify_cpu_without_cpuid(c);

	/* Cyrix could have cpuid enabled via c_identify() */
	if (!have_cpuid_p())
		return;

	cpu_detect(c);

	get_cpu_vendor(c);

	get_cpu_cap(c);

	if (c->cpuid_level >= 0x00000001) {
		c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
#ifdef CONFIG_X86_32
# ifdef CONFIG_X86_HT
		c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
# else
		c->apicid = c->initial_apicid;
# endif
#endif

#ifdef CONFIG_X86_HT
		c->phys_proc_id = c->initial_apicid;
#endif
	}

	get_model_name(c); /* Default name */

	init_scattered_cpuid_features(c);
	detect_nopl(c);
}

/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
{
	int i;

	c->loops_per_jiffy = loops_per_jiffy;
	c->x86_cache_size = -1;
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
	c->x86_vendor_id[0] = '\0'; /* Unset */
	c->x86_model_id[0] = '\0';  /* Unset */
	c->x86_max_cores = 1;
	c->x86_coreid_bits = 0;
#ifdef CONFIG_X86_64
	c->x86_clflush_size = 64;
	c->x86_phys_bits = 36;
	c->x86_virt_bits = 48;
#else
	c->cpuid_level = -1;	/* CPUID not detected */
	c->x86_clflush_size = 32;
	c->x86_phys_bits = 32;
	c->x86_virt_bits = 32;
#endif
	c->x86_cache_alignment = c->x86_clflush_size;
	memset(&c->x86_capability, 0, sizeof c->x86_capability);

	generic_identify(c);

	if (this_cpu->c_identify)
		this_cpu->c_identify(c);

	/* Clear/Set all flags overridden by options, after probe */
	for (i = 0; i < NCAPINTS; i++) {
		c->x86_capability[i] &= ~cpu_caps_cleared[i];
		c->x86_capability[i] |= cpu_caps_set[i];
	}

#ifdef CONFIG_X86_64
	c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
#endif

	/*
	 * Vendor-specific initialization.  In this section we
	 * canonicalize the feature flags, meaning if there are
	 * features a certain CPU supports which CPUID doesn't
	 * tell us, CPUID claiming incorrect flags, or other bugs,
	 * we handle them here.
	 *
	 * At the end of this section, c->x86_capability better
	 * indicate the features this CPU genuinely supports!
	 */
	if (this_cpu->c_init)
		this_cpu->c_init(c);

	/* Disable the PN if appropriate */
	squash_the_stupid_serial_number(c);

	/*
	 * The vendor-specific functions might have changed features.
	 * Now we do "generic changes."
	 */

	/* Filter out anything that depends on CPUID levels we don't have */
	filter_cpuid_features(c, true);

	/* If the model name is still unset, do table lookup. */
	if (!c->x86_model_id[0]) {
		const char *p;
		p = table_lookup_model(c);
		if (p)
			strcpy(c->x86_model_id, p);
		else
			/* Last resort... */
			sprintf(c->x86_model_id, "%02x/%02x",
				c->x86, c->x86_model);
	}

#ifdef CONFIG_X86_64
	detect_ht(c);
#endif

	init_hypervisor(c);

	/*
	 * Clear/Set all flags overridden by options; we need to do this
	 * before the SMP all-CPUs capability AND below.
	 */
	for (i = 0; i < NCAPINTS; i++) {
		c->x86_capability[i] &= ~cpu_caps_cleared[i];
		c->x86_capability[i] |= cpu_caps_set[i];
	}

	/*
	 * On SMP, boot_cpu_data holds the common feature set between
	 * all CPUs; so make sure that we indicate which features are
	 * common between the CPUs.  The first time this routine gets
	 * executed, c == &boot_cpu_data.
	 */
	if (c != &boot_cpu_data) {
		/* AND the already accumulated flags with these */
		for (i = 0; i < NCAPINTS; i++)
			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
	}

#ifdef CONFIG_X86_MCE
	/* Init Machine Check Exception if available. */
	mcheck_init(c);
#endif

	select_idle_routine(c);

#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
	numa_add_cpu(smp_processor_id());
#endif
}

#ifdef CONFIG_X86_64
static void vgetcpu_set_mode(void)
{
	if (cpu_has(&boot_cpu_data, X86_FEATURE_RDTSCP))
		vgetcpu_mode = VGETCPU_RDTSCP;
	else
		vgetcpu_mode = VGETCPU_LSL;
}
#endif

void __init identify_boot_cpu(void)
{
	identify_cpu(&boot_cpu_data);
	init_c1e_mask();
#ifdef CONFIG_X86_32
	sysenter_setup();
	enable_sep_cpu();
#else
	vgetcpu_set_mode();
#endif
	init_hw_perf_counters();
}

void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
{
	BUG_ON(c == &boot_cpu_data);
	identify_cpu(c);
#ifdef CONFIG_X86_32
	enable_sep_cpu();
#endif
	mtrr_ap_init();
}

struct msr_range {
	unsigned	min;
	unsigned	max;
};

static const struct msr_range msr_range_array[] __cpuinitconst = {
	{ 0x00000000, 0x00000418},
	{ 0xc0000000, 0xc000040b},
	{ 0xc0010000, 0xc0010142},
	{ 0xc0011000, 0xc001103b},
};

static void __cpuinit print_cpu_msr(void)
{
	unsigned index_min, index_max;
	unsigned index;
	u64 val;
	int i;

	for (i = 0; i < ARRAY_SIZE(msr_range_array); i++) {
		index_min = msr_range_array[i].min;
		index_max = msr_range_array[i].max;

		for (index = index_min; index < index_max; index++) {
			if (rdmsrl_amd_safe(index, &val))
				continue;
			printk(KERN_INFO " MSR%08x: %016llx\n", index, val);
		}
	}
}

static int show_msr __cpuinitdata;

static __init int setup_show_msr(char *arg)
{
	int num;

	get_option(&arg, &num);

	if (num > 0)
		show_msr = num;
	return 1;
}
__setup("show_msr=", setup_show_msr);

static __init int setup_noclflush(char *arg)
{
	setup_clear_cpu_cap(X86_FEATURE_CLFLSH);
	return 1;
}
__setup("noclflush", setup_noclflush);

void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
{
	const char *vendor = NULL;

	if (c->x86_vendor < X86_VENDOR_NUM) {
		vendor = this_cpu->c_vendor;
	} else {
		if (c->cpuid_level >= 0)
			vendor = c->x86_vendor_id;
	}

	if (vendor && !strstr(c->x86_model_id, vendor))
		printk(KERN_CONT "%s ", vendor);

	if (c->x86_model_id[0])
		printk(KERN_CONT "%s", c->x86_model_id);
	else
		printk(KERN_CONT "%d86", c->x86);

	if (c->x86_mask || c->cpuid_level >= 0)
		printk(KERN_CONT " stepping %02x\n", c->x86_mask);
	else
		printk(KERN_CONT "\n");

#ifdef CONFIG_SMP
	if (c->cpu_index < show_msr)
		print_cpu_msr();
#else
	if (show_msr)
		print_cpu_msr();
#endif
}

static __init int setup_disablecpuid(char *arg)
{
	int bit;

	if (get_option(&arg, &bit) && bit < NCAPINTS*32)
		setup_clear_cpu_cap(bit);
	else
		return 0;

	return 1;
}
__setup("clearcpuid=", setup_disablecpuid);

#ifdef CONFIG_X86_64
struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };

DEFINE_PER_CPU_FIRST(union irq_stack_union,
		     irq_stack_union) __aligned(PAGE_SIZE);

DEFINE_PER_CPU(char *, irq_stack_ptr) =
	init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64;

DEFINE_PER_CPU(unsigned long, kernel_stack) =
	(unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
EXPORT_PER_CPU_SYMBOL(kernel_stack);

DEFINE_PER_CPU(unsigned int, irq_count) = -1;

/*
 * Special IST stacks which the CPU switches to when it calls
 * an IST-marked descriptor entry.  Up to 7 stacks (hardware
 * limit), all of them are 4K, except the debug stack which
 * is 8K.
 */
static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
	  [0 ... N_EXCEPTION_STACKS - 1]	= EXCEPTION_STKSZ,
	  [DEBUG_STACK - 1]			= DEBUG_STKSZ
};

static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
	[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);

/* May not be marked __init: used by software suspend */
void syscall_init(void)
{
	/*
	 * LSTAR and STAR live in a bit strange symbiosis.
	 * They both write to the same internal register.  STAR allows
	 * setting CS/DS, but only a 32-bit target; LSTAR sets the
	 * 64-bit rip.
	 */
	wrmsrl(MSR_STAR,  ((u64)__USER32_CS)<<48  | ((u64)__KERNEL_CS)<<32);
	wrmsrl(MSR_LSTAR, system_call);
	wrmsrl(MSR_CSTAR, ignore_sysret);

#ifdef CONFIG_IA32_EMULATION
	syscall32_cpu_init();
#endif

	/* Flags to clear on syscall */
	wrmsrl(MSR_SYSCALL_MASK,
	       X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|X86_EFLAGS_IOPL);
}
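/*
 * For context: on SYSCALL the CPU loads CS from STAR[47:32] and SS from
 * STAR[47:32] + 8, while SYSRET to 64-bit mode loads CS from
 * STAR[63:48] + 16 and SS from STAR[63:48] + 8; that is why __KERNEL_CS
 * sits in bits 47:32 and __USER32_CS in bits 63:48 above.
 */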

unsigned long kernel_eflags;

/*
 * Copies of the original ist values from the tss are only accessed during
 * debugging, no special alignment required.
 */
DEFINE_PER_CPU(struct orig_ist, orig_ist);

#else	/* CONFIG_X86_64 */

#ifdef CONFIG_CC_STACKPROTECTOR
DEFINE_PER_CPU(unsigned long, stack_canary);
#endif

/* Make sure %fs and %gs are initialized properly in idle threads */
struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
{
	memset(regs, 0, sizeof(struct pt_regs));
	regs->fs = __KERNEL_PERCPU;
	regs->gs = __KERNEL_STACK_CANARY;

	return regs;
}
#endif	/* CONFIG_X86_64 */

/*
 * Clear all 6 debug registers:
 */
static void clear_all_debug_regs(void)
{
	int i;

	for (i = 0; i < 8; i++) {
		/* Ignore db4, db5 */
		if ((i == 4) || (i == 5))
			continue;

		set_debugreg(0, i);
	}
}
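/*
 * DR4 and DR5 are skipped above because they are reserved: they alias
 * DR6/DR7 when CR4.DE is clear and raise #UD when it is set.
 */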

/*
 * cpu_init() initializes state that is per-CPU. Some data is already
 * initialized (naturally) in the bootstrap process, such as the GDT
 * and IDT. We reload them nevertheless, this function acts as a
 * 'CPU state barrier', nothing should get across.
 * A lot of state is already set up in PDA init for 64 bit
 */
#ifdef CONFIG_X86_64

void __cpuinit cpu_init(void)
{
	struct orig_ist *orig_ist;
	struct task_struct *me;
	struct tss_struct *t;
	unsigned long v;
	int cpu;
	int i;

	cpu = stack_smp_processor_id();
	t = &per_cpu(init_tss, cpu);
	orig_ist = &per_cpu(orig_ist, cpu);

#ifdef CONFIG_NUMA
	if (cpu != 0 && percpu_read(node_number) == 0 &&
	    cpu_to_node(cpu) != NUMA_NO_NODE)
		percpu_write(node_number, cpu_to_node(cpu));
#endif

	me = current;

	if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask))
		panic("CPU#%d already initialized!\n", cpu);

	printk(KERN_INFO "Initializing CPU#%d\n", cpu);

	clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);

	/*
	 * Initialize the per-CPU GDT with the boot GDT,
	 * and set up the GDT descriptor:
	 */

	switch_to_new_gdt(cpu);
	loadsegment(fs, 0);

	load_idt((const struct desc_ptr *)&idt_descr);

	memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
	syscall_init();

	wrmsrl(MSR_FS_BASE, 0);
	wrmsrl(MSR_KERNEL_GS_BASE, 0);
	barrier();

	check_efer();
	if (cpu != 0)
		enable_x2apic();

	/*
	 * set up and load the per-CPU TSS
	 */
	if (!orig_ist->ist[0]) {
		char *estacks = per_cpu(exception_stacks, cpu);

		for (v = 0; v < N_EXCEPTION_STACKS; v++) {
			estacks += exception_stack_sizes[v];
			orig_ist->ist[v] = t->x86_tss.ist[v] =
					(unsigned long)estacks;
		}
	}

	t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);

	/*
	 * <= is required because the CPU will access up to
	 * 8 bits beyond the end of the IO permission bitmap.
	 */
	for (i = 0; i <= IO_BITMAP_LONGS; i++)
		t->io_bitmap[i] = ~0UL;

	atomic_inc(&init_mm.mm_count);
	me->active_mm = &init_mm;
	BUG_ON(me->mm);
	enter_lazy_tlb(&init_mm, me);

	load_sp0(t, &current->thread);
	set_tss_desc(cpu, t);
	load_TR_desc();
	load_LDT(&init_mm.context);

#ifdef CONFIG_KGDB
	/*
	 * If the kgdb is connected no debug regs should be altered. This
	 * is only applicable when KGDB and a KGDB I/O module are built
	 * into the kernel and you are using early debugging with
	 * kgdbwait. KGDB will control the kernel HW breakpoint registers.
	 */
	if (kgdb_connected && arch_kgdb_ops.correct_hw_break)
		arch_kgdb_ops.correct_hw_break();
	else
#endif
		clear_all_debug_regs();

	fpu_init();

	raw_local_save_flags(kernel_eflags);

	if (is_uv_system())
		uv_cpu_init();
}

#else

void __cpuinit cpu_init(void)
{
	int cpu = smp_processor_id();
	struct task_struct *curr = current;
	struct tss_struct *t = &per_cpu(init_tss, cpu);
	struct thread_struct *thread = &curr->thread;

	if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
		printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
		for (;;)
			local_irq_enable();
	}

	printk(KERN_INFO "Initializing CPU#%d\n", cpu);

	if (cpu_has_vme || cpu_has_tsc || cpu_has_de)
		clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);

	load_idt(&idt_descr);
	switch_to_new_gdt(cpu);

	/*
	 * Set up and load the per-CPU TSS and LDT
	 */
	atomic_inc(&init_mm.mm_count);
	curr->active_mm = &init_mm;
	BUG_ON(curr->mm);
	enter_lazy_tlb(&init_mm, curr);

	load_sp0(t, thread);
	set_tss_desc(cpu, t);
	load_TR_desc();
	load_LDT(&init_mm.context);

	t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);

#ifdef CONFIG_DOUBLEFAULT
	/* Set up doublefault TSS pointer in the GDT */
	__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
#endif

	clear_all_debug_regs();

	/*
	 * Force FPU initialization:
	 */
	if (cpu_has_xsave)
		current_thread_info()->status = TS_XSAVE;
	else
		current_thread_info()->status = 0;
	clear_used_math();
	mxcsr_feature_mask_init();

	/*
	 * Boot processor to set up the FP and extended state context info.
	 */
	if (smp_processor_id() == boot_cpu_id)
		init_thread_xstate();

	xsave_init();
}
#endif