#include <linux/bootmem.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kgdb.h>
#include <linux/smp.h>
#include <linux/io.h>

#include <asm/stackprotector.h>
#include <asm/perf_event.h>
#include <asm/mmu_context.h>
#include <asm/archrandom.h>
#include <asm/hypervisor.h>
#include <asm/processor.h>
#include <asm/debugreg.h>
#include <asm/sections.h>
#include <linux/topology.h>
#include <linux/cpumask.h>
#include <asm/pgtable.h>
#include <linux/atomic.h>
#include <asm/proto.h>
#include <asm/setup.h>
#include <asm/apic.h>
#include <asm/desc.h>
#include <asm/i387.h>
#include <asm/fpu-internal.h>
#include <asm/mtrr.h>
#include <linux/numa.h>
#include <asm/asm.h>
#include <asm/cpu.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/pat.h>

#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/uv/uv.h>
#endif

#include "cpu.h"

/* all of these masks are initialized in setup_cpu_local_masks() */
cpumask_var_t cpu_initialized_mask;
cpumask_var_t cpu_callout_mask;
cpumask_var_t cpu_callin_mask;

/* representing cpus for which sibling maps can be computed */
cpumask_var_t cpu_sibling_setup_mask;

/* correctly size the local cpu masks */
void __init setup_cpu_local_masks(void)
{
	alloc_bootmem_cpumask_var(&cpu_initialized_mask);
	alloc_bootmem_cpumask_var(&cpu_callin_mask);
	alloc_bootmem_cpumask_var(&cpu_callout_mask);
	alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
}

static void __cpuinit default_init(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
	cpu_detect_cache_sizes(c);
#else
	/* Not much we can do here... */
	/* Check if at least it has cpuid */
	if (c->cpuid_level == -1) {
		/* No cpuid. It must be an ancient CPU */
		if (c->x86 == 4)
			strcpy(c->x86_model_id, "486");
		else if (c->x86 == 3)
			strcpy(c->x86_model_id, "386");
	}
#endif
}

static const struct cpu_dev __cpuinitconst default_cpu = {
	.c_init		= default_init,
	.c_vendor	= "Unknown",
	.c_x86_vendor	= X86_VENDOR_UNKNOWN,
};

static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;

DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
#ifdef CONFIG_X86_64
	/*
	 * We need valid kernel segments for data and code in long mode too;
	 * IRET will check the segment types.  kkeil 2000/10/28
	 * Also sysret mandates a special GDT layout.
	 *
	 * TLS descriptors are currently at a different place compared to i386.
	 * Hopefully nobody expects them at a fixed place (Wine?)
	 */
	[GDT_ENTRY_KERNEL32_CS]		= GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
	[GDT_ENTRY_KERNEL_CS]		= GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
	[GDT_ENTRY_KERNEL_DS]		= GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER32_CS]	= GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_DS]	= GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_CS]	= GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
#else
	[GDT_ENTRY_KERNEL_CS]		= GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
	[GDT_ENTRY_KERNEL_DS]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_CS]	= GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_DS]	= GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
	/*
	 * Segments used for calling PnP BIOS have byte granularity.
	 * The code and data segments have fixed 64k limits;
	 * the transfer segment sizes are set at run time.
	 */
	/* 32-bit code */
	[GDT_ENTRY_PNPBIOS_CS32]	= GDT_ENTRY_INIT(0x409a, 0, 0xffff),
	/* 16-bit code */
	[GDT_ENTRY_PNPBIOS_CS16]	= GDT_ENTRY_INIT(0x009a, 0, 0xffff),
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_DS]		= GDT_ENTRY_INIT(0x0092, 0, 0xffff),
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_TS1]		= GDT_ENTRY_INIT(0x0092, 0, 0),
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_TS2]		= GDT_ENTRY_INIT(0x0092, 0, 0),
	/*
	 * The APM segments have byte granularity and their bases
	 * are set at run time.  All have 64k limits.
	 */
	/* 32-bit code */
	[GDT_ENTRY_APMBIOS_BASE]	= GDT_ENTRY_INIT(0x409a, 0, 0xffff),
	/* 16-bit code */
	[GDT_ENTRY_APMBIOS_BASE+1]	= GDT_ENTRY_INIT(0x009a, 0, 0xffff),
	/* data */
	[GDT_ENTRY_APMBIOS_BASE+2]	= GDT_ENTRY_INIT(0x4092, 0, 0xffff),

	[GDT_ENTRY_ESPFIX_SS]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
	[GDT_ENTRY_PERCPU]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
	GDT_STACK_CANARY_INIT
#endif
} };
EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
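
/*
 * Decoding the GDT_ENTRY_INIT() flag words above: the low byte is the
 * descriptor's access byte and the high nibble its attribute bits.
 * For example, 0xc09b means present, DPL 0, code, execute/read/accessed,
 * with 4K granularity and a 32-bit default operand size; 0xa09b sets
 * the L bit instead for 64-bit kernel code; and 0xc0f3 is DPL 3
 * read/write data for userspace.
 */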

static int __init x86_xsave_setup(char *s)
{
	setup_clear_cpu_cap(X86_FEATURE_XSAVE);
	setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
	return 1;
}
__setup("noxsave", x86_xsave_setup);

static int __init x86_xsaveopt_setup(char *s)
{
	setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
	return 1;
}
__setup("noxsaveopt", x86_xsaveopt_setup);

#ifdef CONFIG_X86_32
static int cachesize_override __cpuinitdata = -1;
static int disable_x86_serial_nr __cpuinitdata = 1;

static int __init cachesize_setup(char *str)
{
	get_option(&str, &cachesize_override);
	return 1;
}
__setup("cachesize=", cachesize_setup);

static int __init x86_fxsr_setup(char *s)
{
	setup_clear_cpu_cap(X86_FEATURE_FXSR);
	setup_clear_cpu_cap(X86_FEATURE_XMM);
	return 1;
}
__setup("nofxsr", x86_fxsr_setup);

static int __init x86_sep_setup(char *s)
{
	setup_clear_cpu_cap(X86_FEATURE_SEP);
	return 1;
}
__setup("nosep", x86_sep_setup);

/* Standard macro to see if a specific flag is changeable */
static inline int flag_is_changeable_p(u32 flag)
{
	u32 f1, f2;

	/*
	 * Cyrix and IDT cpus allow disabling of CPUID
	 * so the code below may return different results
	 * when it is executed before and after enabling
	 * the CPUID. Add "volatile" to not allow gcc to
	 * optimize the subsequent calls to this function.
	 */
	asm volatile ("pushfl		\n\t"
		      "pushfl		\n\t"
		      "popl %0		\n\t"
		      "movl %0, %1	\n\t"
		      "xorl %2, %0	\n\t"
		      "pushl %0		\n\t"
		      "popfl		\n\t"
		      "pushfl		\n\t"
		      "popl %0		\n\t"
		      "popfl		\n\t"

		      : "=&r" (f1), "=&r" (f2)
		      : "ir" (flag));

	return ((f1^f2) & flag) != 0;
}
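
/*
 * How flag_is_changeable_p() works: the first pushfl/popl pair reads
 * EFLAGS into f1, the copy has @flag XORed into it and is written back
 * via pushl/popfl, and the second pushfl/popl re-reads EFLAGS into f1
 * again.  If the bit stuck, ((f1 ^ f2) & flag) is non-zero.
 * have_cpuid_p() below uses this with X86_EFLAGS_ID (bit 21), which is
 * only writable when the CPUID instruction is implemented.
 */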

/* Probe for the CPUID instruction */
static int __cpuinit have_cpuid_p(void)
{
	return flag_is_changeable_p(X86_EFLAGS_ID);
}

static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
	unsigned long lo, hi;

	if (!cpu_has(c, X86_FEATURE_PN) || !disable_x86_serial_nr)
		return;

	/* Disable processor serial number: */

	rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
	lo |= 0x200000;
	wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);

	printk(KERN_NOTICE "CPU serial number disabled.\n");
	clear_cpu_cap(c, X86_FEATURE_PN);

	/* Disabling the serial number may affect the cpuid level */
	c->cpuid_level = cpuid_eax(0);
}

static int __init x86_serial_nr_setup(char *s)
{
	disable_x86_serial_nr = 0;
	return 1;
}
__setup("serialnumber", x86_serial_nr_setup);
#else
static inline int flag_is_changeable_p(u32 flag)
{
	return 1;
}
/* Probe for the CPUID instruction */
static inline int have_cpuid_p(void)
{
	return 1;
}
static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
}
#endif

static int disable_smep __cpuinitdata;
static __init int setup_disable_smep(char *arg)
{
	disable_smep = 1;
	return 1;
}
__setup("nosmep", setup_disable_smep);

static __cpuinit void setup_smep(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_SMEP)) {
		if (unlikely(disable_smep)) {
			setup_clear_cpu_cap(X86_FEATURE_SMEP);
			clear_in_cr4(X86_CR4_SMEP);
		} else
			set_in_cr4(X86_CR4_SMEP);
	}
}

/*
 * Some CPU features depend on higher CPUID levels, which may not always
 * be available due to CPUID level capping or broken virtualization
 * software.  Add those features to this table to auto-disable them.
 */
struct cpuid_dependent_feature {
	u32 feature;
	u32 level;
};

static const struct cpuid_dependent_feature __cpuinitconst
cpuid_dependent_features[] = {
	{ X86_FEATURE_MWAIT,		0x00000005 },
	{ X86_FEATURE_DCA,		0x00000009 },
	{ X86_FEATURE_XSAVE,		0x0000000d },
	{ 0, 0 }
};

static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
{
	const struct cpuid_dependent_feature *df;

	for (df = cpuid_dependent_features; df->feature; df++) {

		if (!cpu_has(c, df->feature))
			continue;
		/*
		 * Note: cpuid_level is set to -1 if unavailable, but
		 * extended_cpuid_level is set to 0 if unavailable
		 * and the legitimate extended levels are all negative
		 * when signed; hence the weird messing around with
		 * signs here...
		 */
		if (!((s32)df->level < 0 ?
		     (u32)df->level > (u32)c->extended_cpuid_level :
		     (s32)df->level > (s32)c->cpuid_level))
			continue;

		clear_cpu_cap(c, df->feature);
		if (!warn)
			continue;

		printk(KERN_WARNING
		       "CPU: CPU feature %s disabled, no CPUID level 0x%x\n",
		       x86_cap_flags[df->feature], df->level);
	}
}
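
/*
 * Worked example for the sign trick above: X86_FEATURE_XSAVE requires
 * basic level 0x0000000d, which is positive as an s32, so it is
 * compared (signed) against cpuid_level and correctly loses to the
 * -1 "no CPUID" value.  An extended requirement such as 0x80000008
 * would be negative as an s32, so it is instead compared unsigned
 * against extended_cpuid_level, where "unavailable" is 0.
 */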

/*
 * Naming convention should be: <Name> [(<Codename>)]
 * This table is only used if init_<vendor>() below doesn't set the
 * model name; in particular, if CPUID levels 0x80000002..4 are
 * supported, it isn't used.
 */

/* Look up CPU names by table lookup. */
static const char *__cpuinit table_lookup_model(struct cpuinfo_x86 *c)
{
	const struct cpu_model_info *info;

	if (c->x86_model >= 16)
		return NULL;	/* Range check */

	if (!this_cpu)
		return NULL;

	info = this_cpu->c_models;

	while (info && info->family) {
		if (info->family == c->x86)
			return info->model_names[c->x86_model];
		info++;
	}
	return NULL;		/* Not found */
}
354
3e0c3737
YL
355__u32 cpu_caps_cleared[NCAPINTS] __cpuinitdata;
356__u32 cpu_caps_set[NCAPINTS] __cpuinitdata;
7d851c8d 357
11e3a840
JF
358void load_percpu_segment(int cpu)
359{
360#ifdef CONFIG_X86_32
361 loadsegment(fs, __KERNEL_PERCPU);
362#else
363 loadsegment(gs, 0);
364 wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu));
365#endif
60a5317f 366 load_stack_canary_segment();
11e3a840
JF
367}
368
0f3fa48a
IM
369/*
370 * Current gdt points %fs at the "master" per-cpu area: after this,
371 * it's on the real one.
372 */
552be871 373void switch_to_new_gdt(int cpu)
9d31d35b
YL
374{
375 struct desc_ptr gdt_descr;
376
2697fbd5 377 gdt_descr.address = (long)get_cpu_gdt_table(cpu);
9d31d35b
YL
378 gdt_descr.size = GDT_SIZE - 1;
379 load_gdt(&gdt_descr);
2697fbd5 380 /* Reload the per-cpu base */
11e3a840
JF
381
382 load_percpu_segment(cpu);
9d31d35b
YL
383}
384

static const struct cpu_dev *__cpuinitdata cpu_devs[X86_VENDOR_NUM] = {};

static void __cpuinit get_model_name(struct cpuinfo_x86 *c)
{
	unsigned int *v;
	char *p, *q;

	if (c->extended_cpuid_level < 0x80000004)
		return;

	v = (unsigned int *)c->x86_model_id;
	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
	c->x86_model_id[48] = 0;

	/*
	 * Intel chips right-justify this string for some dumb reason;
	 * undo that brain damage:
	 */
	p = q = &c->x86_model_id[0];
	while (*p == ' ')
		p++;
	if (p != q) {
		while (*p)
			*q++ = *p++;
		while (q <= &c->x86_model_id[48])
			*q++ = '\0';	/* Zero-pad the rest */
	}
}

void __cpuinit cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
{
	unsigned int n, dummy, ebx, ecx, edx, l2size;

	n = c->extended_cpuid_level;

	if (n >= 0x80000005) {
		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
		c->x86_cache_size = (ecx>>24) + (edx>>24);
#ifdef CONFIG_X86_64
		/* On K8 L1 TLB is inclusive, so don't count it */
		c->x86_tlbsize = 0;
#endif
	}

	if (n < 0x80000006)	/* Some chips just have a large L1. */
		return;

	cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
	l2size = ecx >> 16;

#ifdef CONFIG_X86_64
	c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
#else
	/* do processor-specific cache resizing */
	if (this_cpu->c_size_cache)
		l2size = this_cpu->c_size_cache(c, l2size);

	/* Allow user to override all this if necessary. */
	if (cachesize_override != -1)
		l2size = cachesize_override;

	if (l2size == 0)
		return;		/* Again, no L2 cache is possible */
#endif

	c->x86_cache_size = l2size;
}
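
/*
 * Leaf layout used above (AMD-defined): CPUID 0x80000005 reports the
 * L1 caches, with ECX[31:24] the data cache size in KB and EDX[31:24]
 * the instruction cache size in KB, which is why the two top bytes
 * are summed.  CPUID 0x80000006 ECX[31:16] is the L2 size in KB, and
 * the two 12-bit fields pulled out of EBX are the 4K-page L2 TLB
 * entry counts for data and instructions.
 */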

void __cpuinit detect_ht(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_HT
	u32 eax, ebx, ecx, edx;
	int index_msb, core_bits;
	static bool printed;

	if (!cpu_has(c, X86_FEATURE_HT))
		return;

	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
		goto out;

	if (cpu_has(c, X86_FEATURE_XTOPOLOGY))
		return;

	cpuid(1, &eax, &ebx, &ecx, &edx);

	smp_num_siblings = (ebx & 0xff0000) >> 16;

	if (smp_num_siblings == 1) {
		printk_once(KERN_INFO "CPU0: Hyper-Threading is disabled\n");
		goto out;
	}

	if (smp_num_siblings <= 1)
		goto out;

	index_msb = get_count_order(smp_num_siblings);
	c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);

	smp_num_siblings = smp_num_siblings / c->x86_max_cores;

	index_msb = get_count_order(smp_num_siblings);

	core_bits = get_count_order(c->x86_max_cores);

	c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
				       ((1 << core_bits) - 1);

out:
	if (!printed && (c->x86_max_cores * smp_num_siblings) > 1) {
		printk(KERN_INFO  "CPU: Physical Processor ID: %d\n",
		       c->phys_proc_id);
		printk(KERN_INFO  "CPU: Processor Core ID: %d\n",
		       c->cpu_core_id);
		printed = 1;
	}
#endif
}
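
/*
 * Worked example for detect_ht(): on a package with 4 cores and 2
 * threads per core, CPUID.1 EBX[23:16] gives smp_num_siblings = 8, so
 * index_msb = 3 and phys_proc_id is the APIC ID shifted right by 3.
 * Dividing by x86_max_cores (4) leaves 2 threads per core, so
 * index_msb becomes 1, core_bits is 2, and cpu_core_id ends up as
 * (apicid >> 1) & 3.
 */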

static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
{
	char *v = c->x86_vendor_id;
	int i;

	for (i = 0; i < X86_VENDOR_NUM; i++) {
		if (!cpu_devs[i])
			break;

		if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
		    (cpu_devs[i]->c_ident[1] &&
		     !strcmp(v, cpu_devs[i]->c_ident[1]))) {

			this_cpu = cpu_devs[i];
			c->x86_vendor = this_cpu->c_x86_vendor;
			return;
		}
	}

	printk_once(KERN_ERR
			"CPU: vendor_id '%s' unknown, using generic init.\n" \
			"CPU: Your system may be unstable.\n", v);

	c->x86_vendor = X86_VENDOR_UNKNOWN;
	this_cpu = &default_cpu;
}

void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
{
	/* Get vendor name */
	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
	      (unsigned int *)&c->x86_vendor_id[0],
	      (unsigned int *)&c->x86_vendor_id[8],
	      (unsigned int *)&c->x86_vendor_id[4]);

	c->x86 = 4;
	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		u32 junk, tfms, cap0, misc;

		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
		c->x86 = (tfms >> 8) & 0xf;
		c->x86_model = (tfms >> 4) & 0xf;
		c->x86_mask = tfms & 0xf;

		if (c->x86 == 0xf)
			c->x86 += (tfms >> 20) & 0xff;
		if (c->x86 >= 0x6)
			c->x86_model += ((tfms >> 16) & 0xf) << 4;

		if (cap0 & (1<<19)) {
			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
			c->x86_cache_alignment = c->x86_clflush_size;
		}
	}
}
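
/*
 * Example of the signature decode in cpu_detect(): a Sandy Bridge part
 * returns tfms = 0x000206a7 from CPUID.1 EAX, giving family
 * (bits 11:8) = 6, base model (bits 7:4) = 0xa extended by the model
 * bits 19:16 to 0x2a, and stepping (bits 3:0) = 7.  The extended
 * family bits 27:20 are only added in when the base family is 0xf.
 */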

void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
{
	u32 tfms, xlvl;
	u32 ebx;

	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		u32 capability, excap;

		cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
		c->x86_capability[0] = capability;
		c->x86_capability[4] = excap;
	}

	/* Additional Intel-defined flags: level 0x00000007 */
	if (c->cpuid_level >= 0x00000007) {
		u32 eax, ebx, ecx, edx;

		cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx);

		c->x86_capability[9] = ebx;
	}

	/* AMD-defined flags: level 0x80000001 */
	xlvl = cpuid_eax(0x80000000);
	c->extended_cpuid_level = xlvl;

	if ((xlvl & 0xffff0000) == 0x80000000) {
		if (xlvl >= 0x80000001) {
			c->x86_capability[1] = cpuid_edx(0x80000001);
			c->x86_capability[6] = cpuid_ecx(0x80000001);
		}
	}

	if (c->extended_cpuid_level >= 0x80000008) {
		u32 eax = cpuid_eax(0x80000008);

		c->x86_virt_bits = (eax >> 8) & 0xff;
		c->x86_phys_bits = eax & 0xff;
	}
#ifdef CONFIG_X86_32
	else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
		c->x86_phys_bits = 36;
#endif

	if (c->extended_cpuid_level >= 0x80000007)
		c->x86_power = cpuid_edx(0x80000007);

	init_scattered_cpuid_features(c);
}
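
/*
 * The x86_capability[] words filled in above map to CPUID output as
 * follows: word 0 = CPUID.1 EDX, word 4 = CPUID.1 ECX, word 9 =
 * CPUID.7 (subleaf 0) EBX, word 1 = CPUID.0x80000001 EDX and word 6 =
 * CPUID.0x80000001 ECX.  The X86_FEATURE_* bit numbers in
 * <asm/cpufeature.h> are defined as 32*word + bit to match.
 */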

static void __cpuinit identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	int i;

	/*
	 * First of all, decide if this is a 486 or higher.
	 * It's a 486 if we can modify the AC flag.
	 */
	if (flag_is_changeable_p(X86_EFLAGS_AC))
		c->x86 = 4;
	else
		c->x86 = 3;

	for (i = 0; i < X86_VENDOR_NUM; i++)
		if (cpu_devs[i] && cpu_devs[i]->c_identify) {
			c->x86_vendor_id[0] = 0;
			cpu_devs[i]->c_identify(c);
			if (c->x86_vendor_id[0]) {
				get_cpu_vendor(c);
				break;
			}
		}
#endif
}

/*
 * Do minimum CPU detection early.
 * Fields really needed: vendor, cpuid_level, family, model, mask,
 * cache alignment.
 * The others are not touched to avoid unwanted side effects.
 *
 * WARNING: this function is only called on the BP.  Don't add code
 * here that is supposed to run on all CPUs.
 */
static void __init early_identify_cpu(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
	c->x86_clflush_size = 64;
	c->x86_phys_bits = 36;
	c->x86_virt_bits = 48;
#else
	c->x86_clflush_size = 32;
	c->x86_phys_bits = 32;
	c->x86_virt_bits = 32;
#endif
	c->x86_cache_alignment = c->x86_clflush_size;

	memset(&c->x86_capability, 0, sizeof c->x86_capability);
	c->extended_cpuid_level = 0;

	if (!have_cpuid_p())
		identify_cpu_without_cpuid(c);

	/* cyrix could have cpuid enabled via c_identify() */
	if (!have_cpuid_p())
		return;

	cpu_detect(c);

	get_cpu_vendor(c);

	get_cpu_cap(c);

	if (this_cpu->c_early_init)
		this_cpu->c_early_init(c);

	c->cpu_index = 0;
	filter_cpuid_features(c, false);

	setup_smep(c);

	if (this_cpu->c_bsp_init)
		this_cpu->c_bsp_init(c);
}

void __init early_cpu_init(void)
{
	const struct cpu_dev *const *cdev;
	int count = 0;

#ifdef CONFIG_PROCESSOR_SELECT
	printk(KERN_INFO "KERNEL supported cpus:\n");
#endif

	for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
		const struct cpu_dev *cpudev = *cdev;

		if (count >= X86_VENDOR_NUM)
			break;
		cpu_devs[count] = cpudev;
		count++;

#ifdef CONFIG_PROCESSOR_SELECT
		{
			unsigned int j;

			for (j = 0; j < 2; j++) {
				if (!cpudev->c_ident[j])
					continue;
				printk(KERN_INFO "  %s %s\n", cpudev->c_vendor,
					cpudev->c_ident[j]);
			}
		}
#endif
	}
	early_identify_cpu(&boot_cpu_data);
}

/*
 * The NOPL instruction is supposed to exist on all CPUs of family >= 6;
 * unfortunately, that's not true in practice because of early VIA
 * chips and (more importantly) broken virtualizers that are not easy
 * to detect. In the latter case it doesn't even *fail* reliably, so
 * probing for it doesn't even work. Disable it completely on 32-bit
 * unless we can find a reliable way to detect all the broken cases.
 * Enable it explicitly on 64-bit for non-constant inputs of cpu_has().
 */
static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	clear_cpu_cap(c, X86_FEATURE_NOPL);
#else
	set_cpu_cap(c, X86_FEATURE_NOPL);
#endif
}

static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
{
	c->extended_cpuid_level = 0;

	if (!have_cpuid_p())
		identify_cpu_without_cpuid(c);

	/* cyrix could have cpuid enabled via c_identify() */
	if (!have_cpuid_p())
		return;

	cpu_detect(c);

	get_cpu_vendor(c);

	get_cpu_cap(c);

	if (c->cpuid_level >= 0x00000001) {
		c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
#ifdef CONFIG_X86_32
# ifdef CONFIG_X86_HT
		c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
# else
		c->apicid = c->initial_apicid;
# endif
#endif
		c->phys_proc_id = c->initial_apicid;
	}

	setup_smep(c);

	get_model_name(c); /* Default name */

	detect_nopl(c);
}

/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
{
	int i;

	c->loops_per_jiffy = loops_per_jiffy;
	c->x86_cache_size = -1;
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
	c->x86_vendor_id[0] = '\0'; /* Unset */
	c->x86_model_id[0] = '\0';  /* Unset */
	c->x86_max_cores = 1;
	c->x86_coreid_bits = 0;
#ifdef CONFIG_X86_64
	c->x86_clflush_size = 64;
	c->x86_phys_bits = 36;
	c->x86_virt_bits = 48;
#else
	c->cpuid_level = -1;	/* CPUID not detected */
	c->x86_clflush_size = 32;
	c->x86_phys_bits = 32;
	c->x86_virt_bits = 32;
#endif
	c->x86_cache_alignment = c->x86_clflush_size;
	memset(&c->x86_capability, 0, sizeof c->x86_capability);

	generic_identify(c);

	if (this_cpu->c_identify)
		this_cpu->c_identify(c);

	/* Clear/Set all flags overridden by options, after probe */
	for (i = 0; i < NCAPINTS; i++) {
		c->x86_capability[i] &= ~cpu_caps_cleared[i];
		c->x86_capability[i] |= cpu_caps_set[i];
	}

#ifdef CONFIG_X86_64
	c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
#endif

	/*
	 * Vendor-specific initialization.  In this section we
	 * canonicalize the feature flags, meaning if there are
	 * features a certain CPU supports which CPUID doesn't
	 * tell us, CPUID claiming incorrect flags, or other bugs,
	 * we handle them here.
	 *
	 * At the end of this section, c->x86_capability better
	 * indicate the features this CPU genuinely supports!
	 */
	if (this_cpu->c_init)
		this_cpu->c_init(c);

	/* Disable the PN if appropriate */
	squash_the_stupid_serial_number(c);

	/*
	 * The vendor-specific functions might have changed features.
	 * Now we do "generic changes."
	 */

	/* Filter out anything that depends on CPUID levels we don't have */
	filter_cpuid_features(c, true);

	/* If the model name is still unset, do table lookup. */
	if (!c->x86_model_id[0]) {
		const char *p;
		p = table_lookup_model(c);
		if (p)
			strcpy(c->x86_model_id, p);
		else
			/* Last resort... */
			sprintf(c->x86_model_id, "%02x/%02x",
				c->x86, c->x86_model);
	}

#ifdef CONFIG_X86_64
	detect_ht(c);
#endif

	init_hypervisor(c);
	x86_init_rdrand(c);

	/*
	 * Clear/Set all flags overridden by options again; this needs
	 * to be done before the cross-CPU capability AND below.
	 */
	for (i = 0; i < NCAPINTS; i++) {
		c->x86_capability[i] &= ~cpu_caps_cleared[i];
		c->x86_capability[i] |= cpu_caps_set[i];
	}

	/*
	 * On SMP, boot_cpu_data holds the common feature set between
	 * all CPUs; so make sure that we indicate which features are
	 * common between the CPUs.  The first time this routine gets
	 * executed, c == &boot_cpu_data.
	 */
	if (c != &boot_cpu_data) {
		/* AND the already accumulated flags with these */
		for (i = 0; i < NCAPINTS; i++)
			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
	}

	/* Init Machine Check Exception if available. */
	mcheck_cpu_init(c);

	select_idle_routine(c);

#ifdef CONFIG_NUMA
	numa_add_cpu(smp_processor_id());
#endif
}

#ifdef CONFIG_X86_64
static void vgetcpu_set_mode(void)
{
	if (cpu_has(&boot_cpu_data, X86_FEATURE_RDTSCP))
		vgetcpu_mode = VGETCPU_RDTSCP;
	else
		vgetcpu_mode = VGETCPU_LSL;
}
#endif

void __init identify_boot_cpu(void)
{
	identify_cpu(&boot_cpu_data);
	init_amd_e400_c1e_mask();
#ifdef CONFIG_X86_32
	sysenter_setup();
	enable_sep_cpu();
#else
	vgetcpu_set_mode();
#endif
}

void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
{
	BUG_ON(c == &boot_cpu_data);
	identify_cpu(c);
#ifdef CONFIG_X86_32
	enable_sep_cpu();
#endif
	mtrr_ap_init();
}

struct msr_range {
	unsigned	min;
	unsigned	max;
};

static const struct msr_range msr_range_array[] __cpuinitconst = {
	{ 0x00000000, 0x00000418},
	{ 0xc0000000, 0xc000040b},
	{ 0xc0010000, 0xc0010142},
	{ 0xc0011000, 0xc001103b},
};

static void __cpuinit __print_cpu_msr(void)
{
	unsigned index_min, index_max;
	unsigned index;
	u64 val;
	int i;

	for (i = 0; i < ARRAY_SIZE(msr_range_array); i++) {
		index_min = msr_range_array[i].min;
		index_max = msr_range_array[i].max;

		for (index = index_min; index < index_max; index++) {
			if (rdmsrl_amd_safe(index, &val))
				continue;
			printk(KERN_INFO " MSR%08x: %016llx\n", index, val);
		}
	}
}

static int show_msr __cpuinitdata;

static __init int setup_show_msr(char *arg)
{
	int num;

	get_option(&arg, &num);

	if (num > 0)
		show_msr = num;
	return 1;
}
__setup("show_msr=", setup_show_msr);
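
/*
 * Usage: booting with "show_msr=1" dumps the MSR ranges above for the
 * boot CPU only; "show_msr=N" does so for the first N CPUs, since
 * print_cpu_msr() below compares each CPU's cpu_index against it.
 */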

static __init int setup_noclflush(char *arg)
{
	setup_clear_cpu_cap(X86_FEATURE_CLFLSH);
	return 1;
}
__setup("noclflush", setup_noclflush);

void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
{
	const char *vendor = NULL;

	if (c->x86_vendor < X86_VENDOR_NUM) {
		vendor = this_cpu->c_vendor;
	} else {
		if (c->cpuid_level >= 0)
			vendor = c->x86_vendor_id;
	}

	if (vendor && !strstr(c->x86_model_id, vendor))
		printk(KERN_CONT "%s ", vendor);

	if (c->x86_model_id[0])
		printk(KERN_CONT "%s", c->x86_model_id);
	else
		printk(KERN_CONT "%d86", c->x86);

	if (c->x86_mask || c->cpuid_level >= 0)
		printk(KERN_CONT " stepping %02x\n", c->x86_mask);
	else
		printk(KERN_CONT "\n");

	print_cpu_msr(c);
}

void __cpuinit print_cpu_msr(struct cpuinfo_x86 *c)
{
	if (c->cpu_index < show_msr)
		__print_cpu_msr();
}

static __init int setup_disablecpuid(char *arg)
{
	int bit;

	if (get_option(&arg, &bit) && bit < NCAPINTS*32)
		setup_clear_cpu_cap(bit);
	else
		return 0;

	return 1;
}
__setup("clearcpuid=", setup_disablecpuid);
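
/*
 * The bit passed to "clearcpuid=" uses the x86_capability numbering,
 * i.e. 32 * word + bit within the word.  For example, assuming the
 * X86_FEATURE_XSAVE definition of (4*32+26) from <asm/cpufeature.h>,
 * "clearcpuid=154" would disable XSAVE at boot.
 */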

#ifdef CONFIG_X86_64
struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1,
				    (unsigned long) nmi_idt_table };

DEFINE_PER_CPU_FIRST(union irq_stack_union,
		     irq_stack_union) __aligned(PAGE_SIZE);

/*
 * The following four percpu variables are hot.  Align current_task to
 * cacheline size such that all four fall in the same cacheline.
 */
DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
	&init_task;
EXPORT_PER_CPU_SYMBOL(current_task);

DEFINE_PER_CPU(unsigned long, kernel_stack) =
	(unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
EXPORT_PER_CPU_SYMBOL(kernel_stack);

DEFINE_PER_CPU(char *, irq_stack_ptr) =
	init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64;

DEFINE_PER_CPU(unsigned int, irq_count) = -1;

DEFINE_PER_CPU(struct task_struct *, fpu_owner_task);

/*
 * Special IST stacks which the CPU switches to when it calls
 * an IST-marked descriptor entry.  Up to 7 stacks (hardware
 * limit), all of them are 4K, except the debug stack which
 * is 8K.
 */
static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
	  [0 ... N_EXCEPTION_STACKS - 1]	= EXCEPTION_STKSZ,
	  [DEBUG_STACK - 1]			= DEBUG_STKSZ
};

static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
	[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);

/* May not be marked __init: used by software suspend */
void syscall_init(void)
{
	/*
	 * LSTAR and STAR live in a bit strange symbiosis.
	 * They both write to the same internal register.  STAR allows
	 * setting CS/DS, but only a 32-bit target.  LSTAR sets the
	 * 64-bit rip.
	 */
	wrmsrl(MSR_STAR,  ((u64)__USER32_CS)<<48  | ((u64)__KERNEL_CS)<<32);
	wrmsrl(MSR_LSTAR, system_call);
	wrmsrl(MSR_CSTAR, ignore_sysret);

#ifdef CONFIG_IA32_EMULATION
	syscall32_cpu_init();
#endif

	/* Flags to clear on syscall */
	wrmsrl(MSR_SYSCALL_MASK,
	       X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|X86_EFLAGS_IOPL);
}
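
/*
 * MSR_STAR layout as programmed above: bits 47:32 hold the selector
 * loaded into CS on syscall (with SS set to that value + 8), and bits
 * 63:48 the base selector used on sysret, from which the CPU derives
 * the 32-bit CS (+0), SS (+8) and 64-bit CS (+16).
 */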

unsigned long kernel_eflags;

/*
 * Copies of the original ist values from the tss are only accessed during
 * debugging, no special alignment required.
 */
DEFINE_PER_CPU(struct orig_ist, orig_ist);

static DEFINE_PER_CPU(unsigned long, debug_stack_addr);
DEFINE_PER_CPU(int, debug_stack_usage);

int is_debug_stack(unsigned long addr)
{
	return __get_cpu_var(debug_stack_usage) ||
		(addr <= __get_cpu_var(debug_stack_addr) &&
		 addr > (__get_cpu_var(debug_stack_addr) - DEBUG_STKSZ));
}

void debug_stack_set_zero(void)
{
	load_idt((const struct desc_ptr *)&nmi_idt_descr);
}

void debug_stack_reset(void)
{
	load_idt((const struct desc_ptr *)&idt_descr);
}

#else	/* CONFIG_X86_64 */

DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
EXPORT_PER_CPU_SYMBOL(current_task);
DEFINE_PER_CPU(struct task_struct *, fpu_owner_task);

#ifdef CONFIG_CC_STACKPROTECTOR
DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
#endif

/* Make sure %fs and %gs are initialized properly in idle threads */
struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
{
	memset(regs, 0, sizeof(struct pt_regs));
	regs->fs = __KERNEL_PERCPU;
	regs->gs = __KERNEL_STACK_CANARY;

	return regs;
}
#endif	/* CONFIG_X86_64 */

/*
 * Clear all 6 debug registers:
 */
static void clear_all_debug_regs(void)
{
	int i;

	for (i = 0; i < 8; i++) {
		/* Ignore db4, db5 */
		if ((i == 4) || (i == 5))
			continue;

		set_debugreg(0, i);
	}
}
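
/*
 * DR4 and DR5 are skipped above because they are only legacy aliases
 * of DR6 and DR7; accessing them raises #UD when CR4.DE is set, and
 * there is no separate state behind them to clear.
 */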

#ifdef CONFIG_KGDB
/*
 * Restore debug regs if using kgdbwait and you have a kernel debugger
 * connection established.
 */
static void dbg_restore_debug_regs(void)
{
	if (unlikely(kgdb_connected && arch_kgdb_ops.correct_hw_break))
		arch_kgdb_ops.correct_hw_break();
}
#else /* ! CONFIG_KGDB */
#define dbg_restore_debug_regs()
#endif /* ! CONFIG_KGDB */

/*
 * cpu_init() initializes state that is per-CPU.  Some data is already
 * initialized (naturally) in the bootstrap process, such as the GDT
 * and IDT.  We reload them nevertheless; this function acts as a
 * 'CPU state barrier': nothing should get across.
 * A lot of state is already set up in PDA init for 64-bit.
 */
#ifdef CONFIG_X86_64

void __cpuinit cpu_init(void)
{
	struct orig_ist *oist;
	struct task_struct *me;
	struct tss_struct *t;
	unsigned long v;
	int cpu;
	int i;

	cpu = stack_smp_processor_id();
	t = &per_cpu(init_tss, cpu);
	oist = &per_cpu(orig_ist, cpu);

#ifdef CONFIG_NUMA
	if (cpu != 0 && this_cpu_read(numa_node) == 0 &&
	    early_cpu_to_node(cpu) != NUMA_NO_NODE)
		set_numa_node(early_cpu_to_node(cpu));
#endif

	me = current;

	if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask))
		panic("CPU#%d already initialized!\n", cpu);

	pr_debug("Initializing CPU#%d\n", cpu);

	clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);

	/*
	 * Initialize the per-CPU GDT with the boot GDT,
	 * and set up the GDT descriptor:
	 */

	switch_to_new_gdt(cpu);
	loadsegment(fs, 0);

	load_idt((const struct desc_ptr *)&idt_descr);

	memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
	syscall_init();

	wrmsrl(MSR_FS_BASE, 0);
	wrmsrl(MSR_KERNEL_GS_BASE, 0);
	barrier();

	x86_configure_nx();
	if (cpu != 0)
		enable_x2apic();

	/*
	 * set up and load the per-CPU TSS
	 */
	if (!oist->ist[0]) {
		char *estacks = per_cpu(exception_stacks, cpu);

		for (v = 0; v < N_EXCEPTION_STACKS; v++) {
			estacks += exception_stack_sizes[v];
			oist->ist[v] = t->x86_tss.ist[v] =
					(unsigned long)estacks;
			if (v == DEBUG_STACK-1)
				per_cpu(debug_stack_addr, cpu) = (unsigned long)estacks;
		}
	}

	t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);

	/*
	 * <= is required because the CPU will access up to
	 * 8 bits beyond the end of the IO permission bitmap.
	 */
	for (i = 0; i <= IO_BITMAP_LONGS; i++)
		t->io_bitmap[i] = ~0UL;

	atomic_inc(&init_mm.mm_count);
	me->active_mm = &init_mm;
	BUG_ON(me->mm);
	enter_lazy_tlb(&init_mm, me);

	load_sp0(t, &current->thread);
	set_tss_desc(cpu, t);
	load_TR_desc();
	load_LDT(&init_mm.context);

	clear_all_debug_regs();
	dbg_restore_debug_regs();

	fpu_init();
	xsave_init();

	raw_local_save_flags(kernel_eflags);

	if (is_uv_system())
		uv_cpu_init();
}

#else

void __cpuinit cpu_init(void)
{
	int cpu = smp_processor_id();
	struct task_struct *curr = current;
	struct tss_struct *t = &per_cpu(init_tss, cpu);
	struct thread_struct *thread = &curr->thread;

	if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
		printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
		for (;;)
			local_irq_enable();
	}

	printk(KERN_INFO "Initializing CPU#%d\n", cpu);

	if (cpu_has_vme || cpu_has_tsc || cpu_has_de)
		clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);

	load_idt(&idt_descr);
	switch_to_new_gdt(cpu);

	/*
	 * Set up and load the per-CPU TSS and LDT
	 */
	atomic_inc(&init_mm.mm_count);
	curr->active_mm = &init_mm;
	BUG_ON(curr->mm);
	enter_lazy_tlb(&init_mm, curr);

	load_sp0(t, thread);
	set_tss_desc(cpu, t);
	load_TR_desc();
	load_LDT(&init_mm.context);

	t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);

#ifdef CONFIG_DOUBLEFAULT
	/* Set up doublefault TSS pointer in the GDT */
	__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
#endif

	clear_all_debug_regs();
	dbg_restore_debug_regs();

	fpu_init();
	xsave_init();
}
#endif