arch/x86/kernel/cpu/common.c
#include <linux/bootmem.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/kgdb.h>
#include <linux/smp.h>
#include <linux/io.h>
#include <linux/syscore_ops.h>

#include <asm/stackprotector.h>
#include <asm/perf_event.h>
#include <asm/mmu_context.h>
#include <asm/archrandom.h>
#include <asm/hypervisor.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/debugreg.h>
#include <asm/sections.h>
#include <asm/vsyscall.h>
#include <linux/topology.h>
#include <linux/cpumask.h>
#include <asm/pgtable.h>
#include <linux/atomic.h>
#include <asm/proto.h>
#include <asm/setup.h>
#include <asm/apic.h>
#include <asm/desc.h>
#include <asm/fpu/internal.h>
#include <asm/mtrr.h>
#include <linux/numa.h>
#include <asm/asm.h>
#include <asm/cpu.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/pat.h>
#include <asm/microcode.h>
#include <asm/microcode_intel.h>

#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/uv/uv.h>
#endif

#include "cpu.h"

/* all of these masks are initialized in setup_cpu_local_masks() */
cpumask_var_t cpu_initialized_mask;
cpumask_var_t cpu_callout_mask;
cpumask_var_t cpu_callin_mask;

/* representing cpus for which sibling maps can be computed */
cpumask_var_t cpu_sibling_setup_mask;

/* correctly size the local cpu masks */
void __init setup_cpu_local_masks(void)
{
	alloc_bootmem_cpumask_var(&cpu_initialized_mask);
	alloc_bootmem_cpumask_var(&cpu_callin_mask);
	alloc_bootmem_cpumask_var(&cpu_callout_mask);
	alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
}

static void default_init(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
	cpu_detect_cache_sizes(c);
#else
	/* Not much we can do here... */
	/* Check if at least it has cpuid */
	if (c->cpuid_level == -1) {
		/* No cpuid. It must be an ancient CPU */
		if (c->x86 == 4)
			strcpy(c->x86_model_id, "486");
		else if (c->x86 == 3)
			strcpy(c->x86_model_id, "386");
	}
#endif
}

static const struct cpu_dev default_cpu = {
	.c_init		= default_init,
	.c_vendor	= "Unknown",
	.c_x86_vendor	= X86_VENDOR_UNKNOWN,
};

static const struct cpu_dev *this_cpu = &default_cpu;

DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
#ifdef CONFIG_X86_64
	/*
	 * We need valid kernel segments for data and code in long mode too.
	 * IRET will check the segment types  kkeil 2000/10/28
	 * Also sysret mandates a special GDT layout.
	 *
	 * TLS descriptors are currently at a different place compared to i386.
	 * Hopefully nobody expects them at a fixed place (Wine?)
	 */
	[GDT_ENTRY_KERNEL32_CS]		= GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
	[GDT_ENTRY_KERNEL_CS]		= GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
	[GDT_ENTRY_KERNEL_DS]		= GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER32_CS]	= GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_DS]	= GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_CS]	= GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
#else
	[GDT_ENTRY_KERNEL_CS]		= GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
	[GDT_ENTRY_KERNEL_DS]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_CS]	= GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_DS]	= GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
	/*
	 * Segments used for calling PnP BIOS have byte granularity.
	 * The code and data segments have fixed 64k limits;
	 * the transfer segment sizes are set at run time.
	 */
	/* 32-bit code */
	[GDT_ENTRY_PNPBIOS_CS32]	= GDT_ENTRY_INIT(0x409a, 0, 0xffff),
	/* 16-bit code */
	[GDT_ENTRY_PNPBIOS_CS16]	= GDT_ENTRY_INIT(0x009a, 0, 0xffff),
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_DS]		= GDT_ENTRY_INIT(0x0092, 0, 0xffff),
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_TS1]		= GDT_ENTRY_INIT(0x0092, 0, 0),
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_TS2]		= GDT_ENTRY_INIT(0x0092, 0, 0),
	/*
	 * The APM segments have byte granularity and their bases
	 * are set at run time.  All have 64k limits.
	 */
	/* 32-bit code */
	[GDT_ENTRY_APMBIOS_BASE]	= GDT_ENTRY_INIT(0x409a, 0, 0xffff),
	/* 16-bit code */
	[GDT_ENTRY_APMBIOS_BASE+1]	= GDT_ENTRY_INIT(0x009a, 0, 0xffff),
	/* data */
	[GDT_ENTRY_APMBIOS_BASE+2]	= GDT_ENTRY_INIT(0x4092, 0, 0xffff),

	[GDT_ENTRY_ESPFIX_SS]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
	[GDT_ENTRY_PERCPU]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
	GDT_STACK_CANARY_INIT
#endif
} };
EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
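
/*
 * Editorial note (rough decoding, not in the original source): in
 * GDT_ENTRY_INIT(flags, base, limit), the low byte of "flags" is the
 * descriptor access byte and the 0xX0 high byte carries granularity/size
 * bits. E.g. 0xc09b = present, DPL 0, execute/read code, accessed, with
 * 4K granularity and 32-bit default size (G|D); 0xa09b swaps D for L,
 * marking 64-bit code; 0xc093/0xc092 are the matching read/write data
 * segments.
 */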

static int __init x86_mpx_setup(char *s)
{
	/* require an exact match without trailing characters */
	if (strlen(s))
		return 0;

	/* do not emit a message if the feature is not present */
	if (!boot_cpu_has(X86_FEATURE_MPX))
		return 1;

	setup_clear_cpu_cap(X86_FEATURE_MPX);
	pr_info("nompx: Intel Memory Protection Extensions (MPX) disabled\n");
	return 1;
}
__setup("nompx", x86_mpx_setup);
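
/*
 * Usage note (editorial): __setup("nompx", ...) makes this run when
 * "nompx" appears on the kernel command line; __setup() passes whatever
 * follows an '=' as the argument, so the strlen() check above rejects
 * variants such as "nompx=1".
 */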

static int __init x86_noinvpcid_setup(char *s)
{
	/* noinvpcid doesn't accept parameters */
	if (s)
		return -EINVAL;

	/* do not emit a message if the feature is not present */
	if (!boot_cpu_has(X86_FEATURE_INVPCID))
		return 0;

	setup_clear_cpu_cap(X86_FEATURE_INVPCID);
	pr_info("noinvpcid: INVPCID feature disabled\n");
	return 0;
}
early_param("noinvpcid", x86_noinvpcid_setup);
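
/*
 * Editorial note: early_param() differs from __setup() in that it is
 * handled in parse_early_param(), before most of the boot sequence runs,
 * so a capability cleared here is already gone by the time later CPU
 * setup consults it.
 */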

#ifdef CONFIG_X86_32
static int cachesize_override = -1;
static int disable_x86_serial_nr = 1;

static int __init cachesize_setup(char *str)
{
	get_option(&str, &cachesize_override);
	return 1;
}
__setup("cachesize=", cachesize_setup);

static int __init x86_sep_setup(char *s)
{
	setup_clear_cpu_cap(X86_FEATURE_SEP);
	return 1;
}
__setup("nosep", x86_sep_setup);

/* Standard macro to see if a specific flag is changeable */
static inline int flag_is_changeable_p(u32 flag)
{
	u32 f1, f2;

	/*
	 * Cyrix and IDT cpus allow disabling of CPUID
	 * so the code below may return different results
	 * when it is executed before and after enabling
	 * the CPUID. Add "volatile" to not allow gcc to
	 * optimize the subsequent calls to this function.
	 */
	asm volatile ("pushfl		\n\t"
		      "pushfl		\n\t"
		      "popl %0		\n\t"
		      "movl %0, %1	\n\t"
		      "xorl %2, %0	\n\t"
		      "pushl %0	\n\t"
		      "popfl		\n\t"
		      "pushfl		\n\t"
		      "popl %0		\n\t"
		      "popfl		\n\t"

		      : "=&r" (f1), "=&r" (f2)
		      : "ir" (flag));

	return ((f1^f2) & flag) != 0;
}
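
/*
 * Editorial summary of the probe above: the first pushfl/popl reads
 * EFLAGS, the xorl toggles the requested bit, pushl/popfl writes it back,
 * and the second pushfl/popl re-reads EFLAGS; if the bit changed, it is
 * software-controllable. have_cpuid_p() below uses this with
 * X86_EFLAGS_ID, which can only be toggled when the CPUID instruction is
 * implemented.
 */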

/* Probe for the CPUID instruction */
int have_cpuid_p(void)
{
	return flag_is_changeable_p(X86_EFLAGS_ID);
}

static void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
	unsigned long lo, hi;

	if (!cpu_has(c, X86_FEATURE_PN) || !disable_x86_serial_nr)
		return;

	/* Disable processor serial number: */

	rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
	lo |= 0x200000;
	wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);

	pr_notice("CPU serial number disabled.\n");
	clear_cpu_cap(c, X86_FEATURE_PN);

	/* Disabling the serial number may affect the cpuid level */
	c->cpuid_level = cpuid_eax(0);
}

static int __init x86_serial_nr_setup(char *s)
{
	disable_x86_serial_nr = 0;
	return 1;
}
__setup("serialnumber", x86_serial_nr_setup);
#else
static inline int flag_is_changeable_p(u32 flag)
{
	return 1;
}
static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
}
#endif

static __init int setup_disable_smep(char *arg)
{
	setup_clear_cpu_cap(X86_FEATURE_SMEP);
	return 1;
}
__setup("nosmep", setup_disable_smep);

static __always_inline void setup_smep(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_SMEP))
		cr4_set_bits(X86_CR4_SMEP);
}

static __init int setup_disable_smap(char *arg)
{
	setup_clear_cpu_cap(X86_FEATURE_SMAP);
	return 1;
}
__setup("nosmap", setup_disable_smap);

static __always_inline void setup_smap(struct cpuinfo_x86 *c)
{
	unsigned long eflags = native_save_fl();

	/* This should have been cleared long ago */
	BUG_ON(eflags & X86_EFLAGS_AC);

	if (cpu_has(c, X86_FEATURE_SMAP)) {
#ifdef CONFIG_X86_SMAP
		cr4_set_bits(X86_CR4_SMAP);
#else
		cr4_clear_bits(X86_CR4_SMAP);
#endif
	}
}
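
/*
 * Background (editorial): with CR4.SMAP set, supervisor-mode accesses to
 * user pages fault unless EFLAGS.AC is set (via stac/clac), which is why
 * the BUG_ON() above insists that AC is clear before SMAP is enabled.
 */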

/*
 * Protection Keys are not available in 32-bit mode.
 */
static bool pku_disabled;

static __always_inline void setup_pku(struct cpuinfo_x86 *c)
{
	if (!cpu_has(c, X86_FEATURE_PKU))
		return;
	if (pku_disabled)
		return;

	cr4_set_bits(X86_CR4_PKE);
	/*
	 * Setting X86_CR4_PKE will cause the X86_FEATURE_OSPKE
	 * cpuid bit to be set. We need to ensure that we
	 * update that bit in this CPU's "cpu_info".
	 */
	get_cpu_cap(c);
}

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
static __init int setup_disable_pku(char *arg)
{
	/*
	 * Do not clear the X86_FEATURE_PKU bit. All of the
	 * runtime checks are against OSPKE so clearing the
	 * bit does nothing.
	 *
	 * This way, we will see "pku" in cpuinfo, but not
	 * "ospke", which is exactly what we want. It shows
	 * that the CPU has PKU, but the OS has not enabled it.
	 * This happens to be exactly how a system would look
	 * if we disabled the config option.
	 */
	pr_info("x86: 'nopku' specified, disabling Memory Protection Keys\n");
	pku_disabled = true;
	return 1;
}
__setup("nopku", setup_disable_pku);
#endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */

/*
 * Some CPU features depend on higher CPUID levels, which may not always
 * be available due to CPUID level capping or broken virtualization
 * software.  Add those features to this table to auto-disable them.
 */
struct cpuid_dependent_feature {
	u32 feature;
	u32 level;
};

static const struct cpuid_dependent_feature
cpuid_dependent_features[] = {
	{ X86_FEATURE_MWAIT,		0x00000005 },
	{ X86_FEATURE_DCA,		0x00000009 },
	{ X86_FEATURE_XSAVE,		0x0000000d },
	{ 0, 0 }
};

static void filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
{
	const struct cpuid_dependent_feature *df;

	for (df = cpuid_dependent_features; df->feature; df++) {

		if (!cpu_has(c, df->feature))
			continue;
		/*
		 * Note: cpuid_level is set to -1 if unavailable, but
		 * extended_cpuid_level is set to 0 if unavailable
		 * and the legitimate extended levels are all negative
		 * when signed; hence the weird messing around with
		 * signs here...
		 */
		if (!((s32)df->level < 0 ?
		     (u32)df->level > (u32)c->extended_cpuid_level :
		     (s32)df->level > (s32)c->cpuid_level))
			continue;

		clear_cpu_cap(c, df->feature);
		if (!warn)
			continue;

		pr_warn("CPU: CPU feature " X86_CAP_FMT " disabled, no CPUID level 0x%x\n",
			x86_cap_flag(df->feature), df->level);
	}
}
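
/*
 * Worked example (editorial): X86_FEATURE_XSAVE needs basic leaf 0xd, so
 * on a hypervisor that advertises XSAVE but caps cpuid_level at, say,
 * 0xa, the table above clears the feature bit and, when warn is set,
 * logs the missing level.
 */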

/*
 * Naming convention should be: <Name> [(<Codename>)]
 * This table is only used if init_<vendor>() below doesn't set the model
 * name itself; in particular, if CPUID levels 0x80000002..4 are
 * supported, this isn't used.
 */

/* Look up CPU names by table lookup. */
static const char *table_lookup_model(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	const struct legacy_cpu_model_info *info;

	if (c->x86_model >= 16)
		return NULL;	/* Range check */

	if (!this_cpu)
		return NULL;

	info = this_cpu->legacy_models;

	while (info->family) {
		if (info->family == c->x86)
			return info->model_names[c->x86_model];
		info++;
	}
#endif
	return NULL;		/* Not found */
}

__u32 cpu_caps_cleared[NCAPINTS];
__u32 cpu_caps_set[NCAPINTS];

void load_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
	loadsegment(fs, __KERNEL_PERCPU);
#else
	loadsegment(gs, 0);
	wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu));
#endif
	load_stack_canary_segment();
}

/*
 * Current gdt points %fs at the "master" per-cpu area: after this,
 * it's on the real one.
 */
void switch_to_new_gdt(int cpu)
{
	struct desc_ptr gdt_descr;

	gdt_descr.address = (long)get_cpu_gdt_table(cpu);
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);
	/* Reload the per-cpu base */

	load_percpu_segment(cpu);
}
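
/*
 * Why the reload (editorial): segment registers cache their descriptors,
 * so after load_gdt() points the CPU at this CPU's own GDT, %fs (32-bit)
 * or %gs plus MSR_GS_BASE (64-bit) must be rewritten before percpu
 * accesses hit the right area.
 */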

static const struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};

static void get_model_name(struct cpuinfo_x86 *c)
{
	unsigned int *v;
	char *p, *q, *s;

	if (c->extended_cpuid_level < 0x80000004)
		return;

	v = (unsigned int *)c->x86_model_id;
	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
	c->x86_model_id[48] = 0;

	/* Trim whitespace */
	p = q = s = &c->x86_model_id[0];

	while (*p == ' ')
		p++;

	while (*p) {
		/* Note the last non-whitespace index */
		if (!isspace(*p))
			s = q;

		*q++ = *p++;
	}

	*(s + 1) = '\0';
}
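
/*
 * Trim logic in brief (editorial): p reads while q writes, so the string
 * is shifted left over any leading spaces in place; s remembers where the
 * last non-whitespace character landed, and the final store truncates any
 * trailing whitespace after it.
 */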

void cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
{
	unsigned int n, dummy, ebx, ecx, edx, l2size;

	n = c->extended_cpuid_level;

	if (n >= 0x80000005) {
		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
		c->x86_cache_size = (ecx>>24) + (edx>>24);
#ifdef CONFIG_X86_64
		/* On K8 L1 TLB is inclusive, so don't count it */
		c->x86_tlbsize = 0;
#endif
	}

	if (n < 0x80000006)	/* Some chips just have a large L1. */
		return;

	cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
	l2size = ecx >> 16;

#ifdef CONFIG_X86_64
	c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
#else
	/* do processor-specific cache resizing */
	if (this_cpu->legacy_cache_size)
		l2size = this_cpu->legacy_cache_size(c, l2size);

	/* Allow user to override all this if necessary. */
	if (cachesize_override != -1)
		l2size = cachesize_override;

	if (l2size == 0)
		return;		/* Again, no L2 cache is possible */
#endif

	c->x86_cache_size = l2size;
}

u16 __read_mostly tlb_lli_4k[NR_INFO];
u16 __read_mostly tlb_lli_2m[NR_INFO];
u16 __read_mostly tlb_lli_4m[NR_INFO];
u16 __read_mostly tlb_lld_4k[NR_INFO];
u16 __read_mostly tlb_lld_2m[NR_INFO];
u16 __read_mostly tlb_lld_4m[NR_INFO];
u16 __read_mostly tlb_lld_1g[NR_INFO];

static void cpu_detect_tlb(struct cpuinfo_x86 *c)
{
	if (this_cpu->c_detect_tlb)
		this_cpu->c_detect_tlb(c);

	pr_info("Last level iTLB entries: 4KB %d, 2MB %d, 4MB %d\n",
		tlb_lli_4k[ENTRIES], tlb_lli_2m[ENTRIES],
		tlb_lli_4m[ENTRIES]);

	pr_info("Last level dTLB entries: 4KB %d, 2MB %d, 4MB %d, 1GB %d\n",
		tlb_lld_4k[ENTRIES], tlb_lld_2m[ENTRIES],
		tlb_lld_4m[ENTRIES], tlb_lld_1g[ENTRIES]);
}

void detect_ht(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	u32 eax, ebx, ecx, edx;
	int index_msb, core_bits;
	static bool printed;

	if (!cpu_has(c, X86_FEATURE_HT))
		return;

	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
		goto out;

	if (cpu_has(c, X86_FEATURE_XTOPOLOGY))
		return;

	cpuid(1, &eax, &ebx, &ecx, &edx);

	smp_num_siblings = (ebx & 0xff0000) >> 16;

	if (smp_num_siblings == 1) {
		pr_info_once("CPU0: Hyper-Threading is disabled\n");
		goto out;
	}

	if (smp_num_siblings <= 1)
		goto out;

	index_msb = get_count_order(smp_num_siblings);
	c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);

	smp_num_siblings = smp_num_siblings / c->x86_max_cores;

	index_msb = get_count_order(smp_num_siblings);

	core_bits = get_count_order(c->x86_max_cores);

	c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
				       ((1 << core_bits) - 1);

out:
	if (!printed && (c->x86_max_cores * smp_num_siblings) > 1) {
		pr_info("CPU: Physical Processor ID: %d\n",
			c->phys_proc_id);
		pr_info("CPU: Processor Core ID: %d\n",
			c->cpu_core_id);
		printed = 1;
	}
#endif
}
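
/*
 * Worked example (editorial): on a package with 4 cores x 2 threads,
 * CPUID leaf 1 reports 8 siblings, so index_msb is 3 and phys_proc_id is
 * the APIC ID shifted right by 3; siblings-per-core is then 2, giving
 * cpu_core_id = (apicid >> 1) & 3 from the two core_bits.
 */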

static void get_cpu_vendor(struct cpuinfo_x86 *c)
{
	char *v = c->x86_vendor_id;
	int i;

	for (i = 0; i < X86_VENDOR_NUM; i++) {
		if (!cpu_devs[i])
			break;

		if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
		    (cpu_devs[i]->c_ident[1] &&
		     !strcmp(v, cpu_devs[i]->c_ident[1]))) {

			this_cpu = cpu_devs[i];
			c->x86_vendor = this_cpu->c_x86_vendor;
			return;
		}
	}

	pr_err_once("CPU: vendor_id '%s' unknown, using generic init.\n" \
		    "CPU: Your system may be unstable.\n", v);

	c->x86_vendor = X86_VENDOR_UNKNOWN;
	this_cpu = &default_cpu;
}

void cpu_detect(struct cpuinfo_x86 *c)
{
	/* Get vendor name */
	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
	      (unsigned int *)&c->x86_vendor_id[0],
	      (unsigned int *)&c->x86_vendor_id[8],
	      (unsigned int *)&c->x86_vendor_id[4]);

	c->x86 = 4;
	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		u32 junk, tfms, cap0, misc;

		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
		c->x86		= x86_family(tfms);
		c->x86_model	= x86_model(tfms);
		c->x86_mask	= x86_stepping(tfms);

		if (cap0 & (1<<19)) {
			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
			c->x86_cache_alignment = c->x86_clflush_size;
		}
	}
}

void get_cpu_cap(struct cpuinfo_x86 *c)
{
	u32 eax, ebx, ecx, edx;

	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		cpuid(0x00000001, &eax, &ebx, &ecx, &edx);

		c->x86_capability[CPUID_1_ECX] = ecx;
		c->x86_capability[CPUID_1_EDX] = edx;
	}

	/* Additional Intel-defined flags: level 0x00000007 */
	if (c->cpuid_level >= 0x00000007) {
		cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx);

		c->x86_capability[CPUID_7_0_EBX] = ebx;

		c->x86_capability[CPUID_6_EAX] = cpuid_eax(0x00000006);
		c->x86_capability[CPUID_7_ECX] = ecx;
	}

	/* Extended state features: level 0x0000000d */
	if (c->cpuid_level >= 0x0000000d) {
		cpuid_count(0x0000000d, 1, &eax, &ebx, &ecx, &edx);

		c->x86_capability[CPUID_D_1_EAX] = eax;
	}

	/* Additional Intel-defined flags: level 0x0000000F */
	if (c->cpuid_level >= 0x0000000F) {

		/* QoS sub-leaf, EAX=0Fh, ECX=0 */
		cpuid_count(0x0000000F, 0, &eax, &ebx, &ecx, &edx);
		c->x86_capability[CPUID_F_0_EDX] = edx;

		if (cpu_has(c, X86_FEATURE_CQM_LLC)) {
			/* will be overridden if occupancy monitoring exists */
			c->x86_cache_max_rmid = ebx;

			/* QoS sub-leaf, EAX=0Fh, ECX=1 */
			cpuid_count(0x0000000F, 1, &eax, &ebx, &ecx, &edx);
			c->x86_capability[CPUID_F_1_EDX] = edx;

			if ((cpu_has(c, X86_FEATURE_CQM_OCCUP_LLC)) ||
			      ((cpu_has(c, X86_FEATURE_CQM_MBM_TOTAL)) ||
			       (cpu_has(c, X86_FEATURE_CQM_MBM_LOCAL)))) {
				c->x86_cache_max_rmid = ecx;
				c->x86_cache_occ_scale = ebx;
			}
		} else {
			c->x86_cache_max_rmid = -1;
			c->x86_cache_occ_scale = -1;
		}
	}

	/* AMD-defined flags: level 0x80000001 */
	eax = cpuid_eax(0x80000000);
	c->extended_cpuid_level = eax;

	if ((eax & 0xffff0000) == 0x80000000) {
		if (eax >= 0x80000001) {
			cpuid(0x80000001, &eax, &ebx, &ecx, &edx);

			c->x86_capability[CPUID_8000_0001_ECX] = ecx;
			c->x86_capability[CPUID_8000_0001_EDX] = edx;
		}
	}

	if (c->extended_cpuid_level >= 0x80000008) {
		cpuid(0x80000008, &eax, &ebx, &ecx, &edx);

		c->x86_virt_bits = (eax >> 8) & 0xff;
		c->x86_phys_bits = eax & 0xff;
		c->x86_capability[CPUID_8000_0008_EBX] = ebx;
	}
#ifdef CONFIG_X86_32
	else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
		c->x86_phys_bits = 36;
#endif

	if (c->extended_cpuid_level >= 0x80000007)
		c->x86_power = cpuid_edx(0x80000007);

	if (c->extended_cpuid_level >= 0x8000000a)
		c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a);

	init_scattered_cpuid_features(c);
}
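
/*
 * Layout note (editorial): each CPUID_* index above names one 32-bit word
 * of c->x86_capability[], so a later cpu_has(c, X86_FEATURE_x) test is
 * just "word w, bit b" into the register values captured here.
 */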

static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	int i;

	/*
	 * First of all, decide if this is a 486 or higher
	 * It's a 486 if we can modify the AC flag
	 */
	if (flag_is_changeable_p(X86_EFLAGS_AC))
		c->x86 = 4;
	else
		c->x86 = 3;

	for (i = 0; i < X86_VENDOR_NUM; i++)
		if (cpu_devs[i] && cpu_devs[i]->c_identify) {
			c->x86_vendor_id[0] = 0;
			cpu_devs[i]->c_identify(c);
			if (c->x86_vendor_id[0]) {
				get_cpu_vendor(c);
				break;
			}
		}
#endif
}

/*
 * Do minimum CPU detection early.
 * Fields really needed: vendor, cpuid_level, family, model, mask,
 * cache alignment.
 * The others are not touched to avoid unwanted side effects.
 *
 * WARNING: this function is only called on the BP.  Don't add code here
 * that is supposed to run on all CPUs.
 */
static void __init early_identify_cpu(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
	c->x86_clflush_size = 64;
	c->x86_phys_bits = 36;
	c->x86_virt_bits = 48;
#else
	c->x86_clflush_size = 32;
	c->x86_phys_bits = 32;
	c->x86_virt_bits = 32;
#endif
	c->x86_cache_alignment = c->x86_clflush_size;

	memset(&c->x86_capability, 0, sizeof c->x86_capability);
	c->extended_cpuid_level = 0;

	if (!have_cpuid_p())
		identify_cpu_without_cpuid(c);

	/* cyrix could have cpuid enabled via c_identify()*/
	if (!have_cpuid_p())
		return;

	cpu_detect(c);
	get_cpu_vendor(c);
	get_cpu_cap(c);

	if (this_cpu->c_early_init)
		this_cpu->c_early_init(c);

	c->cpu_index = 0;
	filter_cpuid_features(c, false);

	if (this_cpu->c_bsp_init)
		this_cpu->c_bsp_init(c);

	setup_force_cpu_cap(X86_FEATURE_ALWAYS);
	fpu__init_system(c);
}

void __init early_cpu_init(void)
{
	const struct cpu_dev *const *cdev;
	int count = 0;

#ifdef CONFIG_PROCESSOR_SELECT
	pr_info("KERNEL supported cpus:\n");
#endif

	for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
		const struct cpu_dev *cpudev = *cdev;

		if (count >= X86_VENDOR_NUM)
			break;
		cpu_devs[count] = cpudev;
		count++;

#ifdef CONFIG_PROCESSOR_SELECT
		{
			unsigned int j;

			for (j = 0; j < 2; j++) {
				if (!cpudev->c_ident[j])
					continue;
				pr_info("  %s %s\n", cpudev->c_vendor,
					cpudev->c_ident[j]);
			}
		}
#endif
	}
	early_identify_cpu(&boot_cpu_data);
}

/*
 * The NOPL instruction is supposed to exist on all CPUs of family >= 6;
 * unfortunately, that's not true in practice because of early VIA
 * chips and (more importantly) broken virtualizers that are not easy
 * to detect. In the latter case it doesn't even *fail* reliably, so
 * probing for it doesn't even work. Disable it completely on 32-bit
 * unless we can find a reliable way to detect all the broken cases.
 * Enable it explicitly on 64-bit for non-constant inputs of cpu_has().
 */
static void detect_nopl(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	clear_cpu_cap(c, X86_FEATURE_NOPL);
#else
	set_cpu_cap(c, X86_FEATURE_NOPL);
#endif

	/*
	 * ESPFIX is a strange bug.  All real CPUs have it.  Paravirt
	 * systems that run Linux at CPL > 0 may or may not have the
	 * issue, but, even if they have the issue, there's absolutely
	 * nothing we can do about it because we can't use the real IRET
	 * instruction.
	 *
	 * NB: For the time being, only 32-bit kernels support
	 * X86_BUG_ESPFIX as such.  64-bit kernels directly choose
	 * whether to apply espfix using paravirt hooks.  If any
	 * non-paravirt system ever shows up that does *not* have the
	 * ESPFIX issue, we can change this.
	 */
#ifdef CONFIG_X86_32
#ifdef CONFIG_PARAVIRT
	do {
		extern void native_iret(void);
		if (pv_cpu_ops.iret == native_iret)
			set_cpu_bug(c, X86_BUG_ESPFIX);
	} while (0);
#else
	set_cpu_bug(c, X86_BUG_ESPFIX);
#endif
#endif
}

static void generic_identify(struct cpuinfo_x86 *c)
{
	c->extended_cpuid_level = 0;

	if (!have_cpuid_p())
		identify_cpu_without_cpuid(c);

	/* cyrix could have cpuid enabled via c_identify()*/
	if (!have_cpuid_p())
		return;

	cpu_detect(c);

	get_cpu_vendor(c);

	get_cpu_cap(c);

	if (c->cpuid_level >= 0x00000001) {
		c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
#ifdef CONFIG_X86_32
# ifdef CONFIG_SMP
		c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
# else
		c->apicid = c->initial_apicid;
# endif
#endif
		c->phys_proc_id = c->initial_apicid;
	}

	get_model_name(c); /* Default name */

	detect_nopl(c);
}

static void x86_init_cache_qos(struct cpuinfo_x86 *c)
{
	/*
	 * The heavy lifting of max_rmid and cache_occ_scale are handled
	 * in get_cpu_cap().  Here we just set the max_rmid for the boot_cpu
	 * in case CQM bits really aren't there in this CPU.
	 */
	if (c != &boot_cpu_data) {
		boot_cpu_data.x86_cache_max_rmid =
			min(boot_cpu_data.x86_cache_max_rmid,
			    c->x86_cache_max_rmid);
	}
}

/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
static void identify_cpu(struct cpuinfo_x86 *c)
{
	int i;

	c->loops_per_jiffy = loops_per_jiffy;
	c->x86_cache_size = -1;
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
	c->x86_vendor_id[0] = '\0'; /* Unset */
	c->x86_model_id[0] = '\0';  /* Unset */
	c->x86_max_cores = 1;
	c->x86_coreid_bits = 0;
#ifdef CONFIG_X86_64
	c->x86_clflush_size = 64;
	c->x86_phys_bits = 36;
	c->x86_virt_bits = 48;
#else
	c->cpuid_level = -1;	/* CPUID not detected */
	c->x86_clflush_size = 32;
	c->x86_phys_bits = 32;
	c->x86_virt_bits = 32;
#endif
	c->x86_cache_alignment = c->x86_clflush_size;
	memset(&c->x86_capability, 0, sizeof c->x86_capability);

	generic_identify(c);

	if (this_cpu->c_identify)
		this_cpu->c_identify(c);

	/* Clear/Set all flags overridden by options, after probe */
	for (i = 0; i < NCAPINTS; i++) {
		c->x86_capability[i] &= ~cpu_caps_cleared[i];
		c->x86_capability[i] |= cpu_caps_set[i];
	}

#ifdef CONFIG_X86_64
	c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
#endif

	/*
	 * Vendor-specific initialization.  In this section we
	 * canonicalize the feature flags, meaning if there are
	 * features a certain CPU supports which CPUID doesn't
	 * tell us, CPUID claiming incorrect flags, or other bugs,
	 * we handle them here.
	 *
	 * At the end of this section, c->x86_capability better
	 * indicate the features this CPU genuinely supports!
	 */
	if (this_cpu->c_init)
		this_cpu->c_init(c);

	/* Disable the PN if appropriate */
	squash_the_stupid_serial_number(c);

	/* Set up SMEP/SMAP */
	setup_smep(c);
	setup_smap(c);

	/*
	 * The vendor-specific functions might have changed features.
	 * Now we do "generic changes."
	 */

	/* Filter out anything that depends on CPUID levels we don't have */
	filter_cpuid_features(c, true);

	/* If the model name is still unset, do table lookup. */
	if (!c->x86_model_id[0]) {
		const char *p;
		p = table_lookup_model(c);
		if (p)
			strcpy(c->x86_model_id, p);
		else
			/* Last resort... */
			sprintf(c->x86_model_id, "%02x/%02x",
				c->x86, c->x86_model);
	}

#ifdef CONFIG_X86_64
	detect_ht(c);
#endif

	init_hypervisor(c);
	x86_init_rdrand(c);
	x86_init_cache_qos(c);
	setup_pku(c);

	/*
	 * Clear/Set all flags overridden by options; this needs to happen
	 * before the SMP cross-CPU capability AND below.
	 */
	for (i = 0; i < NCAPINTS; i++) {
		c->x86_capability[i] &= ~cpu_caps_cleared[i];
		c->x86_capability[i] |= cpu_caps_set[i];
	}

	/*
	 * On SMP, boot_cpu_data holds the common feature set between
	 * all CPUs; so make sure that we indicate which features are
	 * common between the CPUs.  The first time this routine gets
	 * executed, c == &boot_cpu_data.
	 */
	if (c != &boot_cpu_data) {
		/* AND the already accumulated flags with these */
		for (i = 0; i < NCAPINTS; i++)
			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];

		/* OR, i.e. replicate the bug flags */
		for (i = NCAPINTS; i < NCAPINTS + NBUGINTS; i++)
			c->x86_capability[i] |= boot_cpu_data.x86_capability[i];
	}

	/* Init Machine Check Exception if available. */
	mcheck_cpu_init(c);

	select_idle_routine(c);

#ifdef CONFIG_NUMA
	numa_add_cpu(smp_processor_id());
#endif
	/* The boot/hotplug time assignment got cleared, restore it */
	c->logical_proc_id = topology_phys_to_logical_pkg(c->phys_proc_id);
}

/*
 * Set up the CPU state needed to execute SYSENTER/SYSEXIT instructions
 * on 32-bit kernels:
 */
#ifdef CONFIG_X86_32
void enable_sep_cpu(void)
{
	struct tss_struct *tss;
	int cpu;

	cpu = get_cpu();
	tss = &per_cpu(cpu_tss, cpu);

	if (!boot_cpu_has(X86_FEATURE_SEP))
		goto out;

	/*
	 * We cache MSR_IA32_SYSENTER_CS's value in the TSS's ss1 field --
	 * see the big comment in struct x86_hw_tss's definition.
	 */

	tss->x86_tss.ss1 = __KERNEL_CS;
	wrmsr(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1, 0);

	wrmsr(MSR_IA32_SYSENTER_ESP,
	      (unsigned long)tss + offsetofend(struct tss_struct, SYSENTER_stack),
	      0);

	wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)entry_SYSENTER_32, 0);

out:
	put_cpu();
}
#endif
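
/*
 * Background (editorial): on SYSENTER the CPU loads CS from
 * MSR_IA32_SYSENTER_CS and ESP/EIP from the companion MSRs, so the three
 * wrmsr() calls above fully describe the kernel entry point for the
 * 32-bit fast-syscall path.
 */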

void __init identify_boot_cpu(void)
{
	identify_cpu(&boot_cpu_data);
	init_amd_e400_c1e_mask();
#ifdef CONFIG_X86_32
	sysenter_setup();
	enable_sep_cpu();
#endif
	cpu_detect_tlb(&boot_cpu_data);
}

void identify_secondary_cpu(struct cpuinfo_x86 *c)
{
	BUG_ON(c == &boot_cpu_data);
	identify_cpu(c);
#ifdef CONFIG_X86_32
	enable_sep_cpu();
#endif
	mtrr_ap_init();
}
struct msr_range {
	unsigned	min;
	unsigned	max;
};

static const struct msr_range msr_range_array[] = {
	{ 0x00000000, 0x00000418},
	{ 0xc0000000, 0xc000040b},
	{ 0xc0010000, 0xc0010142},
	{ 0xc0011000, 0xc001103b},
};

static void __print_cpu_msr(void)
{
	unsigned index_min, index_max;
	unsigned index;
	u64 val;
	int i;

	for (i = 0; i < ARRAY_SIZE(msr_range_array); i++) {
		index_min = msr_range_array[i].min;
		index_max = msr_range_array[i].max;

		for (index = index_min; index < index_max; index++) {
			if (rdmsrl_safe(index, &val))
				continue;
			pr_info(" MSR%08x: %016llx\n", index, val);
		}
	}
}

static int show_msr;

static __init int setup_show_msr(char *arg)
{
	int num;

	get_option(&arg, &num);

	if (num > 0)
		show_msr = num;
	return 1;
}
__setup("show_msr=", setup_show_msr);
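
/*
 * Usage note (editorial): booting with "show_msr=N" dumps the MSR ranges
 * above for the first N CPUs; print_cpu_msr() below compares the CPU
 * index against this threshold, and rdmsrl_safe() silently skips MSRs
 * that fault.
 */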

static __init int setup_noclflush(char *arg)
{
	setup_clear_cpu_cap(X86_FEATURE_CLFLUSH);
	setup_clear_cpu_cap(X86_FEATURE_CLFLUSHOPT);
	return 1;
}
__setup("noclflush", setup_noclflush);

void print_cpu_info(struct cpuinfo_x86 *c)
{
	const char *vendor = NULL;

	if (c->x86_vendor < X86_VENDOR_NUM) {
		vendor = this_cpu->c_vendor;
	} else {
		if (c->cpuid_level >= 0)
			vendor = c->x86_vendor_id;
	}

	if (vendor && !strstr(c->x86_model_id, vendor))
		pr_cont("%s ", vendor);

	if (c->x86_model_id[0])
		pr_cont("%s", c->x86_model_id);
	else
		pr_cont("%d86", c->x86);

	pr_cont(" (family: 0x%x, model: 0x%x", c->x86, c->x86_model);

	if (c->x86_mask || c->cpuid_level >= 0)
		pr_cont(", stepping: 0x%x)\n", c->x86_mask);
	else
		pr_cont(")\n");

	print_cpu_msr(c);
}

void print_cpu_msr(struct cpuinfo_x86 *c)
{
	if (c->cpu_index < show_msr)
		__print_cpu_msr();
}

static __init int setup_disablecpuid(char *arg)
{
	int bit;

	if (get_option(&arg, &bit) && bit < NCAPINTS*32)
		setup_clear_cpu_cap(bit);
	else
		return 0;

	return 1;
}
__setup("clearcpuid=", setup_disablecpuid);
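
/*
 * Usage note (editorial): "clearcpuid=N" clears capability bit N, where N
 * indexes the flattened x86_capability[] bitmap (32 * word + bit), the
 * same numbering the X86_FEATURE_* definitions use.
 */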

#ifdef CONFIG_X86_64
struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1,
				    (unsigned long) debug_idt_table };

DEFINE_PER_CPU_FIRST(union irq_stack_union,
		     irq_stack_union) __aligned(PAGE_SIZE) __visible;

/*
 * The following percpu variables are hot.  Align current_task to
 * cacheline size such that they fall in the same cacheline.
 */
DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
	&init_task;
EXPORT_PER_CPU_SYMBOL(current_task);

DEFINE_PER_CPU(char *, irq_stack_ptr) =
	init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64;

DEFINE_PER_CPU(unsigned int, irq_count) __visible = -1;

DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
EXPORT_PER_CPU_SYMBOL(__preempt_count);

/*
 * Special IST stacks which the CPU switches to when it calls
 * an IST-marked descriptor entry. Up to 7 stacks (hardware
 * limit), all of them are 4K, except the debug stack which
 * is 8K.
 */
static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
	  [0 ... N_EXCEPTION_STACKS - 1]	= EXCEPTION_STKSZ,
	  [DEBUG_STACK - 1]			= DEBUG_STKSZ
};

static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
	[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);

/* May not be marked __init: used by software suspend */
void syscall_init(void)
{
	/*
	 * LSTAR and STAR live in a bit strange symbiosis.
	 * They both write to the same internal register. STAR allows
	 * setting CS/DS, but only a 32bit target. LSTAR sets the 64bit rip.
	 */
	wrmsr(MSR_STAR, 0, (__USER32_CS << 16) | __KERNEL_CS);
	wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64);

#ifdef CONFIG_IA32_EMULATION
	wrmsrl(MSR_CSTAR, (unsigned long)entry_SYSCALL_compat);
	/*
	 * This only works on Intel CPUs.
	 * On AMD CPUs these MSRs are 32-bit, CPU truncates MSR_IA32_SYSENTER_EIP.
	 * This does not cause SYSENTER to jump to the wrong location, because
	 * AMD doesn't allow SYSENTER in long mode (either 32- or 64-bit).
	 */
	wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
	wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
	wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat);
#else
	wrmsrl(MSR_CSTAR, (unsigned long)ignore_sysret);
	wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)GDT_ENTRY_INVALID_SEG);
	wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
	wrmsrl_safe(MSR_IA32_SYSENTER_EIP, 0ULL);
#endif

	/* Flags to clear on syscall */
	wrmsrl(MSR_SYSCALL_MASK,
	       X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|
	       X86_EFLAGS_IOPL|X86_EFLAGS_AC|X86_EFLAGS_NT);
}
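
/*
 * MSR_STAR layout (editorial): bits 47:32 hold the kernel CS selector
 * loaded on SYSCALL and bits 63:48 the base selector used on SYSRET,
 * which is what wrmsr(MSR_STAR, 0, (__USER32_CS << 16) | __KERNEL_CS)
 * above packs into the high 32 bits.
 */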

/*
 * Copies of the original ist values from the tss are only accessed during
 * debugging, no special alignment required.
 */
DEFINE_PER_CPU(struct orig_ist, orig_ist);

static DEFINE_PER_CPU(unsigned long, debug_stack_addr);
DEFINE_PER_CPU(int, debug_stack_usage);

int is_debug_stack(unsigned long addr)
{
	return __this_cpu_read(debug_stack_usage) ||
		(addr <= __this_cpu_read(debug_stack_addr) &&
		 addr > (__this_cpu_read(debug_stack_addr) - DEBUG_STKSZ));
}
NOKPROBE_SYMBOL(is_debug_stack);

DEFINE_PER_CPU(u32, debug_idt_ctr);

void debug_stack_set_zero(void)
{
	this_cpu_inc(debug_idt_ctr);
	load_current_idt();
}
NOKPROBE_SYMBOL(debug_stack_set_zero);

void debug_stack_reset(void)
{
	if (WARN_ON(!this_cpu_read(debug_idt_ctr)))
		return;
	if (this_cpu_dec_return(debug_idt_ctr) == 0)
		load_current_idt();
}
NOKPROBE_SYMBOL(debug_stack_reset);

#else	/* CONFIG_X86_64 */

DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
EXPORT_PER_CPU_SYMBOL(current_task);
DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
EXPORT_PER_CPU_SYMBOL(__preempt_count);

/*
 * On x86_32, vm86 modifies tss.sp0, so sp0 isn't a reliable way to find
 * the top of the kernel stack.  Use an extra percpu variable to track the
 * top of the kernel stack directly.
 */
DEFINE_PER_CPU(unsigned long, cpu_current_top_of_stack) =
	(unsigned long)&init_thread_union + THREAD_SIZE;
EXPORT_PER_CPU_SYMBOL(cpu_current_top_of_stack);

#ifdef CONFIG_CC_STACKPROTECTOR
DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
#endif

#endif	/* CONFIG_X86_64 */

/*
 * Clear all 6 debug registers:
 */
static void clear_all_debug_regs(void)
{
	int i;

	for (i = 0; i < 8; i++) {
		/* Ignore db4, db5 */
		if ((i == 4) || (i == 5))
			continue;

		set_debugreg(0, i);
	}
}
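
/*
 * Why db4/db5 are skipped (editorial): DR4 and DR5 are not real
 * registers; depending on CR4.DE they either alias DR6/DR7 or fault on
 * access, so only DR0-DR3, DR6 and DR7 -- the "6 debug registers" above
 * -- are cleared.
 */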

#ifdef CONFIG_KGDB
/*
 * Restore debug regs if using kgdbwait and you have a kernel debugger
 * connection established.
 */
static void dbg_restore_debug_regs(void)
{
	if (unlikely(kgdb_connected && arch_kgdb_ops.correct_hw_break))
		arch_kgdb_ops.correct_hw_break();
}
#else /* ! CONFIG_KGDB */
#define dbg_restore_debug_regs()
#endif /* ! CONFIG_KGDB */

static void wait_for_master_cpu(int cpu)
{
#ifdef CONFIG_SMP
	/*
	 * wait for ACK from master CPU before continuing
	 * with AP initialization
	 */
	WARN_ON(cpumask_test_and_set_cpu(cpu, cpu_initialized_mask));
	while (!cpumask_test_cpu(cpu, cpu_callout_mask))
		cpu_relax();
#endif
}

/*
 * cpu_init() initializes state that is per-CPU. Some data is already
 * initialized (naturally) in the bootstrap process, such as the GDT
 * and IDT. We reload them nevertheless, this function acts as a
 * 'CPU state barrier', nothing should get across.
 * A lot of state is already set up in PDA init for 64 bit
 */
#ifdef CONFIG_X86_64

void cpu_init(void)
{
	struct orig_ist *oist;
	struct task_struct *me;
	struct tss_struct *t;
	unsigned long v;
	int cpu = stack_smp_processor_id();
	int i;

	wait_for_master_cpu(cpu);

	/*
	 * Initialize the CR4 shadow before doing anything that could
	 * try to read it.
	 */
	cr4_init_shadow();

	/*
	 * Load microcode on this cpu if a valid microcode is available.
	 * This is early microcode loading procedure.
	 */
	load_ucode_ap();

	t = &per_cpu(cpu_tss, cpu);
	oist = &per_cpu(orig_ist, cpu);

#ifdef CONFIG_NUMA
	if (this_cpu_read(numa_node) == 0 &&
	    early_cpu_to_node(cpu) != NUMA_NO_NODE)
		set_numa_node(early_cpu_to_node(cpu));
#endif

	me = current;

	pr_debug("Initializing CPU#%d\n", cpu);

	cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);

	/*
	 * Initialize the per-CPU GDT with the boot GDT,
	 * and set up the GDT descriptor:
	 */

	switch_to_new_gdt(cpu);
	loadsegment(fs, 0);

	load_current_idt();

	memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
	syscall_init();

	wrmsrl(MSR_FS_BASE, 0);
	wrmsrl(MSR_KERNEL_GS_BASE, 0);
	barrier();

	x86_configure_nx();
	x2apic_setup();

	/*
	 * set up and load the per-CPU TSS
	 */
	if (!oist->ist[0]) {
		char *estacks = per_cpu(exception_stacks, cpu);

		for (v = 0; v < N_EXCEPTION_STACKS; v++) {
			estacks += exception_stack_sizes[v];
			oist->ist[v] = t->x86_tss.ist[v] =
					(unsigned long)estacks;
			if (v == DEBUG_STACK-1)
				per_cpu(debug_stack_addr, cpu) = (unsigned long)estacks;
		}
	}

	t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);

	/*
	 * <= is required because the CPU will access up to
	 * 8 bits beyond the end of the IO permission bitmap.
	 */
	for (i = 0; i <= IO_BITMAP_LONGS; i++)
		t->io_bitmap[i] = ~0UL;

	atomic_inc(&init_mm.mm_count);
	me->active_mm = &init_mm;
	BUG_ON(me->mm);
	enter_lazy_tlb(&init_mm, me);

	load_sp0(t, &current->thread);
	set_tss_desc(cpu, t);
	load_TR_desc();
	load_mm_ldt(&init_mm);

	clear_all_debug_regs();
	dbg_restore_debug_regs();

	fpu__init_cpu();

	if (is_uv_system())
		uv_cpu_init();
}
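
/*
 * IST carving (editorial): each oist->ist[v] entry above is assigned
 * after estacks has been advanced past that stack's size, i.e. it stores
 * the *top* of the per-vector exception stack, which is what the hardware
 * expects in the TSS IST slots.
 */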

#else

void cpu_init(void)
{
	int cpu = smp_processor_id();
	struct task_struct *curr = current;
	struct tss_struct *t = &per_cpu(cpu_tss, cpu);
	struct thread_struct *thread = &curr->thread;

	wait_for_master_cpu(cpu);

	/*
	 * Initialize the CR4 shadow before doing anything that could
	 * try to read it.
	 */
	cr4_init_shadow();

	show_ucode_info_early();

	pr_info("Initializing CPU#%d\n", cpu);

	if (cpu_feature_enabled(X86_FEATURE_VME) ||
	    cpu_has_tsc ||
	    boot_cpu_has(X86_FEATURE_DE))
		cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);

	load_current_idt();
	switch_to_new_gdt(cpu);

	/*
	 * Set up and load the per-CPU TSS and LDT
	 */
	atomic_inc(&init_mm.mm_count);
	curr->active_mm = &init_mm;
	BUG_ON(curr->mm);
	enter_lazy_tlb(&init_mm, curr);

	load_sp0(t, thread);
	set_tss_desc(cpu, t);
	load_TR_desc();
	load_mm_ldt(&init_mm);

	t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);

#ifdef CONFIG_DOUBLEFAULT
	/* Set up doublefault TSS pointer in the GDT */
	__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
#endif

	clear_all_debug_regs();
	dbg_restore_debug_regs();

	fpu__init_cpu();
}
#endif

static void bsp_resume(void)
{
	if (this_cpu->c_bsp_resume)
		this_cpu->c_bsp_resume(&boot_cpu_data);
}

static struct syscore_ops cpu_syscore_ops = {
	.resume		= bsp_resume,
};

static int __init init_cpu_syscore(void)
{
	register_syscore_ops(&cpu_syscore_ops);
	return 0;
}
core_initcall(init_cpu_syscore);