// SPDX-License-Identifier: GPL-2.0-only

/* cpu_feature_enabled() cannot be used this early */
#define USE_EARLY_PGTABLE_L5

#include <linux/memblock.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/sched/mm.h>
#include <linux/sched/clock.h>
#include <linux/sched/task.h>
#include <linux/sched/smt.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/kgdb.h>
#include <linux/smp.h>
#include <linux/io.h>
#include <linux/syscore_ops.h>
#include <linux/pgtable.h>

#include <asm/cmdline.h>
#include <asm/stackprotector.h>
#include <asm/perf_event.h>
#include <asm/mmu_context.h>
#include <asm/doublefault.h>
#include <asm/archrandom.h>
#include <asm/hypervisor.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/debugreg.h>
#include <asm/sections.h>
#include <asm/vsyscall.h>
#include <linux/topology.h>
#include <linux/cpumask.h>
#include <linux/atomic.h>
#include <asm/proto.h>
#include <asm/setup.h>
#include <asm/apic.h>
#include <asm/desc.h>
#include <asm/fpu/internal.h>
#include <asm/mtrr.h>
#include <asm/hwcap2.h>
#include <linux/numa.h>
#include <asm/numa.h>
#include <asm/asm.h>
#include <asm/bugs.h>
#include <asm/cpu.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/memtype.h>
#include <asm/microcode.h>
#include <asm/microcode_intel.h>
#include <asm/intel-family.h>
#include <asm/cpu_device_id.h>
#include <asm/uv/uv.h>
#include <asm/sigframe.h>

#include "cpu.h"

u32 elf_hwcap2 __read_mostly;

/* all of these masks are initialized in setup_cpu_local_masks() */
cpumask_var_t cpu_initialized_mask;
cpumask_var_t cpu_callout_mask;
cpumask_var_t cpu_callin_mask;

/* representing cpus for which sibling maps can be computed */
cpumask_var_t cpu_sibling_setup_mask;

/* Number of siblings per CPU package */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* Last level cache ID of each logical CPU */
DEFINE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id) = BAD_APICID;

u16 get_llc_id(unsigned int cpu)
{
	return per_cpu(cpu_llc_id, cpu);
}
EXPORT_SYMBOL_GPL(get_llc_id);

/* correctly size the local cpu masks */
void __init setup_cpu_local_masks(void)
{
	alloc_bootmem_cpumask_var(&cpu_initialized_mask);
	alloc_bootmem_cpumask_var(&cpu_callin_mask);
	alloc_bootmem_cpumask_var(&cpu_callout_mask);
	alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
}

static void default_init(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
	cpu_detect_cache_sizes(c);
#else
	/* Not much we can do here... */
	/* Check if at least it has cpuid */
	if (c->cpuid_level == -1) {
		/* No cpuid. It must be an ancient CPU */
		if (c->x86 == 4)
			strcpy(c->x86_model_id, "486");
		else if (c->x86 == 3)
			strcpy(c->x86_model_id, "386");
	}
#endif
}

static const struct cpu_dev default_cpu = {
	.c_init		= default_init,
	.c_vendor	= "Unknown",
	.c_x86_vendor	= X86_VENDOR_UNKNOWN,
};

static const struct cpu_dev *this_cpu = &default_cpu;

DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
#ifdef CONFIG_X86_64
	/*
	 * We need valid kernel segments for data and code in long mode too
	 * IRET will check the segment types  kkeil 2000/10/28
	 * Also sysret mandates a special GDT layout
	 *
	 * TLS descriptors are currently at a different place compared to i386.
	 * Hopefully nobody expects them at a fixed place (Wine?)
	 */
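	/*
	 * In GDT_ENTRY_INIT(flags, base, limit), the low byte of 'flags' is
	 * the segment access byte (e.g. 0x9b = present, DPL 0, accessed
	 * exec/read code) and the high nibble carries the granularity/size
	 * bits (0xc = 4k granularity + 32-bit, 0xa = 4k granularity +
	 * 64-bit code).
	 */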
	[GDT_ENTRY_KERNEL32_CS]		= GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
	[GDT_ENTRY_KERNEL_CS]		= GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
	[GDT_ENTRY_KERNEL_DS]		= GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER32_CS]	= GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_DS]	= GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_CS]	= GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
#else
	[GDT_ENTRY_KERNEL_CS]		= GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
	[GDT_ENTRY_KERNEL_DS]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_CS]	= GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_DS]	= GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
	/*
	 * Segments used for calling PnP BIOS have byte granularity.
	 * The code segments and data segments have fixed 64k limits,
	 * the transfer segment sizes are set at run time.
	 */
	/* 32-bit code */
	[GDT_ENTRY_PNPBIOS_CS32]	= GDT_ENTRY_INIT(0x409a, 0, 0xffff),
	/* 16-bit code */
	[GDT_ENTRY_PNPBIOS_CS16]	= GDT_ENTRY_INIT(0x009a, 0, 0xffff),
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_DS]		= GDT_ENTRY_INIT(0x0092, 0, 0xffff),
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_TS1]		= GDT_ENTRY_INIT(0x0092, 0, 0),
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_TS2]		= GDT_ENTRY_INIT(0x0092, 0, 0),
	/*
	 * The APM segments have byte granularity and their bases
	 * are set at run time.  All have 64k limits.
	 */
	/* 32-bit code */
	[GDT_ENTRY_APMBIOS_BASE]	= GDT_ENTRY_INIT(0x409a, 0, 0xffff),
	/* 16-bit code */
	[GDT_ENTRY_APMBIOS_BASE+1]	= GDT_ENTRY_INIT(0x009a, 0, 0xffff),
	/* data */
	[GDT_ENTRY_APMBIOS_BASE+2]	= GDT_ENTRY_INIT(0x4092, 0, 0xffff),

	[GDT_ENTRY_ESPFIX_SS]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
	[GDT_ENTRY_PERCPU]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
#endif
} };
EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);

#ifdef CONFIG_X86_64
static int __init x86_nopcid_setup(char *s)
{
	/* nopcid doesn't accept parameters */
	if (s)
		return -EINVAL;

	/* do not emit a message if the feature is not present */
	if (!boot_cpu_has(X86_FEATURE_PCID))
		return 0;

	setup_clear_cpu_cap(X86_FEATURE_PCID);
	pr_info("nopcid: PCID feature disabled\n");
	return 0;
}
early_param("nopcid", x86_nopcid_setup);
#endif

static int __init x86_noinvpcid_setup(char *s)
{
	/* noinvpcid doesn't accept parameters */
	if (s)
		return -EINVAL;

	/* do not emit a message if the feature is not present */
	if (!boot_cpu_has(X86_FEATURE_INVPCID))
		return 0;

	setup_clear_cpu_cap(X86_FEATURE_INVPCID);
	pr_info("noinvpcid: INVPCID feature disabled\n");
	return 0;
}
early_param("noinvpcid", x86_noinvpcid_setup);

#ifdef CONFIG_X86_32
static int cachesize_override = -1;
static int disable_x86_serial_nr = 1;

static int __init cachesize_setup(char *str)
{
	get_option(&str, &cachesize_override);
	return 1;
}
__setup("cachesize=", cachesize_setup);

static int __init x86_sep_setup(char *s)
{
	setup_clear_cpu_cap(X86_FEATURE_SEP);
	return 1;
}
__setup("nosep", x86_sep_setup);

/* Standard macro to see if a specific flag is changeable */
static inline int flag_is_changeable_p(u32 flag)
{
	u32 f1, f2;

	/*
	 * Cyrix and IDT cpus allow disabling of CPUID
	 * so the code below may return different results
	 * when it is executed before and after enabling
	 * the CPUID. Add "volatile" to not allow gcc to
	 * optimize the subsequent calls to this function.
	 */
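	/*
	 * The sequence below saves EFLAGS (f2), writes back a copy with
	 * 'flag' inverted, then re-reads EFLAGS (f1): if the inverted bit
	 * stuck, the flag is changeable.
	 */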
	asm volatile ("pushfl		\n\t"
		      "pushfl		\n\t"
		      "popl %0		\n\t"
		      "movl %0, %1	\n\t"
		      "xorl %2, %0	\n\t"
		      "pushl %0		\n\t"
		      "popfl		\n\t"
		      "pushfl		\n\t"
		      "popl %0		\n\t"
		      "popfl		\n\t"

		      : "=&r" (f1), "=&r" (f2)
		      : "ir" (flag));

	return ((f1^f2) & flag) != 0;
}

/* Probe for the CPUID instruction */
int have_cpuid_p(void)
{
	return flag_is_changeable_p(X86_EFLAGS_ID);
}

static void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
	unsigned long lo, hi;

	if (!cpu_has(c, X86_FEATURE_PN) || !disable_x86_serial_nr)
		return;

	/* Disable processor serial number: */

	rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
	lo |= 0x200000;
	wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);

	pr_notice("CPU serial number disabled.\n");
	clear_cpu_cap(c, X86_FEATURE_PN);

	/* Disabling the serial number may affect the cpuid level */
	c->cpuid_level = cpuid_eax(0);
}

static int __init x86_serial_nr_setup(char *s)
{
	disable_x86_serial_nr = 0;
	return 1;
}
__setup("serialnumber", x86_serial_nr_setup);
#else
static inline int flag_is_changeable_p(u32 flag)
{
	return 1;
}
static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
}
#endif

static __init int setup_disable_smep(char *arg)
{
	setup_clear_cpu_cap(X86_FEATURE_SMEP);
	return 1;
}
__setup("nosmep", setup_disable_smep);

static __always_inline void setup_smep(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_SMEP))
		cr4_set_bits(X86_CR4_SMEP);
}

static __init int setup_disable_smap(char *arg)
{
	setup_clear_cpu_cap(X86_FEATURE_SMAP);
	return 1;
}
__setup("nosmap", setup_disable_smap);

static __always_inline void setup_smap(struct cpuinfo_x86 *c)
{
	unsigned long eflags = native_save_fl();

	/* This should have been cleared long ago */
	BUG_ON(eflags & X86_EFLAGS_AC);

	if (cpu_has(c, X86_FEATURE_SMAP)) {
#ifdef CONFIG_X86_SMAP
		cr4_set_bits(X86_CR4_SMAP);
#else
		clear_cpu_cap(c, X86_FEATURE_SMAP);
		cr4_clear_bits(X86_CR4_SMAP);
#endif
	}
}

static __always_inline void setup_umip(struct cpuinfo_x86 *c)
{
	/* Check the boot processor, plus build option for UMIP. */
	if (!cpu_feature_enabled(X86_FEATURE_UMIP))
		goto out;

	/* Check the current processor's cpuid bits. */
	if (!cpu_has(c, X86_FEATURE_UMIP))
		goto out;

	cr4_set_bits(X86_CR4_UMIP);

	pr_info_once("x86/cpu: User Mode Instruction Prevention (UMIP) activated\n");

	return;

out:
	/*
	 * Make sure UMIP is disabled in case it was enabled in a
	 * previous boot (e.g., via kexec).
	 */
	cr4_clear_bits(X86_CR4_UMIP);
}

/* These bits should not change their value after CPU init is finished. */
static const unsigned long cr4_pinned_mask =
	X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_UMIP | X86_CR4_FSGSBASE;
static DEFINE_STATIC_KEY_FALSE_RO(cr_pinning);
static unsigned long cr4_pinned_bits __ro_after_init;
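
/*
 * With cr_pinning enabled, the native_write_cr0/cr4() paths below
 * re-assert security-sensitive bits (CR0.WP and cr4_pinned_mask) that a
 * stray or malicious write may have cleared, and warn afterwards.
 */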
void native_write_cr0(unsigned long val)
{
	unsigned long bits_missing = 0;

set_register:
	asm volatile("mov %0,%%cr0": "+r" (val) : : "memory");

	if (static_branch_likely(&cr_pinning)) {
		if (unlikely((val & X86_CR0_WP) != X86_CR0_WP)) {
			bits_missing = X86_CR0_WP;
			val |= bits_missing;
			goto set_register;
		}
		/* Warn after we've set the missing bits. */
		WARN_ONCE(bits_missing, "CR0 WP bit went missing!?\n");
	}
}
EXPORT_SYMBOL(native_write_cr0);

void native_write_cr4(unsigned long val)
{
	unsigned long bits_changed = 0;

set_register:
	asm volatile("mov %0,%%cr4": "+r" (val) : : "memory");

	if (static_branch_likely(&cr_pinning)) {
		if (unlikely((val & cr4_pinned_mask) != cr4_pinned_bits)) {
			bits_changed = (val & cr4_pinned_mask) ^ cr4_pinned_bits;
			val = (val & ~cr4_pinned_mask) | cr4_pinned_bits;
			goto set_register;
		}
		/* Warn after we've corrected the changed bits. */
		WARN_ONCE(bits_changed, "pinned CR4 bits changed: 0x%lx!?\n",
			  bits_changed);
	}
}
#if IS_MODULE(CONFIG_LKDTM)
EXPORT_SYMBOL_GPL(native_write_cr4);
#endif

void cr4_update_irqsoff(unsigned long set, unsigned long clear)
{
	unsigned long newval, cr4 = this_cpu_read(cpu_tlbstate.cr4);

	lockdep_assert_irqs_disabled();

	newval = (cr4 & ~clear) | set;
	if (newval != cr4) {
		this_cpu_write(cpu_tlbstate.cr4, newval);
		__write_cr4(newval);
	}
}
EXPORT_SYMBOL(cr4_update_irqsoff);

/* Read the CR4 shadow. */
unsigned long cr4_read_shadow(void)
{
	return this_cpu_read(cpu_tlbstate.cr4);
}
EXPORT_SYMBOL_GPL(cr4_read_shadow);

void cr4_init(void)
{
	unsigned long cr4 = __read_cr4();

	if (boot_cpu_has(X86_FEATURE_PCID))
		cr4 |= X86_CR4_PCIDE;
	if (static_branch_likely(&cr_pinning))
		cr4 = (cr4 & ~cr4_pinned_mask) | cr4_pinned_bits;

	__write_cr4(cr4);

	/* Initialize cr4 shadow for this CPU. */
	this_cpu_write(cpu_tlbstate.cr4, cr4);
}

/*
 * Once CPU feature detection is finished (and boot params have been
 * parsed), record any of the sensitive CR bits that are set, and
 * enable CR pinning.
 */
static void __init setup_cr_pinning(void)
{
	cr4_pinned_bits = this_cpu_read(cpu_tlbstate.cr4) & cr4_pinned_mask;
	static_key_enable(&cr_pinning.key);
}

static __init int x86_nofsgsbase_setup(char *arg)
{
	/* Require an exact match without trailing characters. */
	if (strlen(arg))
		return 0;

	/* Do not emit a message if the feature is not present. */
	if (!boot_cpu_has(X86_FEATURE_FSGSBASE))
		return 1;

	setup_clear_cpu_cap(X86_FEATURE_FSGSBASE);
	pr_info("FSGSBASE disabled via kernel command line\n");
	return 1;
}
__setup("nofsgsbase", x86_nofsgsbase_setup);

/*
 * Protection Keys are not available in 32-bit mode.
 */
static bool pku_disabled;

static __always_inline void setup_pku(struct cpuinfo_x86 *c)
{
	if (c == &boot_cpu_data) {
		if (pku_disabled || !cpu_feature_enabled(X86_FEATURE_PKU))
			return;
		/*
		 * Setting CR4.PKE will cause the X86_FEATURE_OSPKE cpuid
		 * bit to be set.  Enforce it.
		 */
		setup_force_cpu_cap(X86_FEATURE_OSPKE);

	} else if (!cpu_feature_enabled(X86_FEATURE_OSPKE)) {
		return;
	}

	cr4_set_bits(X86_CR4_PKE);
	/* Load the default PKRU value */
	pkru_write_default();
}

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
static __init int setup_disable_pku(char *arg)
{
	/*
	 * Do not clear the X86_FEATURE_PKU bit.  All of the
	 * runtime checks are against OSPKE so clearing the
	 * bit does nothing.
	 *
	 * This way, we will see "pku" in cpuinfo, but not
	 * "ospke", which is exactly what we want.  It shows
	 * that the CPU has PKU, but the OS has not enabled it.
	 * This happens to be exactly how a system would look
	 * if we disabled the config option.
	 */
	pr_info("x86: 'nopku' specified, disabling Memory Protection Keys\n");
	pku_disabled = true;
	return 1;
}
__setup("nopku", setup_disable_pku);
#endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */

/*
 * Some CPU features depend on higher CPUID levels, which may not always
 * be available due to CPUID level capping or broken virtualization
 * software.  Add those features to this table to auto-disable them.
 */
struct cpuid_dependent_feature {
	u32 feature;
	u32 level;
};

static const struct cpuid_dependent_feature
cpuid_dependent_features[] = {
	{ X86_FEATURE_MWAIT,	0x00000005 },
	{ X86_FEATURE_DCA,	0x00000009 },
	{ X86_FEATURE_XSAVE,	0x0000000d },
	{ 0, 0 }
};

static void filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
{
	const struct cpuid_dependent_feature *df;

	for (df = cpuid_dependent_features; df->feature; df++) {

		if (!cpu_has(c, df->feature))
			continue;
		/*
		 * Note: cpuid_level is set to -1 if unavailable, but
		 * extended_cpuid_level is set to 0 if unavailable
		 * and the legitimate extended levels are all negative
		 * when signed; hence the weird messing around with
		 * signs here...
		 */
		if (!((s32)df->level < 0 ?
		     (u32)df->level > (u32)c->extended_cpuid_level :
		     (s32)df->level > (s32)c->cpuid_level))
			continue;

		clear_cpu_cap(c, df->feature);
		if (!warn)
			continue;

		pr_warn("CPU: CPU feature " X86_CAP_FMT " disabled, no CPUID level 0x%x\n",
			x86_cap_flag(df->feature), df->level);
	}
}

/*
 * Naming convention should be: <Name> [(<Codename>)]
 * This table is only used if init_<vendor>() below doesn't set the
 * model name; in particular, if CPUID levels 0x80000002..4 are
 * supported, this isn't used.
 */

/* Look up CPU names by table lookup. */
static const char *table_lookup_model(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	const struct legacy_cpu_model_info *info;

	if (c->x86_model >= 16)
		return NULL;	/* Range check */

	if (!this_cpu)
		return NULL;

	info = this_cpu->legacy_models;

	while (info->family) {
		if (info->family == c->x86)
			return info->model_names[c->x86_model];
		info++;
	}
#endif
	return NULL;		/* Not found */
}

/* Aligned to unsigned long to avoid split lock in atomic bitmap ops */
__u32 cpu_caps_cleared[NCAPINTS + NBUGINTS] __aligned(sizeof(unsigned long));
__u32 cpu_caps_set[NCAPINTS + NBUGINTS] __aligned(sizeof(unsigned long));

void load_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
	loadsegment(fs, __KERNEL_PERCPU);
#else
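	/*
	 * On 64-bit, the per-cpu area is reached through the GS base MSR:
	 * clear the selector, then point GS base at this CPU's kernel
	 * per-cpu area.
	 */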
	__loadsegment_simple(gs, 0);
	wrmsrl(MSR_GS_BASE, cpu_kernelmode_gs_base(cpu));
#endif
}

#ifdef CONFIG_X86_32
/* The 32-bit entry code needs to find cpu_entry_area. */
DEFINE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
#endif

/* Load the original GDT from the per-cpu structure */
void load_direct_gdt(int cpu)
{
	struct desc_ptr gdt_descr;

	gdt_descr.address = (long)get_cpu_gdt_rw(cpu);
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);
}
EXPORT_SYMBOL_GPL(load_direct_gdt);

/* Load a fixmap remapping of the per-cpu GDT */
void load_fixmap_gdt(int cpu)
{
	struct desc_ptr gdt_descr;

	gdt_descr.address = (long)get_cpu_gdt_ro(cpu);
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);
}
EXPORT_SYMBOL_GPL(load_fixmap_gdt);

/*
 * Current gdt points %fs at the "master" per-cpu area: after this,
 * it's on the real one.
 */
void switch_to_new_gdt(int cpu)
{
	/* Load the original GDT */
	load_direct_gdt(cpu);
	/* Reload the per-cpu base */
	load_percpu_segment(cpu);
}

static const struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};

static void get_model_name(struct cpuinfo_x86 *c)
{
	unsigned int *v;
	char *p, *q, *s;

	if (c->extended_cpuid_level < 0x80000004)
		return;

	v = (unsigned int *)c->x86_model_id;
	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
	c->x86_model_id[48] = 0;

	/* Trim whitespace */
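	/*
	 * p reads the raw string, q writes it back in place (dropping the
	 * leading blanks), and s remembers where the last non-whitespace
	 * byte was written so trailing blanks can be cut off.
	 */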
	p = q = s = &c->x86_model_id[0];

	while (*p == ' ')
		p++;

	while (*p) {
		/* Note the last non-whitespace index */
		if (!isspace(*p))
			s = q;

		*q++ = *p++;
	}

	*(s + 1) = '\0';
}

void detect_num_cpu_cores(struct cpuinfo_x86 *c)
{
	unsigned int eax, ebx, ecx, edx;

	c->x86_max_cores = 1;
	if (!IS_ENABLED(CONFIG_SMP) || c->cpuid_level < 4)
		return;
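
	/*
	 * CPUID leaf 4: EAX[4:0] is the cache type (non-zero means the
	 * leaf is valid) and EAX[31:26] is the maximum core ID, so +1
	 * yields the core count.
	 */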
	cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
	if (eax & 0x1f)
		c->x86_max_cores = (eax >> 26) + 1;
}

void cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
{
	unsigned int n, dummy, ebx, ecx, edx, l2size;

	n = c->extended_cpuid_level;

	if (n >= 0x80000005) {
		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
		c->x86_cache_size = (ecx>>24) + (edx>>24);
#ifdef CONFIG_X86_64
		/* On K8 L1 TLB is inclusive, so don't count it */
		c->x86_tlbsize = 0;
#endif
	}

	if (n < 0x80000006)	/* Some chips just have a large L1. */
		return;

	cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
	l2size = ecx >> 16;

#ifdef CONFIG_X86_64
	c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
#else
	/* do processor-specific cache resizing */
	if (this_cpu->legacy_cache_size)
		l2size = this_cpu->legacy_cache_size(c, l2size);

	/* Allow user to override all this if necessary. */
	if (cachesize_override != -1)
		l2size = cachesize_override;

	if (l2size == 0)
		return;		/* Again, no L2 cache is possible */
#endif

	c->x86_cache_size = l2size;
}

u16 __read_mostly tlb_lli_4k[NR_INFO];
u16 __read_mostly tlb_lli_2m[NR_INFO];
u16 __read_mostly tlb_lli_4m[NR_INFO];
u16 __read_mostly tlb_lld_4k[NR_INFO];
u16 __read_mostly tlb_lld_2m[NR_INFO];
u16 __read_mostly tlb_lld_4m[NR_INFO];
u16 __read_mostly tlb_lld_1g[NR_INFO];

static void cpu_detect_tlb(struct cpuinfo_x86 *c)
{
	if (this_cpu->c_detect_tlb)
		this_cpu->c_detect_tlb(c);

	pr_info("Last level iTLB entries: 4KB %d, 2MB %d, 4MB %d\n",
		tlb_lli_4k[ENTRIES], tlb_lli_2m[ENTRIES],
		tlb_lli_4m[ENTRIES]);

	pr_info("Last level dTLB entries: 4KB %d, 2MB %d, 4MB %d, 1GB %d\n",
		tlb_lld_4k[ENTRIES], tlb_lld_2m[ENTRIES],
		tlb_lld_4m[ENTRIES], tlb_lld_1g[ENTRIES]);
}

int detect_ht_early(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	u32 eax, ebx, ecx, edx;

	if (!cpu_has(c, X86_FEATURE_HT))
		return -1;

	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
		return -1;

	if (cpu_has(c, X86_FEATURE_XTOPOLOGY))
		return -1;

	cpuid(1, &eax, &ebx, &ecx, &edx);
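
	/* CPUID.1:EBX[23:16] = maximum number of logical processors per package. */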
	smp_num_siblings = (ebx & 0xff0000) >> 16;
	if (smp_num_siblings == 1)
		pr_info_once("CPU0: Hyper-Threading is disabled\n");
#endif
	return 0;
}

void detect_ht(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	int index_msb, core_bits;

	if (detect_ht_early(c) < 0)
		return;
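
	/*
	 * The package ID is the APIC id shifted past the sibling field;
	 * after dividing out the cores, the low core_bits of the remaining
	 * APIC id select the core within the package.
	 */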
	index_msb = get_count_order(smp_num_siblings);
	c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);

	smp_num_siblings = smp_num_siblings / c->x86_max_cores;

	index_msb = get_count_order(smp_num_siblings);

	core_bits = get_count_order(c->x86_max_cores);

	c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
				       ((1 << core_bits) - 1);
#endif
}

static void get_cpu_vendor(struct cpuinfo_x86 *c)
{
	char *v = c->x86_vendor_id;
	int i;

	for (i = 0; i < X86_VENDOR_NUM; i++) {
		if (!cpu_devs[i])
			break;

		if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
		    (cpu_devs[i]->c_ident[1] &&
		     !strcmp(v, cpu_devs[i]->c_ident[1]))) {

			this_cpu = cpu_devs[i];
			c->x86_vendor = this_cpu->c_x86_vendor;
			return;
		}
	}

	pr_err_once("CPU: vendor_id '%s' unknown, using generic init.\n" \
		    "CPU: Your system may be unstable.\n", v);

	c->x86_vendor = X86_VENDOR_UNKNOWN;
	this_cpu = &default_cpu;
}

void cpu_detect(struct cpuinfo_x86 *c)
{
	/* Get vendor name */
	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
	      (unsigned int *)&c->x86_vendor_id[0],
	      (unsigned int *)&c->x86_vendor_id[8],
	      (unsigned int *)&c->x86_vendor_id[4]);

	c->x86 = 4;
	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		u32 junk, tfms, cap0, misc;

		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
		c->x86		= x86_family(tfms);
		c->x86_model	= x86_model(tfms);
		c->x86_stepping	= x86_stepping(tfms);

		if (cap0 & (1<<19)) {
			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
			c->x86_cache_alignment = c->x86_clflush_size;
		}
	}
}

static void apply_forced_caps(struct cpuinfo_x86 *c)
{
	int i;

	for (i = 0; i < NCAPINTS + NBUGINTS; i++) {
		c->x86_capability[i] &= ~cpu_caps_cleared[i];
		c->x86_capability[i] |= cpu_caps_set[i];
	}
}

static void init_speculation_control(struct cpuinfo_x86 *c)
{
	/*
	 * The Intel SPEC_CTRL CPUID bit implies IBRS and IBPB support,
	 * and they also have a different bit for STIBP support. Also,
	 * a hypervisor might have set the individual AMD bits even on
	 * Intel CPUs, for finer-grained selection of what's available.
	 */
	if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) {
		set_cpu_cap(c, X86_FEATURE_IBRS);
		set_cpu_cap(c, X86_FEATURE_IBPB);
		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
	}

	if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
		set_cpu_cap(c, X86_FEATURE_STIBP);

	if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD) ||
	    cpu_has(c, X86_FEATURE_VIRT_SSBD))
		set_cpu_cap(c, X86_FEATURE_SSBD);

	if (cpu_has(c, X86_FEATURE_AMD_IBRS)) {
		set_cpu_cap(c, X86_FEATURE_IBRS);
		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
	}

	if (cpu_has(c, X86_FEATURE_AMD_IBPB))
		set_cpu_cap(c, X86_FEATURE_IBPB);

	if (cpu_has(c, X86_FEATURE_AMD_STIBP)) {
		set_cpu_cap(c, X86_FEATURE_STIBP);
		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
	}

	if (cpu_has(c, X86_FEATURE_AMD_SSBD)) {
		set_cpu_cap(c, X86_FEATURE_SSBD);
		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
		clear_cpu_cap(c, X86_FEATURE_VIRT_SSBD);
	}
}

void get_cpu_cap(struct cpuinfo_x86 *c)
{
	u32 eax, ebx, ecx, edx;

	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		cpuid(0x00000001, &eax, &ebx, &ecx, &edx);

		c->x86_capability[CPUID_1_ECX] = ecx;
		c->x86_capability[CPUID_1_EDX] = edx;
	}

	/* Thermal and Power Management Leaf: level 0x00000006 (eax) */
	if (c->cpuid_level >= 0x00000006)
		c->x86_capability[CPUID_6_EAX] = cpuid_eax(0x00000006);

	/* Additional Intel-defined flags: level 0x00000007 */
	if (c->cpuid_level >= 0x00000007) {
		cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx);
		c->x86_capability[CPUID_7_0_EBX] = ebx;
		c->x86_capability[CPUID_7_ECX] = ecx;
		c->x86_capability[CPUID_7_EDX] = edx;

		/* Check valid sub-leaf index before accessing it */
		if (eax >= 1) {
			cpuid_count(0x00000007, 1, &eax, &ebx, &ecx, &edx);
			c->x86_capability[CPUID_7_1_EAX] = eax;
		}
	}

	/* Extended state features: level 0x0000000d */
	if (c->cpuid_level >= 0x0000000d) {
		cpuid_count(0x0000000d, 1, &eax, &ebx, &ecx, &edx);

		c->x86_capability[CPUID_D_1_EAX] = eax;
	}

	/* AMD-defined flags: level 0x80000001 */
	eax = cpuid_eax(0x80000000);
	c->extended_cpuid_level = eax;

	if ((eax & 0xffff0000) == 0x80000000) {
		if (eax >= 0x80000001) {
			cpuid(0x80000001, &eax, &ebx, &ecx, &edx);

			c->x86_capability[CPUID_8000_0001_ECX] = ecx;
			c->x86_capability[CPUID_8000_0001_EDX] = edx;
		}
	}

	if (c->extended_cpuid_level >= 0x80000007) {
		cpuid(0x80000007, &eax, &ebx, &ecx, &edx);

		c->x86_capability[CPUID_8000_0007_EBX] = ebx;
		c->x86_power = edx;
	}

	if (c->extended_cpuid_level >= 0x80000008) {
		cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
		c->x86_capability[CPUID_8000_0008_EBX] = ebx;
	}

	if (c->extended_cpuid_level >= 0x8000000a)
		c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a);

	if (c->extended_cpuid_level >= 0x8000001f)
		c->x86_capability[CPUID_8000_001F_EAX] = cpuid_eax(0x8000001f);

	init_scattered_cpuid_features(c);
	init_speculation_control(c);

	/*
	 * Clear/Set all flags overridden by options, after probe.
	 * This needs to happen each time we re-probe, which may happen
	 * several times during CPU initialization.
	 */
	apply_forced_caps(c);
}

void get_cpu_address_sizes(struct cpuinfo_x86 *c)
{
	u32 eax, ebx, ecx, edx;

	if (c->extended_cpuid_level >= 0x80000008) {
		cpuid(0x80000008, &eax, &ebx, &ecx, &edx);

		c->x86_virt_bits = (eax >> 8) & 0xff;
		c->x86_phys_bits = eax & 0xff;
	}
#ifdef CONFIG_X86_32
	else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
		c->x86_phys_bits = 36;
#endif
	c->x86_cache_bits = c->x86_phys_bits;
}

static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	int i;

	/*
	 * First of all, decide if this is a 486 or higher
	 * It's a 486 if we can modify the AC flag
	 */
	if (flag_is_changeable_p(X86_EFLAGS_AC))
		c->x86 = 4;
	else
		c->x86 = 3;

	for (i = 0; i < X86_VENDOR_NUM; i++)
		if (cpu_devs[i] && cpu_devs[i]->c_identify) {
			c->x86_vendor_id[0] = 0;
			cpu_devs[i]->c_identify(c);
			if (c->x86_vendor_id[0]) {
				get_cpu_vendor(c);
				break;
			}
		}
#endif
}

#define NO_SPECULATION		BIT(0)
#define NO_MELTDOWN		BIT(1)
#define NO_SSB			BIT(2)
#define NO_L1TF			BIT(3)
#define NO_MDS			BIT(4)
#define MSBDS_ONLY		BIT(5)
#define NO_SWAPGS		BIT(6)
#define NO_ITLB_MULTIHIT	BIT(7)
#define NO_SPECTRE_V2		BIT(8)
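
/*
 * Each bit above marks a vulnerability that the listed CPUs are known
 * *not* to have; cpu_matches() tests them against a match entry's
 * driver_data below.
 */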

#define VULNWL(vendor, family, model, whitelist)	\
	X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, whitelist)

#define VULNWL_INTEL(model, whitelist)		\
	VULNWL(INTEL, 6, INTEL_FAM6_##model, whitelist)

#define VULNWL_AMD(family, whitelist)		\
	VULNWL(AMD, family, X86_MODEL_ANY, whitelist)

#define VULNWL_HYGON(family, whitelist)		\
	VULNWL(HYGON, family, X86_MODEL_ANY, whitelist)

static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
	VULNWL(ANY,	4, X86_MODEL_ANY,	NO_SPECULATION),
	VULNWL(CENTAUR,	5, X86_MODEL_ANY,	NO_SPECULATION),
	VULNWL(INTEL,	5, X86_MODEL_ANY,	NO_SPECULATION),
	VULNWL(NSC,	5, X86_MODEL_ANY,	NO_SPECULATION),

	/* Intel Family 6 */
	VULNWL_INTEL(ATOM_SALTWELL,		NO_SPECULATION | NO_ITLB_MULTIHIT),
	VULNWL_INTEL(ATOM_SALTWELL_TABLET,	NO_SPECULATION | NO_ITLB_MULTIHIT),
	VULNWL_INTEL(ATOM_SALTWELL_MID,		NO_SPECULATION | NO_ITLB_MULTIHIT),
	VULNWL_INTEL(ATOM_BONNELL,		NO_SPECULATION | NO_ITLB_MULTIHIT),
	VULNWL_INTEL(ATOM_BONNELL_MID,		NO_SPECULATION | NO_ITLB_MULTIHIT),

	VULNWL_INTEL(ATOM_SILVERMONT,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
	VULNWL_INTEL(ATOM_SILVERMONT_D,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
	VULNWL_INTEL(ATOM_SILVERMONT_MID,	NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
	VULNWL_INTEL(ATOM_AIRMONT,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
	VULNWL_INTEL(XEON_PHI_KNL,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
	VULNWL_INTEL(XEON_PHI_KNM,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),

	VULNWL_INTEL(CORE_YONAH,		NO_SSB),

	VULNWL_INTEL(ATOM_AIRMONT_MID,		NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
	VULNWL_INTEL(ATOM_AIRMONT_NP,		NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),

	VULNWL_INTEL(ATOM_GOLDMONT,		NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
	VULNWL_INTEL(ATOM_GOLDMONT_D,		NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
	VULNWL_INTEL(ATOM_GOLDMONT_PLUS,	NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),

	/*
	 * Technically, swapgs isn't serializing on AMD (despite it previously
	 * being documented as such in the APM).  But according to AMD, %gs is
	 * updated non-speculatively, and the issuing of %gs-relative memory
	 * operands will be blocked until the %gs update completes, which is
	 * good enough for our purposes.
	 */

	VULNWL_INTEL(ATOM_TREMONT_D,		NO_ITLB_MULTIHIT),

	/* AMD Family 0xf - 0x12 */
	VULNWL_AMD(0x0f,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
	VULNWL_AMD(0x10,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
	VULNWL_AMD(0x11,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
	VULNWL_AMD(0x12,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),

	/* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
	VULNWL_AMD(X86_FAMILY_ANY,	NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
	VULNWL_HYGON(X86_FAMILY_ANY,	NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),

	/* Zhaoxin Family 7 */
	VULNWL(CENTAUR,	7, X86_MODEL_ANY,	NO_SPECTRE_V2 | NO_SWAPGS),
	VULNWL(ZHAOXIN,	7, X86_MODEL_ANY,	NO_SPECTRE_V2 | NO_SWAPGS),
	{}
};

#define VULNBL_INTEL_STEPPINGS(model, steppings, issues)		   \
	X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE(INTEL, 6,		   \
					    INTEL_FAM6_##model, steppings, \
					    X86_FEATURE_ANY, issues)

#define SRBDS		BIT(0)

static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
	VULNBL_INTEL_STEPPINGS(IVYBRIDGE,	X86_STEPPING_ANY,		SRBDS),
	VULNBL_INTEL_STEPPINGS(HASWELL,		X86_STEPPING_ANY,		SRBDS),
	VULNBL_INTEL_STEPPINGS(HASWELL_L,	X86_STEPPING_ANY,		SRBDS),
	VULNBL_INTEL_STEPPINGS(HASWELL_G,	X86_STEPPING_ANY,		SRBDS),
	VULNBL_INTEL_STEPPINGS(BROADWELL_G,	X86_STEPPING_ANY,		SRBDS),
	VULNBL_INTEL_STEPPINGS(BROADWELL,	X86_STEPPING_ANY,		SRBDS),
	VULNBL_INTEL_STEPPINGS(SKYLAKE_L,	X86_STEPPING_ANY,		SRBDS),
	VULNBL_INTEL_STEPPINGS(SKYLAKE,		X86_STEPPING_ANY,		SRBDS),
	VULNBL_INTEL_STEPPINGS(KABYLAKE_L,	X86_STEPPINGS(0x0, 0xC),	SRBDS),
	VULNBL_INTEL_STEPPINGS(KABYLAKE,	X86_STEPPINGS(0x0, 0xD),	SRBDS),
	{}
};

static bool __init cpu_matches(const struct x86_cpu_id *table, unsigned long which)
{
	const struct x86_cpu_id *m = x86_match_cpu(table);

	return m && !!(m->driver_data & which);
}
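
/*
 * Returns 0 when IA32_ARCH_CAPABILITIES is not enumerated, i.e. the CPU
 * makes no claims about hardware mitigations.
 */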
u64 x86_read_arch_cap_msr(void)
{
	u64 ia32_cap = 0;

	if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);

	return ia32_cap;
}

static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
{
	u64 ia32_cap = x86_read_arch_cap_msr();

	/* Set ITLB_MULTIHIT bug if cpu is not in the whitelist and not mitigated */
	if (!cpu_matches(cpu_vuln_whitelist, NO_ITLB_MULTIHIT) &&
	    !(ia32_cap & ARCH_CAP_PSCHANGE_MC_NO))
		setup_force_cpu_bug(X86_BUG_ITLB_MULTIHIT);

	if (cpu_matches(cpu_vuln_whitelist, NO_SPECULATION))
		return;

	setup_force_cpu_bug(X86_BUG_SPECTRE_V1);

	if (!cpu_matches(cpu_vuln_whitelist, NO_SPECTRE_V2))
		setup_force_cpu_bug(X86_BUG_SPECTRE_V2);

	if (!cpu_matches(cpu_vuln_whitelist, NO_SSB) &&
	    !(ia32_cap & ARCH_CAP_SSB_NO) &&
	    !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
		setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);

	if (ia32_cap & ARCH_CAP_IBRS_ALL)
		setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);

	if (!cpu_matches(cpu_vuln_whitelist, NO_MDS) &&
	    !(ia32_cap & ARCH_CAP_MDS_NO)) {
		setup_force_cpu_bug(X86_BUG_MDS);
		if (cpu_matches(cpu_vuln_whitelist, MSBDS_ONLY))
			setup_force_cpu_bug(X86_BUG_MSBDS_ONLY);
	}

	if (!cpu_matches(cpu_vuln_whitelist, NO_SWAPGS))
		setup_force_cpu_bug(X86_BUG_SWAPGS);

	/*
	 * When the CPU is not mitigated for TAA (TAA_NO=0) set TAA bug when:
	 *	- TSX is supported or
	 *	- TSX_CTRL is present
	 *
	 * TSX_CTRL check is needed for cases when TSX could be disabled before
	 * the kernel boot e.g. kexec.
	 * TSX_CTRL check alone is not sufficient for cases when the microcode
	 * update is not present or running as guest that don't get TSX_CTRL.
	 */
	if (!(ia32_cap & ARCH_CAP_TAA_NO) &&
	    (cpu_has(c, X86_FEATURE_RTM) ||
	     (ia32_cap & ARCH_CAP_TSX_CTRL_MSR)))
		setup_force_cpu_bug(X86_BUG_TAA);

	/*
	 * SRBDS affects CPUs which support RDRAND or RDSEED and are listed
	 * in the vulnerability blacklist.
	 */
	if ((cpu_has(c, X86_FEATURE_RDRAND) ||
	     cpu_has(c, X86_FEATURE_RDSEED)) &&
	    cpu_matches(cpu_vuln_blacklist, SRBDS))
		setup_force_cpu_bug(X86_BUG_SRBDS);

	if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
		return;

	/* Rogue Data Cache Load? No! */
	if (ia32_cap & ARCH_CAP_RDCL_NO)
		return;

	setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);

	if (cpu_matches(cpu_vuln_whitelist, NO_L1TF))
		return;

	setup_force_cpu_bug(X86_BUG_L1TF);
}

/*
 * The NOPL instruction is supposed to exist on all CPUs of family >= 6;
 * unfortunately, that's not true in practice because of early VIA
 * chips and (more importantly) broken virtualizers that are not easy
 * to detect. In the latter case it doesn't even *fail* reliably, so
 * probing for it doesn't even work. Disable it completely on 32-bit
 * unless we can find a reliable way to detect all the broken cases.
 * Enable it explicitly on 64-bit for non-constant inputs of cpu_has().
 */
static void detect_nopl(void)
{
#ifdef CONFIG_X86_32
	setup_clear_cpu_cap(X86_FEATURE_NOPL);
#else
	setup_force_cpu_cap(X86_FEATURE_NOPL);
#endif
}

/*
 * We parse cpu parameters early because fpu__init_system() is executed
 * before parse_early_param().
 */
static void __init cpu_parse_early_param(void)
{
	char arg[128];
	char *argptr = arg;
	int arglen, res, bit;

#ifdef CONFIG_X86_32
	if (cmdline_find_option_bool(boot_command_line, "no387"))
#ifdef CONFIG_MATH_EMULATION
		setup_clear_cpu_cap(X86_FEATURE_FPU);
#else
		pr_err("Option 'no387' requires CONFIG_MATH_EMULATION enabled.\n");
#endif

	if (cmdline_find_option_bool(boot_command_line, "nofxsr"))
		setup_clear_cpu_cap(X86_FEATURE_FXSR);
#endif

	if (cmdline_find_option_bool(boot_command_line, "noxsave"))
		setup_clear_cpu_cap(X86_FEATURE_XSAVE);

	if (cmdline_find_option_bool(boot_command_line, "noxsaveopt"))
		setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);

	if (cmdline_find_option_bool(boot_command_line, "noxsaves"))
		setup_clear_cpu_cap(X86_FEATURE_XSAVES);

	arglen = cmdline_find_option(boot_command_line, "clearcpuid", arg, sizeof(arg));
	if (arglen <= 0)
		return;

	pr_info("Clearing CPUID bits:");
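	/*
	 * get_option() returns 0 when no number was parsed, 1 for a final
	 * value, 2 when a comma follows (more values), and 3 when a hyphen
	 * follows (a range).
	 */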
	do {
		res = get_option(&argptr, &bit);
		if (res == 0 || res == 3)
			break;

		/* If the argument was too long, the last bit may be cut off */
		if (res == 1 && arglen >= sizeof(arg))
			break;

		if (bit >= 0 && bit < NCAPINTS * 32) {
			pr_cont(" " X86_CAP_FMT, x86_cap_flag(bit));
			setup_clear_cpu_cap(bit);
		}
	} while (res == 2);
	pr_cont("\n");
}

/*
 * Do minimum CPU detection early.
 * Fields really needed: vendor, cpuid_level, family, model, mask,
 * cache alignment.
 * The others are not touched to avoid unwanted side effects.
 *
 * WARNING: this function is only called on the boot CPU.  Don't add code
 * here that is supposed to run on all CPUs.
 */
static void __init early_identify_cpu(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
	c->x86_clflush_size = 64;
	c->x86_phys_bits = 36;
	c->x86_virt_bits = 48;
#else
	c->x86_clflush_size = 32;
	c->x86_phys_bits = 32;
	c->x86_virt_bits = 32;
#endif
	c->x86_cache_alignment = c->x86_clflush_size;

	memset(&c->x86_capability, 0, sizeof(c->x86_capability));
	c->extended_cpuid_level = 0;

	if (!have_cpuid_p())
		identify_cpu_without_cpuid(c);

	/* cyrix could have cpuid enabled via c_identify()*/
	if (have_cpuid_p()) {
		cpu_detect(c);
		get_cpu_vendor(c);
		get_cpu_cap(c);
		get_cpu_address_sizes(c);
		setup_force_cpu_cap(X86_FEATURE_CPUID);
		cpu_parse_early_param();

		if (this_cpu->c_early_init)
			this_cpu->c_early_init(c);

		c->cpu_index = 0;
		filter_cpuid_features(c, false);

		if (this_cpu->c_bsp_init)
			this_cpu->c_bsp_init(c);
	} else {
		setup_clear_cpu_cap(X86_FEATURE_CPUID);
	}

	setup_force_cpu_cap(X86_FEATURE_ALWAYS);

	cpu_set_bug_bits(c);

	sld_setup(c);

	fpu__init_system(c);

	init_sigframe_size();

#ifdef CONFIG_X86_32
	/*
	 * Regardless of whether PCID is enumerated, the SDM says
	 * that it can't be enabled in 32-bit mode.
	 */
	setup_clear_cpu_cap(X86_FEATURE_PCID);
#endif

	/*
	 * Later in the boot process pgtable_l5_enabled() relies on
	 * cpu_feature_enabled(X86_FEATURE_LA57). If 5-level paging is not
	 * enabled by this point we need to clear the feature bit to avoid
	 * false-positives at the later stage.
	 *
	 * pgtable_l5_enabled() can be false here for several reasons:
	 *  - 5-level paging is disabled compile-time;
	 *  - it's 32-bit kernel;
	 *  - machine doesn't support 5-level paging;
	 *  - user specified 'no5lvl' in kernel command line.
	 */
	if (!pgtable_l5_enabled())
		setup_clear_cpu_cap(X86_FEATURE_LA57);

	detect_nopl();
}

void __init early_cpu_init(void)
{
	const struct cpu_dev *const *cdev;
	int count = 0;

#ifdef CONFIG_PROCESSOR_SELECT
	pr_info("KERNEL supported cpus:\n");
#endif

	for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
		const struct cpu_dev *cpudev = *cdev;

		if (count >= X86_VENDOR_NUM)
			break;
		cpu_devs[count] = cpudev;
		count++;

#ifdef CONFIG_PROCESSOR_SELECT
		{
			unsigned int j;

			for (j = 0; j < 2; j++) {
				if (!cpudev->c_ident[j])
					continue;
				pr_info("  %s %s\n", cpudev->c_vendor,
					cpudev->c_ident[j]);
			}
		}
#endif
	}
	early_identify_cpu(&boot_cpu_data);
}

static void detect_null_seg_behavior(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
	/*
	 * Empirically, writing zero to a segment selector on AMD does
	 * not clear the base, whereas writing zero to a segment
	 * selector on Intel does clear the base.  Intel's behavior
	 * allows slightly faster context switches in the common case
	 * where GS is unused by the prev and next threads.
	 *
	 * Since neither vendor documents this anywhere that I can see,
	 * detect it directly instead of hard-coding the choice by
	 * vendor.
	 *
	 * I've designated AMD's behavior as the "bug" because it's
	 * counterintuitive and less friendly.
	 */

	unsigned long old_base, tmp;
	rdmsrl(MSR_FS_BASE, old_base);
	wrmsrl(MSR_FS_BASE, 1);
	loadsegment(fs, 0);
	rdmsrl(MSR_FS_BASE, tmp);
	if (tmp != 0)
		set_cpu_bug(c, X86_BUG_NULL_SEG);
	wrmsrl(MSR_FS_BASE, old_base);
#endif
}

static void generic_identify(struct cpuinfo_x86 *c)
{
	c->extended_cpuid_level = 0;

	if (!have_cpuid_p())
		identify_cpu_without_cpuid(c);

	/* cyrix could have cpuid enabled via c_identify()*/
	if (!have_cpuid_p())
		return;

	cpu_detect(c);

	get_cpu_vendor(c);

	get_cpu_cap(c);

	get_cpu_address_sizes(c);

	if (c->cpuid_level >= 0x00000001) {
		c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
#ifdef CONFIG_X86_32
# ifdef CONFIG_SMP
		c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
# else
		c->apicid = c->initial_apicid;
# endif
#endif
		c->phys_proc_id = c->initial_apicid;
	}

	get_model_name(c); /* Default name */

	detect_null_seg_behavior(c);

	/*
	 * ESPFIX is a strange bug.  All real CPUs have it.  Paravirt
	 * systems that run Linux at CPL > 0 may or may not have the
	 * issue, but, even if they have the issue, there's absolutely
	 * nothing we can do about it because we can't use the real IRET
	 * instruction.
	 *
	 * NB: For the time being, only 32-bit kernels support
	 * X86_BUG_ESPFIX as such.  64-bit kernels directly choose
	 * whether to apply espfix using paravirt hooks.  If any
	 * non-paravirt system ever shows up that does *not* have the
	 * ESPFIX issue, we can change this.
	 */
#ifdef CONFIG_X86_32
	set_cpu_bug(c, X86_BUG_ESPFIX);
#endif
}

/*
 * Validate that ACPI/mptables have the same information about the
 * effective APIC id and update the package map.
 */
static void validate_apic_and_package_id(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	unsigned int apicid, cpu = smp_processor_id();

	apicid = apic->cpu_present_to_apicid(cpu);

	if (apicid != c->apicid) {
		pr_err(FW_BUG "CPU%u: APIC id mismatch. Firmware: %x APIC: %x\n",
		       cpu, apicid, c->initial_apicid);
	}
	BUG_ON(topology_update_package_map(c->phys_proc_id, cpu));
	BUG_ON(topology_update_die_map(c->cpu_die_id, cpu));
#else
	c->logical_proc_id = 0;
#endif
}

/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
static void identify_cpu(struct cpuinfo_x86 *c)
{
	int i;

	c->loops_per_jiffy = loops_per_jiffy;
	c->x86_cache_size = 0;
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	c->x86_model = c->x86_stepping = 0;	/* So far unknown... */
	c->x86_vendor_id[0] = '\0'; /* Unset */
	c->x86_model_id[0] = '\0';  /* Unset */
	c->x86_max_cores = 1;
	c->x86_coreid_bits = 0;
	c->cu_id = 0xff;
#ifdef CONFIG_X86_64
	c->x86_clflush_size = 64;
	c->x86_phys_bits = 36;
	c->x86_virt_bits = 48;
#else
	c->cpuid_level = -1;	/* CPUID not detected */
	c->x86_clflush_size = 32;
	c->x86_phys_bits = 32;
	c->x86_virt_bits = 32;
#endif
	c->x86_cache_alignment = c->x86_clflush_size;
	memset(&c->x86_capability, 0, sizeof(c->x86_capability));
#ifdef CONFIG_X86_VMX_FEATURE_NAMES
	memset(&c->vmx_capability, 0, sizeof(c->vmx_capability));
#endif

	generic_identify(c);

	if (this_cpu->c_identify)
		this_cpu->c_identify(c);

	/* Clear/Set all flags overridden by options, after probe */
	apply_forced_caps(c);

#ifdef CONFIG_X86_64
	c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
#endif

	/*
	 * Vendor-specific initialization.  In this section we
	 * canonicalize the feature flags, meaning if there are
	 * features a certain CPU supports which CPUID doesn't
	 * tell us, CPUID claiming incorrect flags, or other bugs,
	 * we handle them here.
	 *
	 * At the end of this section, c->x86_capability better
	 * indicate the features this CPU genuinely supports!
	 */
	if (this_cpu->c_init)
		this_cpu->c_init(c);

	/* Disable the PN if appropriate */
	squash_the_stupid_serial_number(c);

	/* Set up SMEP/SMAP/UMIP */
	setup_smep(c);
	setup_smap(c);
	setup_umip(c);

	/* Enable FSGSBASE instructions if available. */
	if (cpu_has(c, X86_FEATURE_FSGSBASE)) {
		cr4_set_bits(X86_CR4_FSGSBASE);
		elf_hwcap2 |= HWCAP2_FSGSBASE;
	}

	/*
	 * The vendor-specific functions might have changed features.
	 * Now we do "generic changes."
	 */

	/* Filter out anything that depends on CPUID levels we don't have */
	filter_cpuid_features(c, true);

	/* If the model name is still unset, do table lookup. */
	if (!c->x86_model_id[0]) {
		const char *p;
		p = table_lookup_model(c);
		if (p)
			strcpy(c->x86_model_id, p);
		else
			/* Last resort... */
			sprintf(c->x86_model_id, "%02x/%02x",
				c->x86, c->x86_model);
	}

#ifdef CONFIG_X86_64
	detect_ht(c);
#endif

	x86_init_rdrand(c);
	setup_pku(c);

	/*
	 * Clear/Set all flags overridden by options; this needs to happen
	 * before the SMP cap AND below.
	 */
8bf1ebca 1605 apply_forced_caps(c);
3e0c3737 1606
1da177e4
LT
1607 /*
1608 * On SMP, boot_cpu_data holds the common feature set between
1609 * all CPUs; so make sure that we indicate which features are
1610 * common between the CPUs. The first time this routine gets
1611 * executed, c == &boot_cpu_data.
1612 */
34048c9e 1613 if (c != &boot_cpu_data) {
1da177e4 1614 /* AND the already accumulated flags with these */
9d31d35b 1615 for (i = 0; i < NCAPINTS; i++)
1da177e4 1616 boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
65fc985b
BP
1617
1618 /* OR, i.e. replicate the bug flags */
1619 for (i = NCAPINTS; i < NCAPINTS + NBUGINTS; i++)
1620 c->x86_capability[i] |= boot_cpu_data.x86_capability[i];
1da177e4
LT
1621 }
1622
1623 /* Init Machine Check Exception if available. */
5e09954a 1624 mcheck_cpu_init(c);
30d432df
AK
1625
1626 select_idle_routine(c);
102bbe3a 1627
de2d9445 1628#ifdef CONFIG_NUMA
102bbe3a
YL
1629 numa_add_cpu(smp_processor_id());
1630#endif
a6c4e076 1631}
31ab269a 1632
8b6c0ab1
IM
1633/*
1634 * Set up the CPU state needed to execute SYSENTER/SYSEXIT instructions
1635 * on 32-bit kernels:
1636 */
cfda7bb9
AL
1637#ifdef CONFIG_X86_32
1638void enable_sep_cpu(void)
1639{
8b6c0ab1
IM
1640 struct tss_struct *tss;
1641 int cpu;
cfda7bb9 1642
b3edfda4
BP
1643 if (!boot_cpu_has(X86_FEATURE_SEP))
1644 return;
1645
8b6c0ab1 1646 cpu = get_cpu();
c482feef 1647 tss = &per_cpu(cpu_tss_rw, cpu);
8b6c0ab1 1648
8b6c0ab1 1649 /*
cf9328cc
AL
1650 * We cache MSR_IA32_SYSENTER_CS's value in the TSS's ss1 field --
1651 * see the big comment in struct x86_hw_tss's definition.
8b6c0ab1 1652 */
cfda7bb9
AL
1653
1654 tss->x86_tss.ss1 = __KERNEL_CS;
8b6c0ab1 1655 wrmsr(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1, 0);
4fe2d8b1 1656 wrmsr(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_entry_stack(cpu) + 1), 0);
4c8cd0c5 1657 wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)entry_SYSENTER_32, 0);
8b6c0ab1 1658
cfda7bb9
AL
1659 put_cpu();
1660}
e04d645f
GC
1661#endif

void __init identify_boot_cpu(void)
{
	identify_cpu(&boot_cpu_data);
#ifdef CONFIG_X86_32
	sysenter_setup();
	enable_sep_cpu();
#endif
	cpu_detect_tlb(&boot_cpu_data);
	setup_cr_pinning();

	tsx_init();
}

void identify_secondary_cpu(struct cpuinfo_x86 *c)
{
	BUG_ON(c == &boot_cpu_data);
	identify_cpu(c);
#ifdef CONFIG_X86_32
	enable_sep_cpu();
#endif
	mtrr_ap_init();
	validate_apic_and_package_id(c);
	x86_spec_ctrl_setup_ap();
	update_srbds_msr();
}

static __init int setup_noclflush(char *arg)
{
	setup_clear_cpu_cap(X86_FEATURE_CLFLUSH);
	setup_clear_cpu_cap(X86_FEATURE_CLFLUSHOPT);
	return 1;
}
__setup("noclflush", setup_noclflush);
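
/*
 * The same __setup() pattern, sketched for a hypothetical "nosse2" boot
 * parameter (illustration only, not a real kernel option): clearing the
 * bit with setup_clear_cpu_cap() forces it off on every CPU before the
 * capability AND in identify_cpu().
 *
 *	static __init int setup_nosse2_example(char *arg)
 *	{
 *		setup_clear_cpu_cap(X86_FEATURE_XMM2);
 *		return 1;
 *	}
 *	__setup("nosse2", setup_nosse2_example);
 */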

void print_cpu_info(struct cpuinfo_x86 *c)
{
	const char *vendor = NULL;

	if (c->x86_vendor < X86_VENDOR_NUM) {
		vendor = this_cpu->c_vendor;
	} else {
		if (c->cpuid_level >= 0)
			vendor = c->x86_vendor_id;
	}

	if (vendor && !strstr(c->x86_model_id, vendor))
		pr_cont("%s ", vendor);

	if (c->x86_model_id[0])
		pr_cont("%s", c->x86_model_id);
	else
		pr_cont("%d86", c->x86);

	pr_cont(" (family: 0x%x, model: 0x%x", c->x86, c->x86_model);

	if (c->x86_stepping || c->cpuid_level >= 0)
		pr_cont(", stepping: 0x%x)\n", c->x86_stepping);
	else
		pr_cont(")\n");
}
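
/*
 * Hypothetical example of the resulting output for an Intel part
 * (model string and numbers are illustrative only):
 *
 *	Intel(R) Xeon(R) CPU E5-2690 v4 @ 2.60GHz (family: 0x6, model: 0x4f, stepping: 0x1)
 */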

/*
 * clearcpuid= was already parsed in cpu_parse_early_param(). This dummy
 * function prevents it from becoming an environment variable for init.
 */
static __init int setup_clearcpuid(char *arg)
{
	return 1;
}
__setup("clearcpuid=", setup_clearcpuid);

#ifdef CONFIG_X86_64
DEFINE_PER_CPU_FIRST(struct fixed_percpu_data,
		     fixed_percpu_data) __aligned(PAGE_SIZE) __visible;
EXPORT_PER_CPU_SYMBOL_GPL(fixed_percpu_data);

/*
 * The following percpu variables are hot. Align current_task to
 * cacheline size such that they fall in the same cacheline.
 */
DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
	&init_task;
EXPORT_PER_CPU_SYMBOL(current_task);

DEFINE_PER_CPU(void *, hardirq_stack_ptr);
DEFINE_PER_CPU(bool, hardirq_stack_inuse);

DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
EXPORT_PER_CPU_SYMBOL(__preempt_count);

DEFINE_PER_CPU(unsigned long, cpu_current_top_of_stack) = TOP_OF_INIT_STACK;

/* May not be marked __init: used by software suspend */
void syscall_init(void)
{
	wrmsr(MSR_STAR, 0, (__USER32_CS << 16) | __KERNEL_CS);
	wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64);

#ifdef CONFIG_IA32_EMULATION
	wrmsrl(MSR_CSTAR, (unsigned long)entry_SYSCALL_compat);
	/*
	 * This only works on Intel CPUs.
	 * On AMD CPUs these MSRs are 32-bit, and the CPU truncates
	 * MSR_IA32_SYSENTER_EIP. This does not cause SYSENTER to jump
	 * to the wrong location, because AMD doesn't allow SYSENTER in
	 * long mode (either 32- or 64-bit).
	 */
	wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
	wrmsrl_safe(MSR_IA32_SYSENTER_ESP,
		    (unsigned long)(cpu_entry_stack(smp_processor_id()) + 1));
	wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat);
#else
	wrmsrl(MSR_CSTAR, (unsigned long)ignore_sysret);
	wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)GDT_ENTRY_INVALID_SEG);
	wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
	wrmsrl_safe(MSR_IA32_SYSENTER_EIP, 0ULL);
#endif

	/*
	 * Flags to clear on syscall; clear as much as possible
	 * to minimize user space-kernel interference.
	 */
	wrmsrl(MSR_SYSCALL_MASK,
	       X86_EFLAGS_CF|X86_EFLAGS_PF|X86_EFLAGS_AF|
	       X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_TF|
	       X86_EFLAGS_IF|X86_EFLAGS_DF|X86_EFLAGS_OF|
	       X86_EFLAGS_IOPL|X86_EFLAGS_NT|X86_EFLAGS_RF|
	       X86_EFLAGS_AC|X86_EFLAGS_ID);
}
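
/*
 * For reference, the MSR_STAR write above in wrmsrl() form: wrmsr(msr, lo, hi)
 * places 'hi' in bits 63:32, so the selectors land at the SYSRET CS/SS base
 * (bits 63:48) and the SYSCALL CS/SS base (bits 47:32), equivalent to:
 *
 *	wrmsrl(MSR_STAR, ((u64)__USER32_CS << 48) | ((u64)__KERNEL_CS << 32));
 */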

#else	/* CONFIG_X86_64 */

DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
EXPORT_PER_CPU_SYMBOL(current_task);
DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
EXPORT_PER_CPU_SYMBOL(__preempt_count);

/*
 * On x86_32, vm86 modifies tss.sp0, so sp0 isn't a reliable way to find
 * the top of the kernel stack. Use an extra percpu variable to track the
 * top of the kernel stack directly.
 */
DEFINE_PER_CPU(unsigned long, cpu_current_top_of_stack) =
	(unsigned long)&init_thread_union + THREAD_SIZE;
EXPORT_PER_CPU_SYMBOL(cpu_current_top_of_stack);

#ifdef CONFIG_STACKPROTECTOR
DEFINE_PER_CPU(unsigned long, __stack_chk_guard);
EXPORT_PER_CPU_SYMBOL(__stack_chk_guard);
#endif

#endif	/* CONFIG_X86_64 */

/*
 * Clear all 6 debug registers:
 */
static void clear_all_debug_regs(void)
{
	int i;

	for (i = 0; i < 8; i++) {
		/* Ignore db4, db5: they are reserved (they alias db6/db7) */
		if ((i == 4) || (i == 5))
			continue;

		set_debugreg(0, i);
	}
}

#ifdef CONFIG_KGDB
/*
 * Restore debug regs if using kgdbwait and you have a kernel debugger
 * connection established.
 */
static void dbg_restore_debug_regs(void)
{
	if (unlikely(kgdb_connected && arch_kgdb_ops.correct_hw_break))
		arch_kgdb_ops.correct_hw_break();
}
#else /* ! CONFIG_KGDB */
#define dbg_restore_debug_regs()
#endif /* ! CONFIG_KGDB */

static void wait_for_master_cpu(int cpu)
{
#ifdef CONFIG_SMP
	/*
	 * Wait for an ACK from the master CPU before continuing
	 * with AP initialization.
	 */
	WARN_ON(cpumask_test_and_set_cpu(cpu, cpu_initialized_mask));
	while (!cpumask_test_cpu(cpu, cpu_callout_mask))
		cpu_relax();
#endif
}
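
/*
 * The boot CPU's half of this handshake lives in smpboot.c; a simplified
 * sketch (timeouts and error handling omitted, function name hypothetical):
 */
static __maybe_unused void release_ap_example(int cpu)
{
	/* Wait until the AP has flagged itself in cpu_initialized_mask... */
	while (!cpumask_test_cpu(cpu, cpu_initialized_mask))
		cpu_relax();

	/* ...then let it proceed past wait_for_master_cpu(). */
	cpumask_set_cpu(cpu, cpu_callout_mask);
}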

#ifdef CONFIG_X86_64
static inline void setup_getcpu(int cpu)
{
	unsigned long cpudata = vdso_encode_cpunode(cpu, early_cpu_to_node(cpu));
	struct desc_struct d = { };

	if (boot_cpu_has(X86_FEATURE_RDTSCP) || boot_cpu_has(X86_FEATURE_RDPID))
		wrmsr(MSR_TSC_AUX, cpudata, 0);

	/* Store CPU and node number in limit. */
	d.limit0 = cpudata;
	d.limit1 = cpudata >> 16;

	d.type = 5;		/* RO data, expand down, accessed */
	d.dpl = 3;		/* Visible to user code */
	d.s = 1;		/* Not a system segment */
	d.p = 1;		/* Present */
	d.d = 1;		/* 32-bit */

	write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_CPUNODE, &d, DESCTYPE_S);
}
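
/*
 * Simplified sketch of the consumer side (cf. vdso_read_cpunode() in
 * <asm/segment.h>): LSL against the user-visible CPUNODE segment returns
 * the limit programmed above, i.e. the packed CPU and node numbers.
 */
static __maybe_unused void read_cpunode_example(unsigned int *cpu, unsigned int *node)
{
	unsigned int p;

	asm("lsl %1,%0" : "=r" (p) : "r" ((u32)__CPUNODE_SEG));

	if (cpu)
		*cpu = p & VDSO_CPUNODE_MASK;
	if (node)
		*node = p >> VDSO_CPUNODE_BITS;
}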

static inline void ucode_cpu_init(int cpu)
{
	if (cpu)
		load_ucode_ap();
}

static inline void tss_setup_ist(struct tss_struct *tss)
{
	/* Set up the per-CPU TSS IST stacks */
	tss->x86_tss.ist[IST_INDEX_DF] = __this_cpu_ist_top_va(DF);
	tss->x86_tss.ist[IST_INDEX_NMI] = __this_cpu_ist_top_va(NMI);
	tss->x86_tss.ist[IST_INDEX_DB] = __this_cpu_ist_top_va(DB);
	tss->x86_tss.ist[IST_INDEX_MCE] = __this_cpu_ist_top_va(MCE);
	/* Only mapped when SEV-ES is active */
	tss->x86_tss.ist[IST_INDEX_VC] = __this_cpu_ist_top_va(VC);
}

#else /* CONFIG_X86_64 */

static inline void setup_getcpu(int cpu) { }

static inline void ucode_cpu_init(int cpu)
{
	show_ucode_info_early();
}

static inline void tss_setup_ist(struct tss_struct *tss) { }

#endif /* !CONFIG_X86_64 */

static inline void tss_setup_io_bitmap(struct tss_struct *tss)
{
	tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET_INVALID;

#ifdef CONFIG_X86_IOPL_IOPERM
	tss->io_bitmap.prev_max = 0;
	tss->io_bitmap.prev_sequence = 0;
	memset(tss->io_bitmap.bitmap, 0xff, sizeof(tss->io_bitmap.bitmap));
	/*
	 * Invalidate the extra array entry past the end of the
	 * all-permission bitmap, as required by the hardware.
	 */
	tss->io_bitmap.mapall[IO_BITMAP_LONGS] = ~0UL;
#endif
}
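
/*
 * Userspace context (illustrative, not part of this file): a task granted
 * port access via, say, ioperm(0x378, 8, 1) gets a per-task bitmap with
 * those bits cleared (cleared = access allowed). With CONFIG_X86_IOPL_IOPERM,
 * that bitmap is copied into tss->io_bitmap before returning to user mode,
 * and io_bitmap_base is switched to a valid offset so the CPU consults it.
 */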

/*
 * Set up everything needed to handle exceptions from the IDT, including the
 * IST exceptions which use paranoid_entry().
 */
void cpu_init_exception_handling(void)
{
	struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw);
	int cpu = raw_smp_processor_id();

	/* paranoid_entry() gets the CPU number from the GDT */
	setup_getcpu(cpu);

	/* IST vectors need TSS to be set up. */
	tss_setup_ist(tss);
	tss_setup_io_bitmap(tss);
	set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);

	load_TR_desc();

	/* Finally load the IDT */
	load_current_idt();
}

/*
 * cpu_init() initializes state that is per-CPU. Some data is already
 * initialized (naturally) in the bootstrap process, such as the GDT. We
 * reload it nevertheless: this function acts as a 'CPU state barrier',
 * and nothing should get across.
 */
void cpu_init(void)
{
	struct task_struct *cur = current;
	int cpu = raw_smp_processor_id();

	wait_for_master_cpu(cpu);

	ucode_cpu_init(cpu);

#ifdef CONFIG_NUMA
	if (this_cpu_read(numa_node) == 0 &&
	    early_cpu_to_node(cpu) != NUMA_NO_NODE)
		set_numa_node(early_cpu_to_node(cpu));
#endif
	pr_debug("Initializing CPU#%d\n", cpu);

	if (IS_ENABLED(CONFIG_X86_64) || cpu_feature_enabled(X86_FEATURE_VME) ||
	    boot_cpu_has(X86_FEATURE_TSC) || boot_cpu_has(X86_FEATURE_DE))
		cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);

	/*
	 * Initialize the per-CPU GDT with the boot GDT,
	 * and set up the GDT descriptor:
	 */
	switch_to_new_gdt(cpu);

	if (IS_ENABLED(CONFIG_X86_64)) {
		loadsegment(fs, 0);
		memset(cur->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
		syscall_init();

		wrmsrl(MSR_FS_BASE, 0);
		wrmsrl(MSR_KERNEL_GS_BASE, 0);
		barrier();

		x2apic_setup();
	}

	mmgrab(&init_mm);
	cur->active_mm = &init_mm;
	BUG_ON(cur->mm);
	initialize_tlbstate_and_flush();
	enter_lazy_tlb(&init_mm, cur);

	/*
	 * sp0 points to the entry trampoline stack regardless of what task
	 * is running.
	 */
	load_sp0((unsigned long)(cpu_entry_stack(cpu) + 1));

	load_mm_ldt(&init_mm);

	clear_all_debug_regs();
	dbg_restore_debug_regs();

	doublefault_init_cpu_tss();

	fpu__init_cpu();

	if (is_uv_system())
		uv_cpu_init();

	load_fixmap_gdt(cpu);
}

#ifdef CONFIG_SMP
void cpu_init_secondary(void)
{
	/*
	 * Relies on the BP having set up the IDT tables, which are loaded
	 * on this CPU in cpu_init_exception_handling().
	 */
	cpu_init_exception_handling();
	cpu_init();
}
#endif

/*
 * The microcode loader calls this upon a late microcode load to recheck
 * features, but only when the microcode has actually been updated. Caller
 * holds microcode_mutex and the CPU hotplug lock.
 */
void microcode_check(void)
{
	struct cpuinfo_x86 info;

	perf_check_microcode();

	/* Reload CPUID max function as it might've changed. */
	info.cpuid_level = cpuid_eax(0);

	/*
	 * Copy all capability leaves to pick up the synthetic ones so that
	 * memcmp() below doesn't fail on that. The ones coming from CPUID will
	 * get overwritten in get_cpu_cap().
	 */
	memcpy(&info.x86_capability, &boot_cpu_data.x86_capability, sizeof(info.x86_capability));

	get_cpu_cap(&info);

	if (!memcmp(&info.x86_capability, &boot_cpu_data.x86_capability, sizeof(info.x86_capability)))
		return;

	pr_warn("x86/CPU: CPU features have changed after loading microcode, but might not take effect.\n");
	pr_warn("x86/CPU: Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
}

/*
 * Invoked from core CPU hotplug code after hotplug operations
 */
void arch_smt_update(void)
{
	/* Handle the speculative execution misfeatures */
	cpu_bugs_smt_update();
	/* Check whether IPI broadcasting can be enabled */
	apic_smt_update();
}