/*
 * Copyright (C) 1994  Linus Torvalds
 *
 * Cyrix stuff, June 1998 by:
 *	- Rafael R. Reilova (moved everything from head.S),
 *	  <rreilova@ececs.uc.edu>
 *	- Channing Corn (tests & fixes),
 *	- Andrew D. Balsa (code cleanup).
 */
#include <linux/init.h>
#include <linux/utsname.h>
#include <linux/cpu.h>
#include <linux/smp.h>

#include <asm/spec-ctrl.h>
#include <asm/cmdline.h>
#include <asm/bugs.h>
#include <asm/processor.h>
#include <asm/processor-flags.h>
#include <asm/fpu/internal.h>
#include <asm/msr.h>
#include <asm/paravirt.h>
#include <asm/alternative.h>
#include <asm/pgtable.h>
#include <asm/set_memory.h>
#include <asm/intel-family.h>
static void __init spectre_v2_select_mitigation(void);
static void __init ssb_select_mitigation(void);

/*
 * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
 * writes to SPEC_CTRL contain whatever reserved bits have been set.
 */
u64 __ro_after_init x86_spec_ctrl_base;
/*
 * The vendor and possibly platform specific bits which can be modified in
 * x86_spec_ctrl_base.
 */
static u64 __ro_after_init x86_spec_ctrl_mask = ~SPEC_CTRL_IBRS;
/*
 * AMD specific MSR info for Speculative Store Bypass control.
 * x86_amd_ls_cfg_rds_mask is initialized in identify_boot_cpu().
 */
u64 __ro_after_init x86_amd_ls_cfg_base;
u64 __ro_after_init x86_amd_ls_cfg_rds_mask;
void __init check_bugs(void)
{
	identify_boot_cpu();

	if (!IS_ENABLED(CONFIG_SMP)) {
		pr_info("CPU: ");
		print_cpu_info(&boot_cpu_data);
	}

	/*
	 * Read the SPEC_CTRL MSR to account for reserved bits which may
	 * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
	 * init code as it is not enumerated and depends on the family.
	 */
	if (boot_cpu_has(X86_FEATURE_IBRS))
		rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
	/* Select the proper spectre mitigation before patching alternatives */
	spectre_v2_select_mitigation();

	/*
	 * Select proper mitigation for any exposure to the Speculative Store
	 * Bypass vulnerability.
	 */
	ssb_select_mitigation();
#ifdef CONFIG_X86_32
	/*
	 * Check whether we are able to run this kernel safely on SMP.
	 *
	 * - i386 is no longer supported.
	 * - In order to run on anything without a TSC, we need to be
	 *   compiled for a i486.
	 */
	if (boot_cpu_data.x86 < 4)
		panic("Kernel requires i486+ for 'invlpg' and other features");
	init_utsname()->machine[1] =
		'0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
	alternative_instructions();

	fpu__init_check_bugs();
#else /* CONFIG_X86_64 */
	alternative_instructions();
	/*
	 * Make sure the first 2MB area is not mapped by huge pages.
	 * There are typically fixed size MTRRs in there and overlapping
	 * MTRRs into large pages causes slow downs.
	 *
	 * Right now we don't do that with gbpages because there seems
	 * very little benefit for that case.
	 */
	if (!direct_gbpages)
		set_memory_4k((unsigned long)__va(0), 1);
#endif
}
/* The kernel command line selection */
enum spectre_v2_mitigation_cmd {
	SPECTRE_V2_CMD_NONE,
	SPECTRE_V2_CMD_AUTO,
	SPECTRE_V2_CMD_FORCE,
	SPECTRE_V2_CMD_RETPOLINE,
	SPECTRE_V2_CMD_RETPOLINE_GENERIC,
	SPECTRE_V2_CMD_RETPOLINE_AMD,
};
static const char *spectre_v2_strings[] = {
	[SPECTRE_V2_NONE]			= "Vulnerable",
	[SPECTRE_V2_RETPOLINE_MINIMAL]		= "Vulnerable: Minimal generic ASM retpoline",
	[SPECTRE_V2_RETPOLINE_MINIMAL_AMD]	= "Vulnerable: Minimal AMD ASM retpoline",
	[SPECTRE_V2_RETPOLINE_GENERIC]		= "Mitigation: Full generic retpoline",
	[SPECTRE_V2_RETPOLINE_AMD]		= "Mitigation: Full AMD retpoline",
};
#undef pr_fmt
#define pr_fmt(fmt)	"Spectre V2 mitigation: " fmt
static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE;
void x86_spec_ctrl_set(u64 val)
{
	if (val & x86_spec_ctrl_mask)
		WARN_ONCE(1, "SPEC_CTRL MSR value 0x%16llx is unknown.\n", val);
	else
		wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base | val);
}
EXPORT_SYMBOL_GPL(x86_spec_ctrl_set);
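
/*
 * Worked example (illustrative, not from the original source): with the
 * boot-time mask of ~SPEC_CTRL_IBRS, a call such as
 *
 *	x86_spec_ctrl_set(SPEC_CTRL_IBRS);
 *
 * passes the check above (val & x86_spec_ctrl_mask == 0) and writes the MSR,
 * while a value with any other bit set trips the WARN_ONCE and the write is
 * skipped. __ssb_select_mitigation() below widens the accepted set by
 * clearing SPEC_CTRL_RDS from the mask before passing that bit in.
 */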
u64 x86_spec_ctrl_get_default(void)
{
	u64 msrval = x86_spec_ctrl_base;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
		msrval |= rds_tif_to_spec_ctrl(current_thread_info()->flags);
	return msrval;
}
EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default);

void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl)
{
	u64 host = x86_spec_ctrl_base;

	if (!boot_cpu_has(X86_FEATURE_IBRS))
		return;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
		host |= rds_tif_to_spec_ctrl(current_thread_info()->flags);

	if (host != guest_spec_ctrl)
		wrmsrl(MSR_IA32_SPEC_CTRL, guest_spec_ctrl);
}
EXPORT_SYMBOL_GPL(x86_spec_ctrl_set_guest);

void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl)
{
	u64 host = x86_spec_ctrl_base;

	if (!boot_cpu_has(X86_FEATURE_IBRS))
		return;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
		host |= rds_tif_to_spec_ctrl(current_thread_info()->flags);

	if (host != guest_spec_ctrl)
		wrmsrl(MSR_IA32_SPEC_CTRL, host);
}
EXPORT_SYMBOL_GPL(x86_spec_ctrl_restore_host);
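
/*
 * Usage sketch (illustrative): a hypervisor would bracket guest entry with
 * the two helpers above so the guest's SPEC_CTRL value is live only while
 * guest code runs. "vcpu_spec_ctrl" is a hypothetical per-vCPU value, not
 * something defined in this file:
 *
 *	x86_spec_ctrl_set_guest(vcpu_spec_ctrl);
 *	<enter and run the guest>
 *	x86_spec_ctrl_restore_host(vcpu_spec_ctrl);
 *
 * Both helpers skip the MSR write when the host and guest values match,
 * keeping the expensive wrmsrl() off the common path.
 */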
static void x86_amd_rds_enable(void)
{
	u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_rds_mask;

	if (boot_cpu_has(X86_FEATURE_AMD_RDS))
		wrmsrl(MSR_AMD64_LS_CFG, msrval);
}
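
/*
 * Note (assumption, based on the AMD init code referenced above):
 * x86_amd_ls_cfg_rds_mask holds a family-dependent LS_CFG bit (e.g. bit 10
 * on family 0x17), which is why this file reads it from a variable set up
 * in identify_boot_cpu() rather than hardcoding the bit.
 */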
static void __init spec2_print_if_insecure(const char *reason)
{
	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
		pr_info("%s\n", reason);
}

static void __init spec2_print_if_secure(const char *reason)
{
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
		pr_info("%s\n", reason);
}

static inline bool retp_compiler(void)
{
	return __is_defined(RETPOLINE);
}

static inline bool match_option(const char *arg, int arglen, const char *opt)
{
	int len = strlen(opt);

	return len == arglen && !strncmp(arg, opt, len);
}
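
/*
 * Worked example (illustrative): for "spectre_v2=retpoline,amd",
 * cmdline_find_option() copies "retpoline,amd" into arg and returns its
 * length, 13. match_option(arg, 13, "retpoline") is then false because the
 * lengths differ, while match_option(arg, 13, "retpoline,amd") is true; the
 * strlen() comparison is what prevents prefix matches.
 */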
static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
{
	char arg[20];
	int ret;

	ret = cmdline_find_option(boot_command_line, "spectre_v2", arg,
				  sizeof(arg));
	if (ret > 0) {
		if (match_option(arg, ret, "off")) {
			goto disable;
		} else if (match_option(arg, ret, "on")) {
			spec2_print_if_secure("force enabled on command line.");
			return SPECTRE_V2_CMD_FORCE;
		} else if (match_option(arg, ret, "retpoline")) {
			spec2_print_if_insecure("retpoline selected on command line.");
			return SPECTRE_V2_CMD_RETPOLINE;
		} else if (match_option(arg, ret, "retpoline,amd")) {
			if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
				pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n");
				return SPECTRE_V2_CMD_AUTO;
			}
			spec2_print_if_insecure("AMD retpoline selected on command line.");
			return SPECTRE_V2_CMD_RETPOLINE_AMD;
		} else if (match_option(arg, ret, "retpoline,generic")) {
			spec2_print_if_insecure("generic retpoline selected on command line.");
			return SPECTRE_V2_CMD_RETPOLINE_GENERIC;
		} else if (match_option(arg, ret, "auto")) {
			return SPECTRE_V2_CMD_AUTO;
		}
	}

	if (!cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
		return SPECTRE_V2_CMD_AUTO;
disable:
	spec2_print_if_insecure("disabled on command line.");
	return SPECTRE_V2_CMD_NONE;
}
/* Check for Skylake-like CPUs (for RSB handling) */
static bool __init is_skylake_era(void)
{
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
	    boot_cpu_data.x86 == 6) {
		switch (boot_cpu_data.x86_model) {
		case INTEL_FAM6_SKYLAKE_MOBILE:
		case INTEL_FAM6_SKYLAKE_DESKTOP:
		case INTEL_FAM6_SKYLAKE_X:
		case INTEL_FAM6_KABYLAKE_MOBILE:
		case INTEL_FAM6_KABYLAKE_DESKTOP:
			return true;
		}
	}
	return false;
}
static void __init spectre_v2_select_mitigation(void)
{
	enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
	enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;
	/*
	 * If the CPU is not affected and the command line mode is NONE or AUTO
	 * then there is nothing to do.
	 */
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
	    (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
		return;

	switch (cmd) {
	case SPECTRE_V2_CMD_NONE:
		return;

	case SPECTRE_V2_CMD_FORCE:
		/* FALLTHRU */
	case SPECTRE_V2_CMD_AUTO:
		goto retpoline_auto;

	case SPECTRE_V2_CMD_RETPOLINE_AMD:
		if (IS_ENABLED(CONFIG_RETPOLINE))
			goto retpoline_amd;
		break;
	case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
		if (IS_ENABLED(CONFIG_RETPOLINE))
			goto retpoline_generic;
		break;
	case SPECTRE_V2_CMD_RETPOLINE:
		if (IS_ENABLED(CONFIG_RETPOLINE))
			goto retpoline_auto;
		break;
	}
	pr_err("kernel not compiled with retpoline; no mitigation available!\n");
	return;

retpoline_auto:
	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
	retpoline_amd:
		if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
			pr_err("LFENCE not serializing. Switching to generic retpoline\n");
			goto retpoline_generic;
		}
		mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD :
					 SPECTRE_V2_RETPOLINE_MINIMAL_AMD;
		setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
		setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
	} else {
	retpoline_generic:
		mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_GENERIC :
					 SPECTRE_V2_RETPOLINE_MINIMAL;
		setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
	}
	spectre_v2_enabled = mode;
	pr_info("%s\n", spectre_v2_strings[mode]);

	pr_info("Speculation control IBPB %s IBRS %s\n",
		ibpb_supported ? "supported" : "not-supported",
		ibrs_supported ? "supported" : "not-supported");
	/*
	 * If we are running a full retpoline kernel, IBRS in kernel mode is
	 * not also required; default IBRS to disabled.
	 */
	if (mode == SPECTRE_V2_RETPOLINE_AMD ||
	    mode == SPECTRE_V2_RETPOLINE_GENERIC) {
		if (ibrs_supported) {
			pr_info("Retpoline compiled kernel. Defaulting IBRS to disabled\n");
			sysctl_ibrs_enabled = 0;
		}
	}
	/*
	 * If neither SMEP nor KPTI are available, there is a risk of
	 * hitting userspace addresses in the RSB after a context switch
	 * from a shallow call stack to a deeper one. To prevent this fill
	 * the entire RSB, even when using IBRS.
	 *
	 * Skylake era CPUs have a separate issue with *underflow* of the
	 * RSB, when they will predict 'ret' targets from the generic BTB.
	 * The proper mitigation for this is IBRS. If IBRS is not supported
	 * or deactivated in favour of retpolines the RSB fill on context
	 * switch is required.
	 */
	if ((!boot_cpu_has(X86_FEATURE_PTI) &&
	     !boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) {
		setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
		pr_info("Filling RSB on context switch\n");
	}
}
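
/*
 * Note (illustrative): X86_FEATURE_RSB_CTXSW gates an alternatives site in
 * the context-switch path which overwrites the return stack buffer with
 * benign call targets, so stale, possibly attacker-controlled 'ret'
 * predictions cannot survive into the next task.
 */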
#undef pr_fmt
#define pr_fmt(fmt)	"Speculative Store Bypass: " fmt
static enum ssb_mitigation ssb_mode = SPEC_STORE_BYPASS_NONE;

/* The kernel command line selection */
enum ssb_mitigation_cmd {
	SPEC_STORE_BYPASS_CMD_NONE,
	SPEC_STORE_BYPASS_CMD_AUTO,
	SPEC_STORE_BYPASS_CMD_ON,
};

static const char *ssb_strings[] = {
	[SPEC_STORE_BYPASS_NONE]	= "Vulnerable",
	[SPEC_STORE_BYPASS_DISABLE]	= "Mitigation: Speculative Store Bypass disabled"
};
static const struct {
	const char *option;
	enum ssb_mitigation_cmd cmd;
} ssb_mitigation_options[] = {
	{ "auto",	SPEC_STORE_BYPASS_CMD_AUTO },	/* Platform decides */
	{ "on",		SPEC_STORE_BYPASS_CMD_ON },	/* Disable Speculative Store Bypass */
	{ "off",	SPEC_STORE_BYPASS_CMD_NONE },	/* Don't touch Speculative Store Bypass */
};
static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
{
	enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
	char arg[20];
	int ret, i;

	if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable")) {
		return SPEC_STORE_BYPASS_CMD_NONE;
	} else {
		ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
					  arg, sizeof(arg));
		if (ret < 0)
			return SPEC_STORE_BYPASS_CMD_AUTO;

		for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
			if (!match_option(arg, ret, ssb_mitigation_options[i].option))
				continue;

			cmd = ssb_mitigation_options[i].cmd;
			break;
		}

		if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
			pr_err("unknown option (%s). Switching to AUTO select\n", arg);
			return SPEC_STORE_BYPASS_CMD_AUTO;
		}
	}

	return cmd;
}
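
/*
 * Worked example (illustrative): booting with
 * "spec_store_bypass_disable=on" makes cmdline_find_option() return 2 with
 * arg == "on", which the table walk above maps to SPEC_STORE_BYPASS_CMD_ON.
 * "nospec_store_bypass_disable" short-circuits to CMD_NONE before the table
 * is consulted at all.
 */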
static enum ssb_mitigation __init __ssb_select_mitigation(void)
{
	enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
	enum ssb_mitigation_cmd cmd;

	if (!boot_cpu_has(X86_FEATURE_RDS))
		return mode;

	cmd = ssb_parse_cmdline();
	if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
	    (cmd == SPEC_STORE_BYPASS_CMD_NONE ||
	     cmd == SPEC_STORE_BYPASS_CMD_AUTO))
		return mode;

	switch (cmd) {
	case SPEC_STORE_BYPASS_CMD_AUTO:
		/*
		 * AMD platforms by default don't need SSB mitigation.
		 */
		if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
			break;
		/* FALLTHRU */
	case SPEC_STORE_BYPASS_CMD_ON:
		mode = SPEC_STORE_BYPASS_DISABLE;
		break;
	case SPEC_STORE_BYPASS_CMD_NONE:
		break;
	}

	/*
	 * We have three CPU feature flags that are in play here:
	 *  - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
	 *  - X86_FEATURE_RDS - CPU is able to turn off speculative store bypass
	 *  - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
	 */
	if (mode != SPEC_STORE_BYPASS_NONE) {
		setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
		/*
		 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD uses
		 * a completely different MSR and bit dependent on family.
		 */
		switch (boot_cpu_data.x86_vendor) {
		case X86_VENDOR_INTEL:
			x86_spec_ctrl_base |= SPEC_CTRL_RDS;
			x86_spec_ctrl_mask &= ~SPEC_CTRL_RDS;
			x86_spec_ctrl_set(SPEC_CTRL_RDS);
			break;
		case X86_VENDOR_AMD:
			x86_amd_rds_enable();
			break;
		}
	}

	return mode;
}

static void ssb_select_mitigation(void)
{
	ssb_mode = __ssb_select_mitigation();

	if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
		pr_info("%s\n", ssb_strings[ssb_mode]);
}
#undef pr_fmt

void x86_spec_ctrl_setup_ap(void)
{
	if (boot_cpu_has(X86_FEATURE_IBRS))
		x86_spec_ctrl_set(x86_spec_ctrl_base & ~x86_spec_ctrl_mask);

	if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
		x86_amd_rds_enable();
}
#ifdef CONFIG_SYSFS

ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
			char *buf, unsigned int bug)
{
	if (!boot_cpu_has_bug(bug))
		return sprintf(buf, "Not affected\n");

	switch (bug) {
	case X86_BUG_CPU_MELTDOWN:
		if (boot_cpu_has(X86_FEATURE_PTI))
			return sprintf(buf, "Mitigation: PTI\n");
		break;

	case X86_BUG_SPECTRE_V1:
		return sprintf(buf, "Mitigation: OSB (observable speculation barrier, Intel v6)\n");

	case X86_BUG_SPECTRE_V2:
		return sprintf(buf, "%s%s\n", spectre_v2_strings[spectre_v2_enabled],
			       ibpb_inuse ? ", IBPB (Intel v4)" : "");

	case X86_BUG_SPEC_STORE_BYPASS:
		return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);

	default:
		break;
	}

	return sprintf(buf, "Vulnerable\n");
}
ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
}

ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
}

ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
}
#endif