// SPDX-License-Identifier: GPL-2.0
/*
 *  Copyright (C) 1994  Linus Torvalds
 *
 *  Cyrix stuff, June 1998 by:
 *	- Rafael R. Reilova (moved everything from head.S),
 *	  <rreilova@ececs.uc.edu>
 *	- Channing Corn (tests & fixes),
 *	- Andrew D. Balsa (code cleanup).
 */
#include <linux/init.h>
#include <linux/utsname.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/sched/smt.h>
#include <linux/pgtable.h>
#include <linux/bpf.h>
#include <asm/spec-ctrl.h>
#include <asm/cmdline.h>
#include <asm/bugs.h>
#include <asm/processor.h>
#include <asm/processor-flags.h>
#include <asm/fpu/api.h>
#include <asm/msr.h>
#include <asm/vmx.h>
#include <asm/paravirt.h>
#include <asm/alternative.h>
#include <asm/set_memory.h>
#include <asm/intel-family.h>
#include <asm/e820/api.h>
#include <asm/hypervisor.h>
#include <asm/tlbflush.h>
static void __init spectre_v1_select_mitigation(void);
static void __init spectre_v2_select_mitigation(void);
static void __init ssb_select_mitigation(void);
static void __init l1tf_select_mitigation(void);
static void __init mds_select_mitigation(void);
static void __init md_clear_update_mitigation(void);
static void __init md_clear_select_mitigation(void);
static void __init taa_select_mitigation(void);
static void __init mmio_select_mitigation(void);
static void __init srbds_select_mitigation(void);
static void __init l1d_flush_select_mitigation(void);
/* The base value of the SPEC_CTRL MSR that always has to be preserved. */
u64 x86_spec_ctrl_base;
EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
static DEFINE_MUTEX(spec_ctrl_mutex);
/*
 * The vendor and possibly platform specific bits which can be modified in
 * x86_spec_ctrl_base.
 */
static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;
/*
 * AMD specific MSR info for Speculative Store Bypass control.
 * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
 */
u64 __ro_after_init x86_amd_ls_cfg_base;
u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;

/* Control conditional STIBP in switch_to() */
DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp);
/* Control conditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
/* Control unconditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);

/* Control MDS CPU buffer clear before returning to user space */
DEFINE_STATIC_KEY_FALSE(mds_user_clear);
EXPORT_SYMBOL_GPL(mds_user_clear);
/* Control MDS CPU buffer clear before idling (halt, mwait) */
DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
EXPORT_SYMBOL_GPL(mds_idle_clear);
/*
 * Controls whether l1d flush based mitigations are enabled,
 * based on hw features and admin setting via boot parameter
 * defaulting to disabled.
 */
DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);

/* Controls CPU Fill buffer clear before KVM guest MMIO accesses */
DEFINE_STATIC_KEY_FALSE(mmio_stale_data_clear);
EXPORT_SYMBOL_GPL(mmio_stale_data_clear);
void __init check_bugs(void)
{
	identify_boot_cpu();

	/*
	 * identify_boot_cpu() initialized SMT support information, let the
	 * core code know.
	 */
	cpu_smt_check_topology();

	if (!IS_ENABLED(CONFIG_SMP)) {
		pr_info("CPU: ");
		print_cpu_info(&boot_cpu_data);
	}
	/*
	 * Read the SPEC_CTRL MSR to account for reserved bits which may
	 * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
	 * init code as it is not enumerated and depends on the family.
	 */
	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
		rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

	/* Allow STIBP in MSR_SPEC_CTRL if supported */
	if (boot_cpu_has(X86_FEATURE_STIBP))
		x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;
	/* Select the proper CPU mitigations before patching alternatives: */
	spectre_v1_select_mitigation();
	spectre_v2_select_mitigation();
	ssb_select_mitigation();
	l1tf_select_mitigation();
	md_clear_select_mitigation();
	srbds_select_mitigation();
	l1d_flush_select_mitigation();

	arch_smt_update();
#ifdef CONFIG_X86_32
	/*
	 * Check whether we are able to run this kernel safely on SMP.
	 *
	 * - i386 is no longer supported.
	 * - In order to run on anything without a TSC, we need to be
	 *   compiled for a i486.
	 */
	if (boot_cpu_data.x86 < 4)
		panic("Kernel requires i486+ for 'invlpg' and other features");
	init_utsname()->machine[1] =
		'0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
	alternative_instructions();

	fpu__init_check_bugs();
#else /* CONFIG_X86_64 */
	alternative_instructions();
	/*
	 * Make sure the first 2MB area is not mapped by huge pages
	 * There are typically fixed size MTRRs in there and overlapping
	 * MTRRs into large pages causes slow downs.
	 *
	 * Right now we don't do that with gbpages because there seems
	 * very little benefit for that case.
	 */
	if (!direct_gbpages)
		set_memory_4k((unsigned long)__va(0), 1);
#endif
}
void
x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
{
	u64 msrval, guestval, hostval = x86_spec_ctrl_base;
	struct thread_info *ti = current_thread_info();
	/* Is MSR_SPEC_CTRL implemented ? */
	if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
		/*
		 * Restrict guest_spec_ctrl to supported values. Clear the
		 * modifiable bits in the host base value and or the
		 * modifiable bits from the guest value.
		 */
		guestval = hostval & ~x86_spec_ctrl_mask;
		guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;

		/* SSBD controlled in MSR_SPEC_CTRL */
		if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
		    static_cpu_has(X86_FEATURE_AMD_SSBD))
			hostval |= ssbd_tif_to_spec_ctrl(ti->flags);

		/* Conditional STIBP enabled? */
		if (static_branch_unlikely(&switch_to_cond_stibp))
			hostval |= stibp_tif_to_spec_ctrl(ti->flags);

		if (hostval != guestval) {
			msrval = setguest ? guestval : hostval;
			wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
		}
	}
	/*
	 * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
	 * MSR_AMD64_LS_CFG or MSR_VIRT_SPEC_CTRL if supported.
	 */
	if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
	    !static_cpu_has(X86_FEATURE_VIRT_SSBD))
		return;

	/*
	 * If the host has SSBD mitigation enabled, force it in the host's
	 * virtual MSR value. If it's not permanently enabled, evaluate
	 * current's TIF_SSBD thread flag.
	 */
	if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
		hostval = SPEC_CTRL_SSBD;
	else
		hostval = ssbd_tif_to_spec_ctrl(ti->flags);
	/* Sanitize the guest value */
	guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;
	if (hostval != guestval) {
		unsigned long tif;

		tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
				 ssbd_spec_ctrl_to_tif(hostval);

		speculation_ctrl_update(tif);
	}
}
EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);
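/*
 * Usage sketch (not code in this file): a hypervisor calls
 * x86_virt_spec_ctrl() on both sides of running a guest, roughly:
 *
 *	x86_virt_spec_ctrl(spec_ctrl, virt_spec_ctrl, true);	<- guest entry
 *	...run the guest...
 *	x86_virt_spec_ctrl(spec_ctrl, virt_spec_ctrl, false);	<- guest exit
 *
 * where spec_ctrl/virt_spec_ctrl are the guest's values cached by the
 * caller (e.g. KVM); the variable names above are illustrative only.
 */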
static void x86_amd_ssb_disable(void)
{
	u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;

	if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
		wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
	else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
		wrmsrl(MSR_AMD64_LS_CFG, msrval);
}
#undef pr_fmt
#define pr_fmt(fmt)	"MDS: " fmt
/* Default mitigation for MDS-affected CPUs */
static enum mds_mitigations mds_mitigation __ro_after_init = MDS_MITIGATION_FULL;
static bool mds_nosmt __ro_after_init = false;

static const char * const mds_strings[] = {
	[MDS_MITIGATION_OFF]	= "Vulnerable",
	[MDS_MITIGATION_FULL]	= "Mitigation: Clear CPU buffers",
	[MDS_MITIGATION_VMWERV]	= "Vulnerable: Clear CPU buffers attempted, no microcode",
};
static void __init mds_select_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off()) {
		mds_mitigation = MDS_MITIGATION_OFF;
		return;
	}

	if (mds_mitigation == MDS_MITIGATION_FULL) {
		if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
			mds_mitigation = MDS_MITIGATION_VMWERV;

		static_branch_enable(&mds_user_clear);

		if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) &&
		    (mds_nosmt || cpu_mitigations_auto_nosmt()))
			cpu_smt_disable(false);
	}
}

static int __init mds_cmdline(char *str)
{
	if (!boot_cpu_has_bug(X86_BUG_MDS))
		return 0;

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off"))
		mds_mitigation = MDS_MITIGATION_OFF;
	else if (!strcmp(str, "full"))
		mds_mitigation = MDS_MITIGATION_FULL;
	else if (!strcmp(str, "full,nosmt")) {
		mds_mitigation = MDS_MITIGATION_FULL;
		mds_nosmt = true;
	}

	return 0;
}
early_param("mds", mds_cmdline);
#undef pr_fmt
#define pr_fmt(fmt)	"TAA: " fmt

enum taa_mitigations {
	TAA_MITIGATION_OFF,
	TAA_MITIGATION_UCODE_NEEDED,
	TAA_MITIGATION_VERW,
	TAA_MITIGATION_TSX_DISABLED,
};

/* Default mitigation for TAA-affected CPUs */
static enum taa_mitigations taa_mitigation __ro_after_init = TAA_MITIGATION_VERW;
static bool taa_nosmt __ro_after_init;
static const char * const taa_strings[] = {
	[TAA_MITIGATION_OFF]		= "Vulnerable",
	[TAA_MITIGATION_UCODE_NEEDED]	= "Vulnerable: Clear CPU buffers attempted, no microcode",
	[TAA_MITIGATION_VERW]		= "Mitigation: Clear CPU buffers",
	[TAA_MITIGATION_TSX_DISABLED]	= "Mitigation: TSX disabled",
};
static void __init taa_select_mitigation(void)
{
	u64 ia32_cap;

	if (!boot_cpu_has_bug(X86_BUG_TAA)) {
		taa_mitigation = TAA_MITIGATION_OFF;
		return;
	}

	/* TSX previously disabled by tsx=off */
	if (!boot_cpu_has(X86_FEATURE_RTM)) {
		taa_mitigation = TAA_MITIGATION_TSX_DISABLED;
		return;
	}

	if (cpu_mitigations_off()) {
		taa_mitigation = TAA_MITIGATION_OFF;
		return;
	}

	/*
	 * TAA mitigation via VERW is turned off if both
	 * tsx_async_abort=off and mds=off are specified.
	 */
	if (taa_mitigation == TAA_MITIGATION_OFF &&
	    mds_mitigation == MDS_MITIGATION_OFF)
		return;

	if (boot_cpu_has(X86_FEATURE_MD_CLEAR))
		taa_mitigation = TAA_MITIGATION_VERW;
	else
		taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
	/*
	 * VERW doesn't clear the CPU buffers when MD_CLEAR=1 and MDS_NO=1.
	 * A microcode update fixes this behavior to clear CPU buffers. It also
	 * adds support for MSR_IA32_TSX_CTRL which is enumerated by the
	 * ARCH_CAP_TSX_CTRL_MSR bit.
	 *
	 * On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode
	 * update is required.
	 */
	ia32_cap = x86_read_arch_cap_msr();
	if ( (ia32_cap & ARCH_CAP_MDS_NO) &&
	    !(ia32_cap & ARCH_CAP_TSX_CTRL_MSR))
		taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;

	/*
	 * TSX is enabled, select alternate mitigation for TAA which is
	 * the same as MDS. Enable MDS static branch to clear CPU buffers.
	 *
	 * For guests that can't determine whether the correct microcode is
	 * present on host, enable the mitigation for UCODE_NEEDED as well.
	 */
	static_branch_enable(&mds_user_clear);

	if (taa_nosmt || cpu_mitigations_auto_nosmt())
		cpu_smt_disable(false);
}
static int __init tsx_async_abort_parse_cmdline(char *str)
{
	if (!boot_cpu_has_bug(X86_BUG_TAA))
		return 0;

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off")) {
		taa_mitigation = TAA_MITIGATION_OFF;
	} else if (!strcmp(str, "full")) {
		taa_mitigation = TAA_MITIGATION_VERW;
	} else if (!strcmp(str, "full,nosmt")) {
		taa_mitigation = TAA_MITIGATION_VERW;
		taa_nosmt = true;
	}

	return 0;
}
early_param("tsx_async_abort", tsx_async_abort_parse_cmdline);
#undef pr_fmt
#define pr_fmt(fmt)	"MMIO Stale Data: " fmt

enum mmio_mitigations {
	MMIO_MITIGATION_OFF,
	MMIO_MITIGATION_UCODE_NEEDED,
	MMIO_MITIGATION_VERW,
};

/* Default mitigation for Processor MMIO Stale Data vulnerabilities */
static enum mmio_mitigations mmio_mitigation __ro_after_init = MMIO_MITIGATION_VERW;
static bool mmio_nosmt __ro_after_init = false;
static const char * const mmio_strings[] = {
	[MMIO_MITIGATION_OFF]		= "Vulnerable",
	[MMIO_MITIGATION_UCODE_NEEDED]	= "Vulnerable: Clear CPU buffers attempted, no microcode",
	[MMIO_MITIGATION_VERW]		= "Mitigation: Clear CPU buffers",
};
static void __init mmio_select_mitigation(void)
{
	u64 ia32_cap;

	if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) ||
	    cpu_mitigations_off()) {
		mmio_mitigation = MMIO_MITIGATION_OFF;
		return;
	}

	if (mmio_mitigation == MMIO_MITIGATION_OFF)
		return;

	ia32_cap = x86_read_arch_cap_msr();

	/*
	 * Enable CPU buffer clear mitigation for host and VMM, if also affected
	 * by MDS or TAA. Otherwise, enable mitigation for VMM only.
	 */
	if (boot_cpu_has_bug(X86_BUG_MDS) || (boot_cpu_has_bug(X86_BUG_TAA) &&
					      boot_cpu_has(X86_FEATURE_RTM)))
		static_branch_enable(&mds_user_clear);
	else
		static_branch_enable(&mmio_stale_data_clear);

	/*
	 * If Processor-MMIO-Stale-Data bug is present and Fill Buffer data can
	 * be propagated to uncore buffers, clearing the Fill buffers on idle
	 * is required irrespective of SMT state.
	 */
	if (!(ia32_cap & ARCH_CAP_FBSDP_NO))
		static_branch_enable(&mds_idle_clear);

	/*
	 * Check if the system has the right microcode.
	 *
	 * CPU Fill buffer clear mitigation is enumerated by either an explicit
	 * FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS
	 * affected systems.
	 */
	if ((ia32_cap & ARCH_CAP_FB_CLEAR) ||
	    (boot_cpu_has(X86_FEATURE_MD_CLEAR) &&
	     boot_cpu_has(X86_FEATURE_FLUSH_L1D) &&
	     !(ia32_cap & ARCH_CAP_MDS_NO)))
		mmio_mitigation = MMIO_MITIGATION_VERW;
	else
		mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED;

	if (mmio_nosmt || cpu_mitigations_auto_nosmt())
		cpu_smt_disable(false);
}
static int __init mmio_stale_data_parse_cmdline(char *str)
{
	if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
		return 0;

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off")) {
		mmio_mitigation = MMIO_MITIGATION_OFF;
	} else if (!strcmp(str, "full")) {
		mmio_mitigation = MMIO_MITIGATION_VERW;
	} else if (!strcmp(str, "full,nosmt")) {
		mmio_mitigation = MMIO_MITIGATION_VERW;
		mmio_nosmt = true;
	}

	return 0;
}
early_param("mmio_stale_data", mmio_stale_data_parse_cmdline);
#undef pr_fmt
#define pr_fmt(fmt)	"" fmt

static void __init md_clear_update_mitigation(void)
{
	if (cpu_mitigations_off())
		return;

	if (!static_key_enabled(&mds_user_clear))
		goto out;
	/*
	 * mds_user_clear is now enabled. Update MDS, TAA and MMIO Stale Data
	 * mitigation, if necessary.
	 */
	if (mds_mitigation == MDS_MITIGATION_OFF &&
	    boot_cpu_has_bug(X86_BUG_MDS)) {
		mds_mitigation = MDS_MITIGATION_FULL;
		mds_select_mitigation();
	}
	if (taa_mitigation == TAA_MITIGATION_OFF &&
	    boot_cpu_has_bug(X86_BUG_TAA)) {
		taa_mitigation = TAA_MITIGATION_VERW;
		taa_select_mitigation();
	}
	if (mmio_mitigation == MMIO_MITIGATION_OFF &&
	    boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) {
		mmio_mitigation = MMIO_MITIGATION_VERW;
		mmio_select_mitigation();
	}
out:
	if (boot_cpu_has_bug(X86_BUG_MDS))
		pr_info("MDS: %s\n", mds_strings[mds_mitigation]);
	if (boot_cpu_has_bug(X86_BUG_TAA))
		pr_info("TAA: %s\n", taa_strings[taa_mitigation]);
	if (boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
		pr_info("MMIO Stale Data: %s\n", mmio_strings[mmio_mitigation]);
}
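/*
 * Explanatory note: MDS, TAA and MMIO Stale Data all rely on the same
 * VERW-based CPU buffer clearing, keyed off the single mds_user_clear
 * static branch. That is why enabling the mitigation for one of them may
 * force the mitigation for the others back on in
 * md_clear_update_mitigation() above.
 */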
static void __init md_clear_select_mitigation(void)
{
	mds_select_mitigation();
	taa_select_mitigation();
	mmio_select_mitigation();

	/*
	 * As MDS, TAA and MMIO Stale Data mitigations are inter-related, update
	 * and print their mitigation after MDS, TAA and MMIO Stale Data
	 * mitigation selection is done.
	 */
	md_clear_update_mitigation();
}
#undef pr_fmt
#define pr_fmt(fmt)	"SRBDS: " fmt
enum srbds_mitigations {
	SRBDS_MITIGATION_OFF,
	SRBDS_MITIGATION_UCODE_NEEDED,
	SRBDS_MITIGATION_FULL,
	SRBDS_MITIGATION_TSX_OFF,
	SRBDS_MITIGATION_HYPERVISOR,
};

static enum srbds_mitigations srbds_mitigation __ro_after_init = SRBDS_MITIGATION_FULL;

static const char * const srbds_strings[] = {
	[SRBDS_MITIGATION_OFF]		= "Vulnerable",
	[SRBDS_MITIGATION_UCODE_NEEDED]	= "Vulnerable: No microcode",
	[SRBDS_MITIGATION_FULL]		= "Mitigation: Microcode",
	[SRBDS_MITIGATION_TSX_OFF]	= "Mitigation: TSX disabled",
	[SRBDS_MITIGATION_HYPERVISOR]	= "Unknown: Dependent on hypervisor status",
};

static bool srbds_off;
void update_srbds_msr(void)
{
	u64 mcu_ctrl;

	if (!boot_cpu_has_bug(X86_BUG_SRBDS))
		return;

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return;

	if (srbds_mitigation == SRBDS_MITIGATION_UCODE_NEEDED)
		return;

	rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);

	switch (srbds_mitigation) {
	case SRBDS_MITIGATION_OFF:
	case SRBDS_MITIGATION_TSX_OFF:
		mcu_ctrl |= RNGDS_MITG_DIS;
		break;
	case SRBDS_MITIGATION_FULL:
		mcu_ctrl &= ~RNGDS_MITG_DIS;
		break;
	default:
		break;
	}

	wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
}

static void __init srbds_select_mitigation(void)
{
	u64 ia32_cap;

	if (!boot_cpu_has_bug(X86_BUG_SRBDS))
		return;

	/*
	 * Check to see if this is one of the MDS_NO systems supporting
	 * TSX that are only exposed to SRBDS when TSX is enabled.
	 */
	ia32_cap = x86_read_arch_cap_msr();
	if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM))
		srbds_mitigation = SRBDS_MITIGATION_TSX_OFF;
	else if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		srbds_mitigation = SRBDS_MITIGATION_HYPERVISOR;
	else if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
		srbds_mitigation = SRBDS_MITIGATION_UCODE_NEEDED;
	else if (cpu_mitigations_off() || srbds_off)
		srbds_mitigation = SRBDS_MITIGATION_OFF;

	update_srbds_msr();
	pr_info("%s\n", srbds_strings[srbds_mitigation]);
}
static int __init srbds_parse_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (!boot_cpu_has_bug(X86_BUG_SRBDS))
		return 0;

	srbds_off = !strcmp(str, "off");
	return 0;
}
early_param("srbds", srbds_parse_cmdline);
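/*
 * Usage note (illustrative): "srbds=off" leaves the issue unmitigated.
 * The full SRBDS mitigation is applied by microcode when RDRAND/RDSEED
 * execute and is reported to slow those instructions down, which is
 * presumably why this opt-out exists.
 */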
#undef pr_fmt
#define pr_fmt(fmt)	"L1D Flush : " fmt

enum l1d_flush_mitigations {
	L1D_FLUSH_OFF = 0,
	L1D_FLUSH_ON,
};

static enum l1d_flush_mitigations l1d_flush_mitigation __initdata = L1D_FLUSH_OFF;

static void __init l1d_flush_select_mitigation(void)
{
	if (!l1d_flush_mitigation || !boot_cpu_has(X86_FEATURE_FLUSH_L1D))
		return;

	static_branch_enable(&switch_mm_cond_l1d_flush);
	pr_info("Conditional flush on switch_mm() enabled\n");
}

static int __init l1d_flush_parse_cmdline(char *str)
{
	if (!strcmp(str, "on"))
		l1d_flush_mitigation = L1D_FLUSH_ON;

	return 0;
}
early_param("l1d_flush", l1d_flush_parse_cmdline);
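/*
 * Usage note (illustrative): booting with "l1d_flush=on" only arms the
 * static branch; an individual task still has to opt in via
 *
 *	prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_L1D_FLUSH, PR_SPEC_ENABLE, 0, 0);
 *
 * before its L1D is flushed on context switch (see l1d_flush_prctl_set()
 * further down).
 */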
#undef pr_fmt
#define pr_fmt(fmt)	"Spectre V1 : " fmt
enum spectre_v1_mitigation {
	SPECTRE_V1_MITIGATION_NONE,
	SPECTRE_V1_MITIGATION_AUTO,
};

static enum spectre_v1_mitigation spectre_v1_mitigation __ro_after_init =
	SPECTRE_V1_MITIGATION_AUTO;

static const char * const spectre_v1_strings[] = {
	[SPECTRE_V1_MITIGATION_NONE] = "Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers",
	[SPECTRE_V1_MITIGATION_AUTO] = "Mitigation: usercopy/swapgs barriers and __user pointer sanitization",
};
/*
 * Does SMAP provide full mitigation against speculative kernel access to
 * userspace?
 */
static bool smap_works_speculatively(void)
{
	if (!boot_cpu_has(X86_FEATURE_SMAP))
		return false;

	/*
	 * On CPUs which are vulnerable to Meltdown, SMAP does not
	 * prevent speculative access to user data in the L1 cache.
	 * Consider SMAP to be non-functional as a mitigation on these
	 * CPUs.
	 */
	if (boot_cpu_has(X86_BUG_CPU_MELTDOWN))
		return false;

	return true;
}

static void __init spectre_v1_select_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1) || cpu_mitigations_off()) {
		spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
		return;
	}

	if (spectre_v1_mitigation == SPECTRE_V1_MITIGATION_AUTO) {
		/*
		 * With Spectre v1, a user can speculatively control either
		 * path of a conditional swapgs with a user-controlled GS
		 * value. The mitigation is to add lfences to both code paths.
		 *
		 * If FSGSBASE is enabled, the user can put a kernel address in
		 * GS, in which case SMAP provides no protection.
		 *
		 * If FSGSBASE is disabled, the user can only put a user space
		 * address in GS. That makes an attack harder, but still
		 * possible if there's no SMAP protection.
		 */
		if (boot_cpu_has(X86_FEATURE_FSGSBASE) ||
		    !smap_works_speculatively()) {
			/*
			 * Mitigation can be provided from SWAPGS itself or
			 * PTI as the CR3 write in the Meltdown mitigation
			 * is serializing.
			 *
			 * If neither is there, mitigate with an LFENCE to
			 * stop speculation through swapgs.
			 */
			if (boot_cpu_has_bug(X86_BUG_SWAPGS) &&
			    !boot_cpu_has(X86_FEATURE_PTI))
				setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_USER);

			/*
			 * Enable lfences in the kernel entry (non-swapgs)
			 * paths, to prevent user entry from speculatively
			 * skipping swapgs.
			 */
			setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_KERNEL);
		}
	}

	pr_info("%s\n", spectre_v1_strings[spectre_v1_mitigation]);
}

static int __init nospectre_v1_cmdline(char *str)
{
	spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
	return 0;
}
early_param("nospectre_v1", nospectre_v1_cmdline);
#undef pr_fmt
#define pr_fmt(fmt)	"Spectre V2 : " fmt

static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
	SPECTRE_V2_NONE;

static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init =
	SPECTRE_V2_USER_NONE;
static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init =
	SPECTRE_V2_USER_NONE;
#ifdef CONFIG_RETPOLINE
static bool spectre_v2_bad_module;

bool retpoline_module_ok(bool has_retpoline)
{
	if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
		return true;

	pr_err("System may be vulnerable to spectre v2\n");
	spectre_v2_bad_module = true;
	return false;
}

static inline const char *spectre_v2_module_string(void)
{
	return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
}
#else
static inline const char *spectre_v2_module_string(void) { return ""; }
#endif
#define SPECTRE_V2_LFENCE_MSG "WARNING: LFENCE mitigation is not recommended for this CPU, data leaks possible!\n"
#define SPECTRE_V2_EIBRS_EBPF_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS on, data leaks possible via Spectre v2 BHB attacks!\n"
#define SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS+LFENCE mitigation and SMT, data leaks possible via Spectre v2 BHB attacks!\n"
#ifdef CONFIG_BPF_SYSCALL
void unpriv_ebpf_notify(int new_state)
{
	if (new_state)
		return;

	/* Unprivileged eBPF is enabled */
	switch (spectre_v2_enabled) {
	case SPECTRE_V2_EIBRS:
		pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
		break;
	case SPECTRE_V2_EIBRS_LFENCE:
		if (sched_smt_active())
			pr_err(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);
		break;
	default:
		break;
	}
}
#endif
static inline bool match_option(const char *arg, int arglen, const char *opt)
{
	int len = strlen(opt);

	return len == arglen && !strncmp(arg, opt, len);
}
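/*
 * For example, with the boot parameter "spectre_v2_user=prctl",
 * cmdline_find_option() yields arg = "prctl" and ret = 5, so
 * match_option(arg, ret, "prctl") is true while the "prctl,ibpb"
 * candidate is rejected by the length check.
 */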
/* The kernel command line selection for spectre v2 */
enum spectre_v2_mitigation_cmd {
	SPECTRE_V2_CMD_NONE,
	SPECTRE_V2_CMD_AUTO,
	SPECTRE_V2_CMD_FORCE,
	SPECTRE_V2_CMD_RETPOLINE,
	SPECTRE_V2_CMD_RETPOLINE_GENERIC,
	SPECTRE_V2_CMD_RETPOLINE_LFENCE,
	SPECTRE_V2_CMD_EIBRS,
	SPECTRE_V2_CMD_EIBRS_RETPOLINE,
	SPECTRE_V2_CMD_EIBRS_LFENCE,
};
enum spectre_v2_user_cmd {
	SPECTRE_V2_USER_CMD_NONE,
	SPECTRE_V2_USER_CMD_AUTO,
	SPECTRE_V2_USER_CMD_FORCE,
	SPECTRE_V2_USER_CMD_PRCTL,
	SPECTRE_V2_USER_CMD_PRCTL_IBPB,
	SPECTRE_V2_USER_CMD_SECCOMP,
	SPECTRE_V2_USER_CMD_SECCOMP_IBPB,
};

static const char * const spectre_v2_user_strings[] = {
	[SPECTRE_V2_USER_NONE]			= "User space: Vulnerable",
	[SPECTRE_V2_USER_STRICT]		= "User space: Mitigation: STIBP protection",
	[SPECTRE_V2_USER_STRICT_PREFERRED]	= "User space: Mitigation: STIBP always-on protection",
	[SPECTRE_V2_USER_PRCTL]			= "User space: Mitigation: STIBP via prctl",
	[SPECTRE_V2_USER_SECCOMP]		= "User space: Mitigation: STIBP via seccomp and prctl",
};
static const struct {
	const char			*option;
	enum spectre_v2_user_cmd	cmd;
	bool				secure;
} v2_user_options[] __initconst = {
	{ "auto",		SPECTRE_V2_USER_CMD_AUTO,		false },
	{ "off",		SPECTRE_V2_USER_CMD_NONE,		false },
	{ "on",			SPECTRE_V2_USER_CMD_FORCE,		true  },
	{ "prctl",		SPECTRE_V2_USER_CMD_PRCTL,		false },
	{ "prctl,ibpb",		SPECTRE_V2_USER_CMD_PRCTL_IBPB,		false },
	{ "seccomp",		SPECTRE_V2_USER_CMD_SECCOMP,		false },
	{ "seccomp,ibpb",	SPECTRE_V2_USER_CMD_SECCOMP_IBPB,	false },
};
static void __init spec_v2_user_print_cond(const char *reason, bool secure)
{
	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
		pr_info("spectre_v2_user=%s forced on command line.\n", reason);
}
static enum spectre_v2_user_cmd __init
spectre_v2_parse_user_cmdline(enum spectre_v2_mitigation_cmd v2_cmd)
{
	char arg[20];
	int ret, i;

	switch (v2_cmd) {
	case SPECTRE_V2_CMD_NONE:
		return SPECTRE_V2_USER_CMD_NONE;
	case SPECTRE_V2_CMD_FORCE:
		return SPECTRE_V2_USER_CMD_FORCE;
	default:
		break;
	}

	ret = cmdline_find_option(boot_command_line, "spectre_v2_user",
				  arg, sizeof(arg));
	if (ret < 0)
		return SPECTRE_V2_USER_CMD_AUTO;

	for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) {
		if (match_option(arg, ret, v2_user_options[i].option)) {
			spec_v2_user_print_cond(v2_user_options[i].option,
						v2_user_options[i].secure);
			return v2_user_options[i].cmd;
		}
	}

	pr_err("Unknown user space protection option (%s). Switching to AUTO select\n", arg);
	return SPECTRE_V2_USER_CMD_AUTO;
}
static inline bool spectre_v2_in_eibrs_mode(enum spectre_v2_mitigation mode)
{
	return (mode == SPECTRE_V2_EIBRS ||
		mode == SPECTRE_V2_EIBRS_RETPOLINE ||
		mode == SPECTRE_V2_EIBRS_LFENCE);
}
static void __init
spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
{
	enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE;
	bool smt_possible = IS_ENABLED(CONFIG_SMP);
	enum spectre_v2_user_cmd cmd;

	if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
		return;

	if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
	    cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
		smt_possible = false;

	cmd = spectre_v2_parse_user_cmdline(v2_cmd);
	switch (cmd) {
	case SPECTRE_V2_USER_CMD_NONE:
		goto set_mode;
	case SPECTRE_V2_USER_CMD_FORCE:
		mode = SPECTRE_V2_USER_STRICT;
		break;
	case SPECTRE_V2_USER_CMD_PRCTL:
	case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
		mode = SPECTRE_V2_USER_PRCTL;
		break;
	case SPECTRE_V2_USER_CMD_AUTO:
	case SPECTRE_V2_USER_CMD_SECCOMP:
	case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
		if (IS_ENABLED(CONFIG_SECCOMP))
			mode = SPECTRE_V2_USER_SECCOMP;
		else
			mode = SPECTRE_V2_USER_PRCTL;
		break;
	}

	/* Initialize Indirect Branch Prediction Barrier */
	if (boot_cpu_has(X86_FEATURE_IBPB)) {
		setup_force_cpu_cap(X86_FEATURE_USE_IBPB);

		spectre_v2_user_ibpb = mode;
		switch (cmd) {
		case SPECTRE_V2_USER_CMD_FORCE:
		case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
		case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
			static_branch_enable(&switch_mm_always_ibpb);
			spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
			break;
		case SPECTRE_V2_USER_CMD_PRCTL:
		case SPECTRE_V2_USER_CMD_AUTO:
		case SPECTRE_V2_USER_CMD_SECCOMP:
			static_branch_enable(&switch_mm_cond_ibpb);
			break;
		default:
			break;
		}

		pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
			static_key_enabled(&switch_mm_always_ibpb) ?
			"always-on" : "conditional");
	}

	/*
	 * If no STIBP, enhanced IBRS is enabled or SMT impossible, STIBP is not
	 * required.
	 */
	if (!boot_cpu_has(X86_FEATURE_STIBP) ||
	    !smt_possible ||
	    spectre_v2_in_eibrs_mode(spectre_v2_enabled))
		return;

	/*
	 * At this point, an STIBP mode other than "off" has been set.
	 * If STIBP support is not being forced, check if STIBP always-on
	 * is preferred.
	 */
	if (mode != SPECTRE_V2_USER_STRICT &&
	    boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
		mode = SPECTRE_V2_USER_STRICT_PREFERRED;

set_mode:
	spectre_v2_user_stibp = mode;

	pr_info("%s\n", spectre_v2_user_strings[mode]);
}
static const char * const spectre_v2_strings[] = {
	[SPECTRE_V2_NONE]			= "Vulnerable",
	[SPECTRE_V2_RETPOLINE]			= "Mitigation: Retpolines",
	[SPECTRE_V2_LFENCE]			= "Mitigation: LFENCE",
	[SPECTRE_V2_EIBRS]			= "Mitigation: Enhanced IBRS",
	[SPECTRE_V2_EIBRS_LFENCE]		= "Mitigation: Enhanced IBRS + LFENCE",
	[SPECTRE_V2_EIBRS_RETPOLINE]		= "Mitigation: Enhanced IBRS + Retpolines",
};
static const struct {
	const char			*option;
	enum spectre_v2_mitigation_cmd	cmd;
	bool				secure;
} mitigation_options[] __initconst = {
	{ "off",		SPECTRE_V2_CMD_NONE,		  false },
	{ "on",			SPECTRE_V2_CMD_FORCE,		  true  },
	{ "retpoline",		SPECTRE_V2_CMD_RETPOLINE,	  false },
	{ "retpoline,amd",	SPECTRE_V2_CMD_RETPOLINE_LFENCE,  false },
	{ "retpoline,lfence",	SPECTRE_V2_CMD_RETPOLINE_LFENCE,  false },
	{ "retpoline,generic",	SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
	{ "eibrs",		SPECTRE_V2_CMD_EIBRS,		  false },
	{ "eibrs,lfence",	SPECTRE_V2_CMD_EIBRS_LFENCE,	  false },
	{ "eibrs,retpoline",	SPECTRE_V2_CMD_EIBRS_RETPOLINE,	  false },
	{ "auto",		SPECTRE_V2_CMD_AUTO,		  false },
};
static void __init spec_v2_print_cond(const char *reason, bool secure)
{
	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
		pr_info("%s selected on command line.\n", reason);
}
static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
{
	enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;
	char arg[20];
	int ret, i;

	if (cmdline_find_option_bool(boot_command_line, "nospectre_v2") ||
	    cpu_mitigations_off())
		return SPECTRE_V2_CMD_NONE;

	ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
	if (ret < 0)
		return SPECTRE_V2_CMD_AUTO;

	for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
		if (!match_option(arg, ret, mitigation_options[i].option))
			continue;
		cmd = mitigation_options[i].cmd;
		break;
	}

	if (i >= ARRAY_SIZE(mitigation_options)) {
		pr_err("unknown option (%s). Switching to AUTO select\n", arg);
		return SPECTRE_V2_CMD_AUTO;
	}
	if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
	     cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE ||
	     cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC ||
	     cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
	     cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
	    !IS_ENABLED(CONFIG_RETPOLINE)) {
		pr_err("%s selected but not compiled in. Switching to AUTO select\n",
		       mitigation_options[i].option);
		return SPECTRE_V2_CMD_AUTO;
	}

	if ((cmd == SPECTRE_V2_CMD_EIBRS ||
	     cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
	     cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
	    !boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
		pr_err("%s selected but CPU doesn't have eIBRS. Switching to AUTO select\n",
		       mitigation_options[i].option);
		return SPECTRE_V2_CMD_AUTO;
	}

	if ((cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE ||
	     cmd == SPECTRE_V2_CMD_EIBRS_LFENCE) &&
	    !boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
		pr_err("%s selected, but CPU doesn't have a serializing LFENCE. Switching to AUTO select\n",
		       mitigation_options[i].option);
		return SPECTRE_V2_CMD_AUTO;
	}
	spec_v2_print_cond(mitigation_options[i].option,
			   mitigation_options[i].secure);
	return cmd;
}
static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void)
{
	if (!IS_ENABLED(CONFIG_RETPOLINE)) {
		pr_err("Kernel not compiled with retpoline; no mitigation available!");
		return SPECTRE_V2_NONE;
	}

	return SPECTRE_V2_RETPOLINE;
}
static void __init spectre_v2_select_mitigation(void)
{
	enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
	enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;

	/*
	 * If the CPU is not affected and the command line mode is NONE or AUTO
	 * then nothing to do.
	 */
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
	    (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
		return;

	switch (cmd) {
	case SPECTRE_V2_CMD_NONE:
		return;

	case SPECTRE_V2_CMD_FORCE:
	case SPECTRE_V2_CMD_AUTO:
		if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
			mode = SPECTRE_V2_EIBRS;
			break;
		}

		mode = spectre_v2_select_retpoline();
		break;

	case SPECTRE_V2_CMD_RETPOLINE_LFENCE:
		pr_err(SPECTRE_V2_LFENCE_MSG);
		mode = SPECTRE_V2_LFENCE;
		break;

	case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
		mode = SPECTRE_V2_RETPOLINE;
		break;

	case SPECTRE_V2_CMD_RETPOLINE:
		mode = spectre_v2_select_retpoline();
		break;

	case SPECTRE_V2_CMD_EIBRS:
		mode = SPECTRE_V2_EIBRS;
		break;

	case SPECTRE_V2_CMD_EIBRS_LFENCE:
		mode = SPECTRE_V2_EIBRS_LFENCE;
		break;

	case SPECTRE_V2_CMD_EIBRS_RETPOLINE:
		mode = SPECTRE_V2_EIBRS_RETPOLINE;
		break;
	}
	if (mode == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
		pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);

	if (spectre_v2_in_eibrs_mode(mode)) {
		/* Force it so VMEXIT will restore correctly */
		x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
		wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
	}
	switch (mode) {
	case SPECTRE_V2_NONE:
	case SPECTRE_V2_EIBRS:
		break;

	case SPECTRE_V2_LFENCE:
	case SPECTRE_V2_EIBRS_LFENCE:
		setup_force_cpu_cap(X86_FEATURE_RETPOLINE_LFENCE);
		fallthrough;

	case SPECTRE_V2_RETPOLINE:
	case SPECTRE_V2_EIBRS_RETPOLINE:
		setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
		break;
	}
	spectre_v2_enabled = mode;
	pr_info("%s\n", spectre_v2_strings[mode]);
	/*
	 * If spectre v2 protection has been enabled, unconditionally fill
	 * RSB during a context switch; this protects against two independent
	 * issues:
	 *
	 *	- RSB underflow (and switch to BTB) on Skylake+
	 *	- SpectreRSB variant of spectre v2 on X86_BUG_SPECTRE_V2 CPUs
	 */
	setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
	pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");

	/*
	 * Retpoline means the kernel is safe because it has no indirect
	 * branches. Enhanced IBRS protects firmware too, so, enable restricted
	 * speculation around firmware calls only when Enhanced IBRS isn't
	 * supported.
	 *
	 * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because
	 * the user might select retpoline on the kernel command line and if
	 * the CPU supports Enhanced IBRS, kernel might unintentionally not
	 * enable IBRS around firmware calls.
	 */
	if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_eibrs_mode(mode)) {
		setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
		pr_info("Enabling Restricted Speculation for firmware calls\n");
	}

	/* Set up IBPB and STIBP depending on the general spectre V2 command */
	spectre_v2_user_select_mitigation(cmd);
}
static void update_stibp_msr(void * __unused)
{
	wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
}
/* Update x86_spec_ctrl_base in case SMT state changed. */
static void update_stibp_strict(void)
{
	u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;

	if (sched_smt_active())
		mask |= SPEC_CTRL_STIBP;

	if (mask == x86_spec_ctrl_base)
		return;

	pr_info("Update user space SMT mitigation: STIBP %s\n",
		mask & SPEC_CTRL_STIBP ? "always-on" : "off");
	x86_spec_ctrl_base = mask;
	on_each_cpu(update_stibp_msr, NULL, 1);
}

/* Update the static key controlling the evaluation of TIF_SPEC_IB */
static void update_indir_branch_cond(void)
{
	if (sched_smt_active())
		static_branch_enable(&switch_to_cond_stibp);
	else
		static_branch_disable(&switch_to_cond_stibp);
}
#undef pr_fmt
#define pr_fmt(fmt) fmt

/* Update the static key controlling the MDS CPU buffer clear in idle */
static void update_mds_branch_idle(void)
{
	u64 ia32_cap = x86_read_arch_cap_msr();

	/*
	 * Enable the idle clearing if SMT is active on CPUs which are
	 * affected only by MSBDS and not any other MDS variant.
	 *
	 * The other variants cannot be mitigated when SMT is enabled, so
	 * clearing the buffers on idle just to prevent the Store Buffer
	 * repartitioning leak would be a window dressing exercise.
	 */
	if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY))
		return;

	if (sched_smt_active()) {
		static_branch_enable(&mds_idle_clear);
	} else if (mmio_mitigation == MMIO_MITIGATION_OFF ||
		   (ia32_cap & ARCH_CAP_FBSDP_NO)) {
		static_branch_disable(&mds_idle_clear);
	}
}
#define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n"
#define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n"
void cpu_bugs_smt_update(void)
{
	mutex_lock(&spec_ctrl_mutex);

	if (sched_smt_active() && unprivileged_ebpf_enabled() &&
	    spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
		pr_warn_once(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);

	switch (spectre_v2_user_stibp) {
	case SPECTRE_V2_USER_NONE:
		break;
	case SPECTRE_V2_USER_STRICT:
	case SPECTRE_V2_USER_STRICT_PREFERRED:
		update_stibp_strict();
		break;
	case SPECTRE_V2_USER_PRCTL:
	case SPECTRE_V2_USER_SECCOMP:
		update_indir_branch_cond();
		break;
	}

	switch (mds_mitigation) {
	case MDS_MITIGATION_FULL:
	case MDS_MITIGATION_VMWERV:
		if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY))
			pr_warn_once(MDS_MSG_SMT);
		update_mds_branch_idle();
		break;
	case MDS_MITIGATION_OFF:
		break;
	}

	switch (taa_mitigation) {
	case TAA_MITIGATION_VERW:
	case TAA_MITIGATION_UCODE_NEEDED:
		if (sched_smt_active())
			pr_warn_once(TAA_MSG_SMT);
		break;
	case TAA_MITIGATION_TSX_DISABLED:
	case TAA_MITIGATION_OFF:
		break;
	}

	mutex_unlock(&spec_ctrl_mutex);
}
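/*
 * Explanatory note: cpu_bugs_smt_update() runs when the SMT state of the
 * system changes (CPU hotplug or the sysfs SMT control file), e.g. after
 * "echo off > /sys/devices/system/cpu/smt/control", so the STIBP
 * strictness and the idle buffer clearing above track sibling
 * availability at runtime.
 */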
#undef pr_fmt
#define pr_fmt(fmt)	"Speculative Store Bypass: " fmt
static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE;

/* The kernel command line selection */
enum ssb_mitigation_cmd {
	SPEC_STORE_BYPASS_CMD_NONE,
	SPEC_STORE_BYPASS_CMD_AUTO,
	SPEC_STORE_BYPASS_CMD_ON,
	SPEC_STORE_BYPASS_CMD_PRCTL,
	SPEC_STORE_BYPASS_CMD_SECCOMP,
};

static const char * const ssb_strings[] = {
	[SPEC_STORE_BYPASS_NONE]	= "Vulnerable",
	[SPEC_STORE_BYPASS_DISABLE]	= "Mitigation: Speculative Store Bypass disabled",
	[SPEC_STORE_BYPASS_PRCTL]	= "Mitigation: Speculative Store Bypass disabled via prctl",
	[SPEC_STORE_BYPASS_SECCOMP]	= "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
};
static const struct {
	const char *option;
	enum ssb_mitigation_cmd cmd;
} ssb_mitigation_options[]  __initconst = {
	{ "auto",	SPEC_STORE_BYPASS_CMD_AUTO },    /* Platform decides */
	{ "on",		SPEC_STORE_BYPASS_CMD_ON },      /* Disable Speculative Store Bypass */
	{ "off",	SPEC_STORE_BYPASS_CMD_NONE },    /* Don't touch Speculative Store Bypass */
	{ "prctl",	SPEC_STORE_BYPASS_CMD_PRCTL },   /* Disable Speculative Store Bypass via prctl */
	{ "seccomp",	SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */
};
static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
{
	enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
	char arg[20];
	int ret, i;

	if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable") ||
	    cpu_mitigations_off()) {
		return SPEC_STORE_BYPASS_CMD_NONE;
	} else {
		ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
					  arg, sizeof(arg));
		if (ret < 0)
			return SPEC_STORE_BYPASS_CMD_AUTO;

		for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
			if (!match_option(arg, ret, ssb_mitigation_options[i].option))
				continue;

			cmd = ssb_mitigation_options[i].cmd;
			break;
		}

		if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
			pr_err("unknown option (%s). Switching to AUTO select\n", arg);
			return SPEC_STORE_BYPASS_CMD_AUTO;
		}
	}

	return cmd;
}
static enum ssb_mitigation __init __ssb_select_mitigation(void)
{
	enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
	enum ssb_mitigation_cmd cmd;

	if (!boot_cpu_has(X86_FEATURE_SSBD))
		return mode;

	cmd = ssb_parse_cmdline();
	if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
	    (cmd == SPEC_STORE_BYPASS_CMD_NONE ||
	     cmd == SPEC_STORE_BYPASS_CMD_AUTO))
		return mode;

	switch (cmd) {
	case SPEC_STORE_BYPASS_CMD_AUTO:
	case SPEC_STORE_BYPASS_CMD_SECCOMP:
		/*
		 * Choose prctl+seccomp as the default mode if seccomp is
		 * enabled.
		 */
		if (IS_ENABLED(CONFIG_SECCOMP))
			mode = SPEC_STORE_BYPASS_SECCOMP;
		else
			mode = SPEC_STORE_BYPASS_PRCTL;
		break;
	case SPEC_STORE_BYPASS_CMD_ON:
		mode = SPEC_STORE_BYPASS_DISABLE;
		break;
	case SPEC_STORE_BYPASS_CMD_PRCTL:
		mode = SPEC_STORE_BYPASS_PRCTL;
		break;
	case SPEC_STORE_BYPASS_CMD_NONE:
		break;
	}
	/*
	 * If SSBD is controlled by the SPEC_CTRL MSR, then set the proper
	 * bit in the mask to allow guests to use the mitigation even in the
	 * case where the host does not enable it.
	 */
	if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
	    static_cpu_has(X86_FEATURE_AMD_SSBD)) {
		x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
	}
	/*
	 * We have three CPU feature flags that are in play here:
	 *  - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
	 *  - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
	 *  - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
	 */
	if (mode == SPEC_STORE_BYPASS_DISABLE) {
		setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
		/*
		 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may
		 * use a completely different MSR and bit dependent on family.
		 */
		if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
		    !static_cpu_has(X86_FEATURE_AMD_SSBD)) {
			x86_amd_ssb_disable();
		} else {
			x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
			wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
		}
	}

	return mode;
}
static void ssb_select_mitigation(void)
{
	ssb_mode = __ssb_select_mitigation();

	if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
		pr_info("%s\n", ssb_strings[ssb_mode]);
}
#undef pr_fmt
#define pr_fmt(fmt)	"Speculation prctl: " fmt

static void task_update_spec_tif(struct task_struct *tsk)
{
	/* Force the update of the real TIF bits */
	set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE);

	/*
	 * Immediately update the speculation control MSRs for the current
	 * task, but for a non-current task delay setting the CPU
	 * mitigation until it is scheduled next.
	 *
	 * This can only happen for SECCOMP mitigation. For PRCTL it's
	 * always the current task.
	 */
	if (tsk == current)
		speculation_ctrl_update_current();
}
static int l1d_flush_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	if (!static_branch_unlikely(&switch_mm_cond_l1d_flush))
		return -EPERM;

	switch (ctrl) {
	case PR_SPEC_ENABLE:
		set_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH);
		return 0;
	case PR_SPEC_DISABLE:
		clear_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH);
		return 0;
	default:
		return -ERANGE;
	}
}
static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
	    ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
		return -ENXIO;

	switch (ctrl) {
	case PR_SPEC_ENABLE:
		/* If speculation is force disabled, enable is not allowed */
		if (task_spec_ssb_force_disable(task))
			return -EPERM;
		task_clear_spec_ssb_disable(task);
		task_clear_spec_ssb_noexec(task);
		task_update_spec_tif(task);
		break;
	case PR_SPEC_DISABLE:
		task_set_spec_ssb_disable(task);
		task_clear_spec_ssb_noexec(task);
		task_update_spec_tif(task);
		break;
	case PR_SPEC_FORCE_DISABLE:
		task_set_spec_ssb_disable(task);
		task_set_spec_ssb_force_disable(task);
		task_clear_spec_ssb_noexec(task);
		task_update_spec_tif(task);
		break;
	case PR_SPEC_DISABLE_NOEXEC:
		if (task_spec_ssb_force_disable(task))
			return -EPERM;
		task_set_spec_ssb_disable(task);
		task_set_spec_ssb_noexec(task);
		task_update_spec_tif(task);
		break;
	default:
		return -ERANGE;
	}
	return 0;
}
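/*
 * Usage sketch (userspace, not code in this file): with
 * spec_store_bypass_disable=prctl or =seccomp, a task disables
 * Speculative Store Bypass for itself with
 *
 *	prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
 *	      PR_SPEC_DISABLE, 0, 0);
 *
 * PR_SPEC_FORCE_DISABLE is the same but can never be relaxed again, which
 * is what the seccomp hook below uses.
 */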
static bool is_spec_ib_user_controlled(void)
{
	return spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL ||
		spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
		spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
		spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP;
}
static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	switch (ctrl) {
	case PR_SPEC_ENABLE:
		if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
		    spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
			return 0;

		/*
		 * With strict mode for both IBPB and STIBP, the instruction
		 * code paths avoid checking this task flag and instead,
		 * unconditionally run the instruction. However, STIBP and IBPB
		 * are independent and either can be set to conditionally
		 * enabled regardless of the mode of the other.
		 *
		 * If either is set to conditional, allow the task flag to be
		 * updated, unless it was force-disabled by a previous prctl
		 * call. Currently, this is possible on an AMD CPU which has the
		 * feature X86_FEATURE_AMD_STIBP_ALWAYS_ON. In this case, if the
		 * kernel is booted with 'spectre_v2_user=seccomp', then
		 * spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP and
		 * spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED.
		 */
		if (!is_spec_ib_user_controlled() ||
		    task_spec_ib_force_disable(task))
			return -EPERM;

		task_clear_spec_ib_disable(task);
		task_update_spec_tif(task);
		break;
	case PR_SPEC_DISABLE:
	case PR_SPEC_FORCE_DISABLE:
		/*
		 * Indirect branch speculation is always allowed when
		 * mitigation is force disabled.
		 */
		if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
		    spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
			return -EPERM;

		if (!is_spec_ib_user_controlled())
			return 0;

		task_set_spec_ib_disable(task);
		if (ctrl == PR_SPEC_FORCE_DISABLE)
			task_set_spec_ib_force_disable(task);
		task_update_spec_tif(task);
		break;
	default:
		return -ERANGE;
	}
	return 0;
}
int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
			     unsigned long ctrl)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssb_prctl_set(task, ctrl);
	case PR_SPEC_INDIRECT_BRANCH:
		return ib_prctl_set(task, ctrl);
	case PR_SPEC_L1D_FLUSH:
		return l1d_flush_prctl_set(task, ctrl);
	default:
		return -ENODEV;
	}
}
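/*
 * Explanatory note: this is the arch hook behind
 * prctl(PR_SET_SPECULATION_CTRL, which, ctrl, 0, 0); "which" selects the
 * speculation misfeature and "ctrl" one of the PR_SPEC_* controls handled
 * by the three helpers above.
 */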
#ifdef CONFIG_SECCOMP
void arch_seccomp_spec_mitigate(struct task_struct *task)
{
	if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
		ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
	if (spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
	    spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP)
		ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);
}
#endif
static int l1d_flush_prctl_get(struct task_struct *task)
{
	if (!static_branch_unlikely(&switch_mm_cond_l1d_flush))
		return PR_SPEC_FORCE_DISABLE;

	if (test_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH))
		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
	else
		return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
}

static int ssb_prctl_get(struct task_struct *task)
{
	switch (ssb_mode) {
	case SPEC_STORE_BYPASS_DISABLE:
		return PR_SPEC_DISABLE;
	case SPEC_STORE_BYPASS_SECCOMP:
	case SPEC_STORE_BYPASS_PRCTL:
		if (task_spec_ssb_force_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
		if (task_spec_ssb_noexec(task))
			return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC;
		if (task_spec_ssb_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
	default:
		if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
			return PR_SPEC_ENABLE;
		return PR_SPEC_NOT_AFFECTED;
	}
}

static int ib_prctl_get(struct task_struct *task)
{
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
		return PR_SPEC_NOT_AFFECTED;

	if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
	    spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
		return PR_SPEC_ENABLE;
	else if (is_spec_ib_user_controlled()) {
		if (task_spec_ib_force_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
		if (task_spec_ib_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
	} else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
	    spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
	    spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
		return PR_SPEC_DISABLE;
	else
		return PR_SPEC_NOT_AFFECTED;
}

int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssb_prctl_get(task);
	case PR_SPEC_INDIRECT_BRANCH:
		return ib_prctl_get(task);
	case PR_SPEC_L1D_FLUSH:
		return l1d_flush_prctl_get(task);
	default:
		return -ENODEV;
	}
}
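/*
 * Counterpart of the setter above: prctl(PR_GET_SPECULATION_CTRL, which,
 * 0, 0, 0) returns a PR_SPEC_* bitmask, e.g. PR_SPEC_PRCTL |
 * PR_SPEC_DISABLE for a task that opted out of Speculative Store Bypass.
 */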
void x86_spec_ctrl_setup_ap(void)
{
	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
		wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

	if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
		x86_amd_ssb_disable();
}
bool itlb_multihit_kvm_mitigation;
EXPORT_SYMBOL_GPL(itlb_multihit_kvm_mitigation);
#undef pr_fmt
#define pr_fmt(fmt)	"L1TF: " fmt
/* Default mitigation for L1TF-affected CPUs */
enum l1tf_mitigations l1tf_mitigation __ro_after_init = L1TF_MITIGATION_FLUSH;
#if IS_ENABLED(CONFIG_KVM_INTEL)
EXPORT_SYMBOL_GPL(l1tf_mitigation);
#endif
enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);

/*
 * These CPUs all support 44bits physical address space internally in the
 * cache but CPUID can report a smaller number of physical address bits.
 *
 * The L1TF mitigation uses the top most address bit for the inversion of
 * non present PTEs. When the installed memory reaches into the top most
 * address bit due to memory holes, which has been observed on machines
 * which report 36bits physical address bits and have 32G RAM installed,
 * then the mitigation range check in l1tf_select_mitigation() triggers.
 * This is a false positive because the mitigation is still possible due to
 * the fact that the cache uses 44bit internally. Use the cache bits
 * instead of the reported physical bits and adjust them on the affected
 * machines to 44bit if the reported bits are less than 44.
 */
static void override_cache_bits(struct cpuinfo_x86 *c)
{
	if (c->x86 != 6)
		return;

	switch (c->x86_model) {
	case INTEL_FAM6_NEHALEM:
	case INTEL_FAM6_WESTMERE:
	case INTEL_FAM6_SANDYBRIDGE:
	case INTEL_FAM6_IVYBRIDGE:
	case INTEL_FAM6_HASWELL:
	case INTEL_FAM6_HASWELL_L:
	case INTEL_FAM6_HASWELL_G:
	case INTEL_FAM6_BROADWELL:
	case INTEL_FAM6_BROADWELL_G:
	case INTEL_FAM6_SKYLAKE_L:
	case INTEL_FAM6_SKYLAKE:
	case INTEL_FAM6_KABYLAKE_L:
	case INTEL_FAM6_KABYLAKE:
		if (c->x86_cache_bits < 44)
			c->x86_cache_bits = 44;
		break;
	}
}
static void __init l1tf_select_mitigation(void)
{
	u64 half_pa;

	if (!boot_cpu_has_bug(X86_BUG_L1TF))
		return;

	if (cpu_mitigations_off())
		l1tf_mitigation = L1TF_MITIGATION_OFF;
	else if (cpu_mitigations_auto_nosmt())
		l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;

	override_cache_bits(&boot_cpu_data);

	switch (l1tf_mitigation) {
	case L1TF_MITIGATION_OFF:
	case L1TF_MITIGATION_FLUSH_NOWARN:
	case L1TF_MITIGATION_FLUSH:
		break;
	case L1TF_MITIGATION_FLUSH_NOSMT:
	case L1TF_MITIGATION_FULL:
		cpu_smt_disable(false);
		break;
	case L1TF_MITIGATION_FULL_FORCE:
		cpu_smt_disable(true);
		break;
	}

#if CONFIG_PGTABLE_LEVELS == 2
	pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
	return;
#endif

	half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
	if (l1tf_mitigation != L1TF_MITIGATION_OFF &&
	    e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
		pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
		pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
				half_pa);
		pr_info("However, doing so will make a part of your RAM unusable.\n");
		pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html might help you decide.\n");
		return;
	}

	setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV);
}
static int __init l1tf_cmdline(char *str)
{
	if (!boot_cpu_has_bug(X86_BUG_L1TF))
		return 0;

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off"))
		l1tf_mitigation = L1TF_MITIGATION_OFF;
	else if (!strcmp(str, "flush,nowarn"))
		l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN;
	else if (!strcmp(str, "flush"))
		l1tf_mitigation = L1TF_MITIGATION_FLUSH;
	else if (!strcmp(str, "flush,nosmt"))
		l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
	else if (!strcmp(str, "full"))
		l1tf_mitigation = L1TF_MITIGATION_FULL;
	else if (!strcmp(str, "full,force"))
		l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE;

	return 0;
}
early_param("l1tf", l1tf_cmdline);
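/*
 * Usage note (illustrative): "l1tf=full,force" disables SMT and enables
 * the unconditional L1D flush on VMENTER; unlike "l1tf=full" it also
 * locks out runtime SMT control, per the cpu_smt_disable(true) call above.
 */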
#undef pr_fmt
#define pr_fmt(fmt)	fmt

#ifdef CONFIG_SYSFS

#define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"
#if IS_ENABLED(CONFIG_KVM_INTEL)
static const char * const l1tf_vmx_states[] = {
	[VMENTER_L1D_FLUSH_AUTO]		= "auto",
	[VMENTER_L1D_FLUSH_NEVER]		= "vulnerable",
	[VMENTER_L1D_FLUSH_COND]		= "conditional cache flushes",
	[VMENTER_L1D_FLUSH_ALWAYS]		= "cache flushes",
	[VMENTER_L1D_FLUSH_EPT_DISABLED]	= "EPT disabled",
	[VMENTER_L1D_FLUSH_NOT_REQUIRED]	= "flush not necessary"
};

static ssize_t l1tf_show_state(char *buf)
{
	if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO)
		return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);

	if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||
	    (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER &&
	     sched_smt_active())) {
		return sprintf(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
			       l1tf_vmx_states[l1tf_vmx_mitigation]);
	}

	return sprintf(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
		       l1tf_vmx_states[l1tf_vmx_mitigation],
		       sched_smt_active() ? "vulnerable" : "disabled");
}
static ssize_t itlb_multihit_show_state(char *buf)
{
	if (!boot_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) ||
	    !boot_cpu_has(X86_FEATURE_VMX))
		return sprintf(buf, "KVM: Mitigation: VMX unsupported\n");
	else if (!(cr4_read_shadow() & X86_CR4_VMXE))
		return sprintf(buf, "KVM: Mitigation: VMX disabled\n");
	else if (itlb_multihit_kvm_mitigation)
		return sprintf(buf, "KVM: Mitigation: Split huge pages\n");
	else
		return sprintf(buf, "KVM: Vulnerable\n");
}
#else
static ssize_t l1tf_show_state(char *buf)
{
	return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
}

static ssize_t itlb_multihit_show_state(char *buf)
{
	return sprintf(buf, "Processor vulnerable\n");
}
#endif
static ssize_t mds_show_state(char *buf)
{
	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
		return sprintf(buf, "%s; SMT Host state unknown\n",
			       mds_strings[mds_mitigation]);
	}

	if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) {
		return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
			       (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" :
			        sched_smt_active() ? "mitigated" : "disabled"));
	}

	return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
		       sched_smt_active() ? "vulnerable" : "disabled");
}

static ssize_t tsx_async_abort_show_state(char *buf)
{
	if ((taa_mitigation == TAA_MITIGATION_TSX_DISABLED) ||
	    (taa_mitigation == TAA_MITIGATION_OFF))
		return sprintf(buf, "%s\n", taa_strings[taa_mitigation]);

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
		return sprintf(buf, "%s; SMT Host state unknown\n",
			       taa_strings[taa_mitigation]);
	}

	return sprintf(buf, "%s; SMT %s\n", taa_strings[taa_mitigation],
		       sched_smt_active() ? "vulnerable" : "disabled");
}

static ssize_t mmio_stale_data_show_state(char *buf)
{
	if (mmio_mitigation == MMIO_MITIGATION_OFF)
		return sysfs_emit(buf, "%s\n", mmio_strings[mmio_mitigation]);

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
		return sysfs_emit(buf, "%s; SMT Host state unknown\n",
				  mmio_strings[mmio_mitigation]);
	}

	return sysfs_emit(buf, "%s; SMT %s\n", mmio_strings[mmio_mitigation],
			  sched_smt_active() ? "vulnerable" : "disabled");
}
static char *stibp_state(void)
{
	if (spectre_v2_in_eibrs_mode(spectre_v2_enabled))
		return "";

	switch (spectre_v2_user_stibp) {
	case SPECTRE_V2_USER_NONE:
		return ", STIBP: disabled";
	case SPECTRE_V2_USER_STRICT:
		return ", STIBP: forced";
	case SPECTRE_V2_USER_STRICT_PREFERRED:
		return ", STIBP: always-on";
	case SPECTRE_V2_USER_PRCTL:
	case SPECTRE_V2_USER_SECCOMP:
		if (static_key_enabled(&switch_to_cond_stibp))
			return ", STIBP: conditional";
	}
	return "";
}

static char *ibpb_state(void)
{
	if (boot_cpu_has(X86_FEATURE_IBPB)) {
		if (static_key_enabled(&switch_mm_always_ibpb))
			return ", IBPB: always-on";
		if (static_key_enabled(&switch_mm_cond_ibpb))
			return ", IBPB: conditional";
		return ", IBPB: disabled";
	}
	return "";
}

static ssize_t spectre_v2_show_state(char *buf)
{
	if (spectre_v2_enabled == SPECTRE_V2_LFENCE)
		return sprintf(buf, "Vulnerable: LFENCE\n");

	if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
		return sprintf(buf, "Vulnerable: eIBRS with unprivileged eBPF\n");

	if (sched_smt_active() && unprivileged_ebpf_enabled() &&
	    spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
		return sprintf(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n");

	return sprintf(buf, "%s%s%s%s%s%s\n",
		       spectre_v2_strings[spectre_v2_enabled],
		       ibpb_state(),
		       boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
		       stibp_state(),
		       boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
		       spectre_v2_module_string());
}
static ssize_t srbds_show_state(char *buf)
{
	return sprintf(buf, "%s\n", srbds_strings[srbds_mitigation]);
}
static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
			       char *buf, unsigned int bug)
{
	if (!boot_cpu_has_bug(bug))
		return sprintf(buf, "Not affected\n");

	switch (bug) {
	case X86_BUG_CPU_MELTDOWN:
		if (boot_cpu_has(X86_FEATURE_PTI))
			return sprintf(buf, "Mitigation: PTI\n");

		if (hypervisor_is_type(X86_HYPER_XEN_PV))
			return sprintf(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n");

		break;

	case X86_BUG_SPECTRE_V1:
		return sprintf(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]);

	case X86_BUG_SPECTRE_V2:
		return spectre_v2_show_state(buf);

	case X86_BUG_SPEC_STORE_BYPASS:
		return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);

	case X86_BUG_L1TF:
		if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
			return l1tf_show_state(buf);
		break;

	case X86_BUG_MDS:
		return mds_show_state(buf);

	case X86_BUG_TAA:
		return tsx_async_abort_show_state(buf);

	case X86_BUG_ITLB_MULTIHIT:
		return itlb_multihit_show_state(buf);

	case X86_BUG_SRBDS:
		return srbds_show_state(buf);

	case X86_BUG_MMIO_STALE_DATA:
		return mmio_stale_data_show_state(buf);

	default:
		break;
	}

	return sprintf(buf, "Vulnerable\n");
}
ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
}

ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
}

ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
}

ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
}

ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_MDS);
}

ssize_t cpu_show_tsx_async_abort(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_TAA);
}

ssize_t cpu_show_itlb_multihit(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_ITLB_MULTIHIT);
}

ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SRBDS);
}

ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA);
}
#endif