// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1994  Linus Torvalds
 *
 * Cyrix stuff, June 1998 by:
 * - Rafael R. Reilova (moved everything from head.S),
 *        <rreilova@ececs.uc.edu>
 * - Channing Corn (tests & fixes),
 * - Andrew D. Balsa (code cleanup).
 */
#include <linux/init.h>
#include <linux/utsname.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/sched/smt.h>
#include <linux/pgtable.h>
#include <linux/bpf.h>

#include <asm/spec-ctrl.h>
#include <asm/cmdline.h>
#include <asm/processor.h>
#include <asm/processor-flags.h>
#include <asm/fpu/api.h>
#include <asm/paravirt.h>
#include <asm/alternative.h>
#include <asm/set_memory.h>
#include <asm/intel-family.h>
#include <asm/e820/api.h>
#include <asm/hypervisor.h>
#include <asm/tlbflush.h>

static void __init spectre_v1_select_mitigation(void);
static void __init spectre_v2_select_mitigation(void);
static void __init ssb_select_mitigation(void);
static void __init l1tf_select_mitigation(void);
static void __init mds_select_mitigation(void);
static void __init md_clear_update_mitigation(void);
static void __init md_clear_select_mitigation(void);
static void __init taa_select_mitigation(void);
static void __init mmio_select_mitigation(void);
static void __init srbds_select_mitigation(void);
static void __init l1d_flush_select_mitigation(void);

/* The base value of the SPEC_CTRL MSR that always has to be preserved. */
u64 x86_spec_ctrl_base;
EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
static DEFINE_MUTEX(spec_ctrl_mutex);

/*
 * The vendor and possibly platform specific bits which can be modified in
 * x86_spec_ctrl_base.
 */
static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;

/*
 * AMD specific MSR info for Speculative Store Bypass control.
 * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
 */
u64 __ro_after_init x86_amd_ls_cfg_base;
u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;

/* Control conditional STIBP in switch_to() */
DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp);
/* Control conditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
/* Control unconditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);

/* Control MDS CPU buffer clear before returning to user space */
DEFINE_STATIC_KEY_FALSE(mds_user_clear);
EXPORT_SYMBOL_GPL(mds_user_clear);
/* Control MDS CPU buffer clear before idling (halt, mwait) */
DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
EXPORT_SYMBOL_GPL(mds_idle_clear);

/*
 * Controls whether l1d flush based mitigations are enabled,
 * based on hw features and admin setting via boot parameter
 * otherwise always off.
 */
DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);

/* Controls CPU Fill buffer clear before KVM guest MMIO accesses */
DEFINE_STATIC_KEY_FALSE(mmio_stale_data_clear);
EXPORT_SYMBOL_GPL(mmio_stale_data_clear);
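
/*
 * Note added for readability (not from the original sources): each static key
 * above starts out false, so the buffer-clear and barrier code it guards is
 * patched out until one of the mitigation-selection routines below enables it.
 * Callers elsewhere in the kernel are assumed to test them with the usual
 * pattern, e.g.:
 *
 *	if (static_branch_unlikely(&switch_mm_cond_ibpb))
 *		do_conditional_ibpb();
 *
 * where do_conditional_ibpb() is a hypothetical helper name used only for
 * illustration; the real call sites live outside this file.
 */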

void __init check_bugs(void)
{
        identify_boot_cpu();

        /*
         * identify_boot_cpu() initialized SMT support information, let the
         * core code know.
         */
        cpu_smt_check_topology();

        if (!IS_ENABLED(CONFIG_SMP)) {
                pr_info("CPU: ");
                print_cpu_info(&boot_cpu_data);
        }

        /*
         * Read the SPEC_CTRL MSR to account for reserved bits which may
         * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
         * init code as it is not enumerated and depends on the family.
         */
        if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
                rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

        /* Allow STIBP in MSR_SPEC_CTRL if supported */
        if (boot_cpu_has(X86_FEATURE_STIBP))
                x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;

        /* Select the proper CPU mitigations before patching alternatives: */
        spectre_v1_select_mitigation();
        spectre_v2_select_mitigation();
        ssb_select_mitigation();
        l1tf_select_mitigation();
        md_clear_select_mitigation();
        srbds_select_mitigation();
        l1d_flush_select_mitigation();

#ifdef CONFIG_X86_32
        /*
         * Check whether we are able to run this kernel safely on SMP.
         *
         * - i386 is no longer supported.
         * - In order to run on anything without a TSC, we need to be
         *   compiled for a i486.
         */
        if (boot_cpu_data.x86 < 4)
                panic("Kernel requires i486+ for 'invlpg' and other features");

        init_utsname()->machine[1] =
                '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
        alternative_instructions();

        fpu__init_check_bugs();
#else /* CONFIG_X86_64 */
        alternative_instructions();

        /*
         * Make sure the first 2MB area is not mapped by huge pages
         * There are typically fixed size MTRRs in there and overlapping
         * MTRRs into large pages causes slow downs.
         *
         * Right now we don't do that with gbpages because there seems
         * very little benefit for that case.
         */
        if (!direct_gbpages)
                set_memory_4k((unsigned long)__va(0), 1);
#endif
}
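
/*
 * Illustrative note (not from the original file): the per-vulnerability
 * selections above are steered by kernel command line parameters parsed
 * further down in this file, e.g. "mitigations=off", "mds=full,nosmt",
 * "spectre_v2=retpoline", "spec_store_bypass_disable=prctl" or
 * "l1tf=full,force". The exact accepted spellings are the ones handled by
 * the early_param() parsers below.
 */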

void
x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
{
        u64 msrval, guestval, hostval = x86_spec_ctrl_base;
        struct thread_info *ti = current_thread_info();

        /* Is MSR_SPEC_CTRL implemented ? */
        if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
                /*
                 * Restrict guest_spec_ctrl to supported values. Clear the
                 * modifiable bits in the host base value and or the
                 * modifiable bits from the guest value.
                 */
                guestval = hostval & ~x86_spec_ctrl_mask;
                guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;

                /* SSBD controlled in MSR_SPEC_CTRL */
                if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
                    static_cpu_has(X86_FEATURE_AMD_SSBD))
                        hostval |= ssbd_tif_to_spec_ctrl(ti->flags);

                /* Conditional STIBP enabled? */
                if (static_branch_unlikely(&switch_to_cond_stibp))
                        hostval |= stibp_tif_to_spec_ctrl(ti->flags);

                if (hostval != guestval) {
                        msrval = setguest ? guestval : hostval;
                        wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
                }
        }

        /*
         * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
         * MSR_AMD64_LS_CFG or MSR_VIRT_SPEC_CTRL if supported.
         */
        if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
            !static_cpu_has(X86_FEATURE_VIRT_SSBD))
                return;

        /*
         * If the host has SSBD mitigation enabled, force it in the host's
         * virtual MSR value. If its not permanently enabled, evaluate
         * current's TIF_SSBD thread flag.
         */
        if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
                hostval = SPEC_CTRL_SSBD;
        else
                hostval = ssbd_tif_to_spec_ctrl(ti->flags);

        /* Sanitize the guest value */
        guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;

        if (hostval != guestval) {
                unsigned long tif;

                tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
                                 ssbd_spec_ctrl_to_tif(hostval);

                speculation_ctrl_update(tif);
        }
}
EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);
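
/*
 * Worked example (illustration only, values assumed): with
 * x86_spec_ctrl_mask = SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD (0x7),
 * hostval = SPEC_CTRL_IBRS (0x1) and a guest requesting
 * guest_spec_ctrl = SPEC_CTRL_SSBD | BIT(10) (0x404):
 *
 *	guestval = (0x1 & ~0x7) | (0x404 & 0x7) = 0x4
 *
 * i.e. the guest gets SSBD, IBRS follows the guest's (cleared) request because
 * it is in the modifiable mask, and the unknown bit 10 is forced back to the
 * host's value.
 */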

static void x86_amd_ssb_disable(void)
{
        u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;

        if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
                wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
        else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
                wrmsrl(MSR_AMD64_LS_CFG, msrval);
}

#undef pr_fmt
#define pr_fmt(fmt)	"MDS: " fmt

/* Default mitigation for MDS-affected CPUs */
static enum mds_mitigations mds_mitigation __ro_after_init = MDS_MITIGATION_FULL;
static bool mds_nosmt __ro_after_init = false;

static const char * const mds_strings[] = {
        [MDS_MITIGATION_OFF]	= "Vulnerable",
        [MDS_MITIGATION_FULL]	= "Mitigation: Clear CPU buffers",
        [MDS_MITIGATION_VMWERV]	= "Vulnerable: Clear CPU buffers attempted, no microcode",
};

static void __init mds_select_mitigation(void)
{
        if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off()) {
                mds_mitigation = MDS_MITIGATION_OFF;
                return;
        }

        if (mds_mitigation == MDS_MITIGATION_FULL) {
                if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
                        mds_mitigation = MDS_MITIGATION_VMWERV;

                static_branch_enable(&mds_user_clear);

                if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) &&
                    (mds_nosmt || cpu_mitigations_auto_nosmt()))
                        cpu_smt_disable(false);
        }
}

static int __init mds_cmdline(char *str)
{
        if (!boot_cpu_has_bug(X86_BUG_MDS))
                return 0;

        if (!strcmp(str, "off"))
                mds_mitigation = MDS_MITIGATION_OFF;
        else if (!strcmp(str, "full"))
                mds_mitigation = MDS_MITIGATION_FULL;
        else if (!strcmp(str, "full,nosmt")) {
                mds_mitigation = MDS_MITIGATION_FULL;
                mds_nosmt = true;
        }

        return 0;
}
early_param("mds", mds_cmdline);

#undef pr_fmt
#define pr_fmt(fmt)	"TAA: " fmt

enum taa_mitigations {
        TAA_MITIGATION_OFF,
        TAA_MITIGATION_UCODE_NEEDED,
        TAA_MITIGATION_VERW,
        TAA_MITIGATION_TSX_DISABLED,
};

/* Default mitigation for TAA-affected CPUs */
static enum taa_mitigations taa_mitigation __ro_after_init = TAA_MITIGATION_VERW;
static bool taa_nosmt __ro_after_init;

static const char * const taa_strings[] = {
        [TAA_MITIGATION_OFF]		= "Vulnerable",
        [TAA_MITIGATION_UCODE_NEEDED]	= "Vulnerable: Clear CPU buffers attempted, no microcode",
        [TAA_MITIGATION_VERW]		= "Mitigation: Clear CPU buffers",
        [TAA_MITIGATION_TSX_DISABLED]	= "Mitigation: TSX disabled",
};

static void __init taa_select_mitigation(void)
{
        u64 ia32_cap;

        if (!boot_cpu_has_bug(X86_BUG_TAA)) {
                taa_mitigation = TAA_MITIGATION_OFF;
                return;
        }

        /* TSX previously disabled by tsx=off */
        if (!boot_cpu_has(X86_FEATURE_RTM)) {
                taa_mitigation = TAA_MITIGATION_TSX_DISABLED;
                return;
        }

        if (cpu_mitigations_off()) {
                taa_mitigation = TAA_MITIGATION_OFF;
                return;
        }

        /*
         * TAA mitigation via VERW is turned off if both
         * tsx_async_abort=off and mds=off are specified.
         */
        if (taa_mitigation == TAA_MITIGATION_OFF &&
            mds_mitigation == MDS_MITIGATION_OFF)
                return;

        if (boot_cpu_has(X86_FEATURE_MD_CLEAR))
                taa_mitigation = TAA_MITIGATION_VERW;
        else
                taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;

        /*
         * VERW doesn't clear the CPU buffers when MD_CLEAR=1 and MDS_NO=1.
         * A microcode update fixes this behavior to clear CPU buffers. It also
         * adds support for MSR_IA32_TSX_CTRL which is enumerated by the
         * ARCH_CAP_TSX_CTRL_MSR bit.
         *
         * On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode
         * update is required.
         */
        ia32_cap = x86_read_arch_cap_msr();
        if ( (ia32_cap & ARCH_CAP_MDS_NO) &&
            !(ia32_cap & ARCH_CAP_TSX_CTRL_MSR))
                taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;

        /*
         * TSX is enabled, select alternate mitigation for TAA which is
         * the same as MDS. Enable MDS static branch to clear CPU buffers.
         *
         * For guests that can't determine whether the correct microcode is
         * present on host, enable the mitigation for UCODE_NEEDED as well.
         */
        static_branch_enable(&mds_user_clear);

        if (taa_nosmt || cpu_mitigations_auto_nosmt())
                cpu_smt_disable(false);
}

static int __init tsx_async_abort_parse_cmdline(char *str)
{
        if (!boot_cpu_has_bug(X86_BUG_TAA))
                return 0;

        if (!strcmp(str, "off")) {
                taa_mitigation = TAA_MITIGATION_OFF;
        } else if (!strcmp(str, "full")) {
                taa_mitigation = TAA_MITIGATION_VERW;
        } else if (!strcmp(str, "full,nosmt")) {
                taa_mitigation = TAA_MITIGATION_VERW;
                taa_nosmt = true;
        }

        return 0;
}
early_param("tsx_async_abort", tsx_async_abort_parse_cmdline);

#undef pr_fmt
#define pr_fmt(fmt)	"MMIO Stale Data: " fmt

enum mmio_mitigations {
        MMIO_MITIGATION_OFF,
        MMIO_MITIGATION_UCODE_NEEDED,
        MMIO_MITIGATION_VERW,
};

/* Default mitigation for Processor MMIO Stale Data vulnerabilities */
static enum mmio_mitigations mmio_mitigation __ro_after_init = MMIO_MITIGATION_VERW;
static bool mmio_nosmt __ro_after_init = false;

static const char * const mmio_strings[] = {
        [MMIO_MITIGATION_OFF]		= "Vulnerable",
        [MMIO_MITIGATION_UCODE_NEEDED]	= "Vulnerable: Clear CPU buffers attempted, no microcode",
        [MMIO_MITIGATION_VERW]		= "Mitigation: Clear CPU buffers",
};

static void __init mmio_select_mitigation(void)
{
        u64 ia32_cap;

        if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) ||
            cpu_mitigations_off()) {
                mmio_mitigation = MMIO_MITIGATION_OFF;
                return;
        }

        if (mmio_mitigation == MMIO_MITIGATION_OFF)
                return;

        ia32_cap = x86_read_arch_cap_msr();

        /*
         * Enable CPU buffer clear mitigation for host and VMM, if also affected
         * by MDS or TAA. Otherwise, enable mitigation for VMM only.
         */
        if (boot_cpu_has_bug(X86_BUG_MDS) || (boot_cpu_has_bug(X86_BUG_TAA) &&
                                              boot_cpu_has(X86_FEATURE_RTM)))
                static_branch_enable(&mds_user_clear);
        else
                static_branch_enable(&mmio_stale_data_clear);

        /*
         * If Processor-MMIO-Stale-Data bug is present and Fill Buffer data can
         * be propagated to uncore buffers, clearing the Fill buffers on idle
         * is required irrespective of SMT state.
         */
        if (!(ia32_cap & ARCH_CAP_FBSDP_NO))
                static_branch_enable(&mds_idle_clear);

        /*
         * Check if the system has the right microcode.
         *
         * CPU Fill buffer clear mitigation is enumerated by either an explicit
         * FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS
         * affected systems.
         */
        if ((ia32_cap & ARCH_CAP_FB_CLEAR) ||
            (boot_cpu_has(X86_FEATURE_MD_CLEAR) &&
             boot_cpu_has(X86_FEATURE_FLUSH_L1D) &&
             !(ia32_cap & ARCH_CAP_MDS_NO)))
                mmio_mitigation = MMIO_MITIGATION_VERW;
        else
                mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED;

        if (mmio_nosmt || cpu_mitigations_auto_nosmt())
                cpu_smt_disable(false);
}

static int __init mmio_stale_data_parse_cmdline(char *str)
{
        if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
                return 0;

        if (!strcmp(str, "off")) {
                mmio_mitigation = MMIO_MITIGATION_OFF;
        } else if (!strcmp(str, "full")) {
                mmio_mitigation = MMIO_MITIGATION_VERW;
        } else if (!strcmp(str, "full,nosmt")) {
                mmio_mitigation = MMIO_MITIGATION_VERW;
                mmio_nosmt = true;
        }

        return 0;
}
early_param("mmio_stale_data", mmio_stale_data_parse_cmdline);

#undef pr_fmt
#define pr_fmt(fmt)	"" fmt

static void __init md_clear_update_mitigation(void)
{
        if (cpu_mitigations_off())
                return;

        if (!static_key_enabled(&mds_user_clear))
                goto out;

        /*
         * mds_user_clear is now enabled. Update MDS, TAA and MMIO Stale Data
         * mitigation, if necessary.
         */
        if (mds_mitigation == MDS_MITIGATION_OFF &&
            boot_cpu_has_bug(X86_BUG_MDS)) {
                mds_mitigation = MDS_MITIGATION_FULL;
                mds_select_mitigation();
        }
        if (taa_mitigation == TAA_MITIGATION_OFF &&
            boot_cpu_has_bug(X86_BUG_TAA)) {
                taa_mitigation = TAA_MITIGATION_VERW;
                taa_select_mitigation();
        }
        if (mmio_mitigation == MMIO_MITIGATION_OFF &&
            boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) {
                mmio_mitigation = MMIO_MITIGATION_VERW;
                mmio_select_mitigation();
        }
out:
        if (boot_cpu_has_bug(X86_BUG_MDS))
                pr_info("MDS: %s\n", mds_strings[mds_mitigation]);
        if (boot_cpu_has_bug(X86_BUG_TAA))
                pr_info("TAA: %s\n", taa_strings[taa_mitigation]);
        if (boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
                pr_info("MMIO Stale Data: %s\n", mmio_strings[mmio_mitigation]);
}

static void __init md_clear_select_mitigation(void)
{
        mds_select_mitigation();
        taa_select_mitigation();
        mmio_select_mitigation();

        /*
         * As MDS, TAA and MMIO Stale Data mitigations are inter-related, update
         * and print their mitigation after MDS, TAA and MMIO Stale Data
         * mitigation selection is done.
         */
        md_clear_update_mitigation();
}
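
/*
 * Illustrative scenario (not from the original sources): booting with
 * "mds=off tsx_async_abort=full" on a CPU affected by both bugs still leaves
 * mds_user_clear enabled by taa_select_mitigation(), so
 * md_clear_update_mitigation() flips the MDS state back to "full" and
 * reprints both status lines, keeping the reported states consistent with the
 * single VERW-based buffer clearing that is actually in effect.
 */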

#undef pr_fmt
#define pr_fmt(fmt)	"SRBDS: " fmt

enum srbds_mitigations {
        SRBDS_MITIGATION_OFF,
        SRBDS_MITIGATION_UCODE_NEEDED,
        SRBDS_MITIGATION_FULL,
        SRBDS_MITIGATION_TSX_OFF,
        SRBDS_MITIGATION_HYPERVISOR,
};

static enum srbds_mitigations srbds_mitigation __ro_after_init = SRBDS_MITIGATION_FULL;

static const char * const srbds_strings[] = {
        [SRBDS_MITIGATION_OFF]		= "Vulnerable",
        [SRBDS_MITIGATION_UCODE_NEEDED]	= "Vulnerable: No microcode",
        [SRBDS_MITIGATION_FULL]		= "Mitigation: Microcode",
        [SRBDS_MITIGATION_TSX_OFF]	= "Mitigation: TSX disabled",
        [SRBDS_MITIGATION_HYPERVISOR]	= "Unknown: Dependent on hypervisor status",
};

static bool srbds_off;

void update_srbds_msr(void)
{
        u64 mcu_ctrl;

        if (!boot_cpu_has_bug(X86_BUG_SRBDS))
                return;

        if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
                return;

        if (srbds_mitigation == SRBDS_MITIGATION_UCODE_NEEDED)
                return;

        rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);

        switch (srbds_mitigation) {
        case SRBDS_MITIGATION_OFF:
        case SRBDS_MITIGATION_TSX_OFF:
                mcu_ctrl |= RNGDS_MITG_DIS;
                break;
        case SRBDS_MITIGATION_FULL:
                mcu_ctrl &= ~RNGDS_MITG_DIS;
                break;
        default:
                break;
        }

        wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
}

static void __init srbds_select_mitigation(void)
{
        u64 ia32_cap;

        if (!boot_cpu_has_bug(X86_BUG_SRBDS))
                return;

        /*
         * Check to see if this is one of the MDS_NO systems supporting TSX that
         * are only exposed to SRBDS when TSX is enabled or when CPU is affected
         * by Processor MMIO Stale Data vulnerability.
         */
        ia32_cap = x86_read_arch_cap_msr();
        if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
            !boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
                srbds_mitigation = SRBDS_MITIGATION_TSX_OFF;
        else if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
                srbds_mitigation = SRBDS_MITIGATION_HYPERVISOR;
        else if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
                srbds_mitigation = SRBDS_MITIGATION_UCODE_NEEDED;
        else if (cpu_mitigations_off() || srbds_off)
                srbds_mitigation = SRBDS_MITIGATION_OFF;

        update_srbds_msr();
        pr_info("%s\n", srbds_strings[srbds_mitigation]);
}

static int __init srbds_parse_cmdline(char *str)
{
        if (!boot_cpu_has_bug(X86_BUG_SRBDS))
                return 0;

        srbds_off = !strcmp(str, "off");
        return 0;
}
early_param("srbds", srbds_parse_cmdline);

#undef pr_fmt
#define pr_fmt(fmt)	"L1D Flush : " fmt

enum l1d_flush_mitigations {
        L1D_FLUSH_OFF = 0,
        L1D_FLUSH_ON,
};

static enum l1d_flush_mitigations l1d_flush_mitigation __initdata = L1D_FLUSH_OFF;

static void __init l1d_flush_select_mitigation(void)
{
        if (!l1d_flush_mitigation || !boot_cpu_has(X86_FEATURE_FLUSH_L1D))
                return;

        static_branch_enable(&switch_mm_cond_l1d_flush);
        pr_info("Conditional flush on switch_mm() enabled\n");
}

static int __init l1d_flush_parse_cmdline(char *str)
{
        if (!strcmp(str, "on"))
                l1d_flush_mitigation = L1D_FLUSH_ON;

        return 0;
}
early_param("l1d_flush", l1d_flush_parse_cmdline);
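
/*
 * Usage sketch (illustration only, not part of this file): once booted with
 * "l1d_flush=on" on hardware with X86_FEATURE_FLUSH_L1D, a task can opt in
 * from user space through the speculation prctl handled further below,
 * assuming the standard constants from <linux/prctl.h>/<sys/prctl.h>:
 *
 *	prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_L1D_FLUSH, PR_SPEC_ENABLE, 0, 0);
 *
 * which sets TIF_SPEC_L1D_FLUSH so the L1D can be flushed when switching away
 * from this task's mm (gated by the switch_mm_cond_l1d_flush key above).
 */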

#undef pr_fmt
#define pr_fmt(fmt)     "Spectre V1 : " fmt

enum spectre_v1_mitigation {
        SPECTRE_V1_MITIGATION_NONE,
        SPECTRE_V1_MITIGATION_AUTO,
};

static enum spectre_v1_mitigation spectre_v1_mitigation __ro_after_init =
        SPECTRE_V1_MITIGATION_AUTO;

static const char * const spectre_v1_strings[] = {
        [SPECTRE_V1_MITIGATION_NONE] = "Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers",
        [SPECTRE_V1_MITIGATION_AUTO] = "Mitigation: usercopy/swapgs barriers and __user pointer sanitization",
};

/*
 * Does SMAP provide full mitigation against speculative kernel access to
 * userspace?
 */
static bool smap_works_speculatively(void)
{
        if (!boot_cpu_has(X86_FEATURE_SMAP))
                return false;

        /*
         * On CPUs which are vulnerable to Meltdown, SMAP does not
         * prevent speculative access to user data in the L1 cache.
         * Consider SMAP to be non-functional as a mitigation on these
         * CPUs.
         */
        if (boot_cpu_has(X86_BUG_CPU_MELTDOWN))
                return false;

        return true;
}

static void __init spectre_v1_select_mitigation(void)
{
        if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1) || cpu_mitigations_off()) {
                spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
                return;
        }

        if (spectre_v1_mitigation == SPECTRE_V1_MITIGATION_AUTO) {
                /*
                 * With Spectre v1, a user can speculatively control either
                 * path of a conditional swapgs with a user-controlled GS
                 * value.  The mitigation is to add lfences to both code paths.
                 *
                 * If FSGSBASE is enabled, the user can put a kernel address in
                 * GS, in which case SMAP provides no protection.
                 *
                 * If FSGSBASE is disabled, the user can only put a user space
                 * address in GS.  That makes an attack harder, but still
                 * possible if there's no SMAP protection.
                 */
                if (boot_cpu_has(X86_FEATURE_FSGSBASE) ||
                    !smap_works_speculatively()) {
                        /*
                         * Mitigation can be provided from SWAPGS itself or
                         * PTI as the CR3 write in the Meltdown mitigation
                         * is serializing.
                         *
                         * If neither is there, mitigate with an LFENCE to
                         * stop speculation through swapgs.
                         */
                        if (boot_cpu_has_bug(X86_BUG_SWAPGS) &&
                            !boot_cpu_has(X86_FEATURE_PTI))
                                setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_USER);

                        /*
                         * Enable lfences in the kernel entry (non-swapgs)
                         * paths, to prevent user entry from speculatively
                         * skipping swapgs.
                         */
                        setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_KERNEL);
                }
        }

        pr_info("%s\n", spectre_v1_strings[spectre_v1_mitigation]);
}

static int __init nospectre_v1_cmdline(char *str)
{
        spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
        return 0;
}
early_param("nospectre_v1", nospectre_v1_cmdline);

#undef pr_fmt
#define pr_fmt(fmt)     "Spectre V2 : " fmt

static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
        SPECTRE_V2_NONE;

static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init =
        SPECTRE_V2_USER_NONE;
static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init =
        SPECTRE_V2_USER_NONE;

#ifdef CONFIG_RETPOLINE
static bool spectre_v2_bad_module;

bool retpoline_module_ok(bool has_retpoline)
{
        if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
                return true;

        pr_err("System may be vulnerable to spectre v2\n");
        spectre_v2_bad_module = true;
        return false;
}

static inline const char *spectre_v2_module_string(void)
{
        return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
}
#else
static inline const char *spectre_v2_module_string(void) { return ""; }
#endif

#define SPECTRE_V2_LFENCE_MSG "WARNING: LFENCE mitigation is not recommended for this CPU, data leaks possible!\n"
#define SPECTRE_V2_EIBRS_EBPF_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS on, data leaks possible via Spectre v2 BHB attacks!\n"
#define SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS+LFENCE mitigation and SMT, data leaks possible via Spectre v2 BHB attacks!\n"

#ifdef CONFIG_BPF_SYSCALL
void unpriv_ebpf_notify(int new_state)
{
        if (new_state)
                return;

        /* Unprivileged eBPF is enabled */
        switch (spectre_v2_enabled) {
        case SPECTRE_V2_EIBRS:
                pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
                break;
        case SPECTRE_V2_EIBRS_LFENCE:
                if (sched_smt_active())
                        pr_err(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);
                break;
        default:
                break;
        }
}
#endif

static inline bool match_option(const char *arg, int arglen, const char *opt)
{
        int len = strlen(opt);

        return len == arglen && !strncmp(arg, opt, len);
}
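
/*
 * Example (illustration only): cmdline_find_option() copies the parameter
 * value into a buffer and returns its length, so for
 * "spectre_v2_user=prctl,ibpb" the parsers below call
 * match_option("prctl,ibpb", 10, ...) against each table entry. Only the
 * exact-length strncmp() match on "prctl,ibpb" succeeds, which keeps the
 * shorter "prctl" entry from matching the longer argument.
 */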

/* The kernel command line selection for spectre v2 */
enum spectre_v2_mitigation_cmd {
        SPECTRE_V2_CMD_NONE,
        SPECTRE_V2_CMD_AUTO,
        SPECTRE_V2_CMD_FORCE,
        SPECTRE_V2_CMD_RETPOLINE,
        SPECTRE_V2_CMD_RETPOLINE_GENERIC,
        SPECTRE_V2_CMD_RETPOLINE_LFENCE,
        SPECTRE_V2_CMD_EIBRS,
        SPECTRE_V2_CMD_EIBRS_RETPOLINE,
        SPECTRE_V2_CMD_EIBRS_LFENCE,
};

enum spectre_v2_user_cmd {
        SPECTRE_V2_USER_CMD_NONE,
        SPECTRE_V2_USER_CMD_AUTO,
        SPECTRE_V2_USER_CMD_FORCE,
        SPECTRE_V2_USER_CMD_PRCTL,
        SPECTRE_V2_USER_CMD_PRCTL_IBPB,
        SPECTRE_V2_USER_CMD_SECCOMP,
        SPECTRE_V2_USER_CMD_SECCOMP_IBPB,
};

static const char * const spectre_v2_user_strings[] = {
        [SPECTRE_V2_USER_NONE]			= "User space: Vulnerable",
        [SPECTRE_V2_USER_STRICT]		= "User space: Mitigation: STIBP protection",
        [SPECTRE_V2_USER_STRICT_PREFERRED]	= "User space: Mitigation: STIBP always-on protection",
        [SPECTRE_V2_USER_PRCTL]			= "User space: Mitigation: STIBP via prctl",
        [SPECTRE_V2_USER_SECCOMP]		= "User space: Mitigation: STIBP via seccomp and prctl",
};

static const struct {
        const char			*option;
        enum spectre_v2_user_cmd	cmd;
        bool				secure;
} v2_user_options[] __initconst = {
        { "auto",		SPECTRE_V2_USER_CMD_AUTO,		false },
        { "off",		SPECTRE_V2_USER_CMD_NONE,		false },
        { "on",			SPECTRE_V2_USER_CMD_FORCE,		true  },
        { "prctl",		SPECTRE_V2_USER_CMD_PRCTL,		false },
        { "prctl,ibpb",		SPECTRE_V2_USER_CMD_PRCTL_IBPB,		false },
        { "seccomp",		SPECTRE_V2_USER_CMD_SECCOMP,		false },
        { "seccomp,ibpb",	SPECTRE_V2_USER_CMD_SECCOMP_IBPB,	false },
};

static void __init spec_v2_user_print_cond(const char *reason, bool secure)
{
        if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
                pr_info("spectre_v2_user=%s forced on command line.\n", reason);
}

static enum spectre_v2_user_cmd __init
spectre_v2_parse_user_cmdline(enum spectre_v2_mitigation_cmd v2_cmd)
{
        char arg[20];
        int ret, i;

        switch (v2_cmd) {
        case SPECTRE_V2_CMD_NONE:
                return SPECTRE_V2_USER_CMD_NONE;
        case SPECTRE_V2_CMD_FORCE:
                return SPECTRE_V2_USER_CMD_FORCE;
        default:
                break;
        }

        ret = cmdline_find_option(boot_command_line, "spectre_v2_user",
                                  arg, sizeof(arg));
        if (ret < 0)
                return SPECTRE_V2_USER_CMD_AUTO;

        for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) {
                if (match_option(arg, ret, v2_user_options[i].option)) {
                        spec_v2_user_print_cond(v2_user_options[i].option,
                                                v2_user_options[i].secure);
                        return v2_user_options[i].cmd;
                }
        }

        pr_err("Unknown user space protection option (%s). Switching to AUTO select\n", arg);
        return SPECTRE_V2_USER_CMD_AUTO;
}

static inline bool spectre_v2_in_eibrs_mode(enum spectre_v2_mitigation mode)
{
        return (mode == SPECTRE_V2_EIBRS ||
                mode == SPECTRE_V2_EIBRS_RETPOLINE ||
                mode == SPECTRE_V2_EIBRS_LFENCE);
}

static void __init
spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
{
        enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE;
        bool smt_possible = IS_ENABLED(CONFIG_SMP);
        enum spectre_v2_user_cmd cmd;

        if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
                return;

        if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
            cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
                smt_possible = false;

        cmd = spectre_v2_parse_user_cmdline(v2_cmd);
        switch (cmd) {
        case SPECTRE_V2_USER_CMD_NONE:
                goto set_mode;
        case SPECTRE_V2_USER_CMD_FORCE:
                mode = SPECTRE_V2_USER_STRICT;
                break;
        case SPECTRE_V2_USER_CMD_PRCTL:
        case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
                mode = SPECTRE_V2_USER_PRCTL;
                break;
        case SPECTRE_V2_USER_CMD_AUTO:
        case SPECTRE_V2_USER_CMD_SECCOMP:
        case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
                if (IS_ENABLED(CONFIG_SECCOMP))
                        mode = SPECTRE_V2_USER_SECCOMP;
                else
                        mode = SPECTRE_V2_USER_PRCTL;
                break;
        }

        /* Initialize Indirect Branch Prediction Barrier */
        if (boot_cpu_has(X86_FEATURE_IBPB)) {
                setup_force_cpu_cap(X86_FEATURE_USE_IBPB);

                spectre_v2_user_ibpb = mode;
                switch (cmd) {
                case SPECTRE_V2_USER_CMD_FORCE:
                case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
                case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
                        static_branch_enable(&switch_mm_always_ibpb);
                        spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
                        break;
                case SPECTRE_V2_USER_CMD_PRCTL:
                case SPECTRE_V2_USER_CMD_AUTO:
                case SPECTRE_V2_USER_CMD_SECCOMP:
                        static_branch_enable(&switch_mm_cond_ibpb);
                        break;
                default:
                        break;
                }

                pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
                        static_key_enabled(&switch_mm_always_ibpb) ?
                        "always-on" : "conditional");
        }

        /*
         * If no STIBP, enhanced IBRS is enabled or SMT impossible, STIBP is not
         * required.
         */
        if (!boot_cpu_has(X86_FEATURE_STIBP) ||
            !smt_possible ||
            spectre_v2_in_eibrs_mode(spectre_v2_enabled))
                return;

        /*
         * At this point, an STIBP mode other than "off" has been set.
         * If STIBP support is not being forced, check if STIBP always-on
         * is preferred.
         */
        if (mode != SPECTRE_V2_USER_STRICT &&
            boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
                mode = SPECTRE_V2_USER_STRICT_PREFERRED;

        spectre_v2_user_stibp = mode;

set_mode:
        pr_info("%s\n", spectre_v2_user_strings[mode]);
}

static const char * const spectre_v2_strings[] = {
        [SPECTRE_V2_NONE]			= "Vulnerable",
        [SPECTRE_V2_RETPOLINE]			= "Mitigation: Retpolines",
        [SPECTRE_V2_LFENCE]			= "Mitigation: LFENCE",
        [SPECTRE_V2_EIBRS]			= "Mitigation: Enhanced IBRS",
        [SPECTRE_V2_EIBRS_LFENCE]		= "Mitigation: Enhanced IBRS + LFENCE",
        [SPECTRE_V2_EIBRS_RETPOLINE]		= "Mitigation: Enhanced IBRS + Retpolines",
};

static const struct {
        const char *option;
        enum spectre_v2_mitigation_cmd cmd;
        bool secure;
} mitigation_options[] __initconst = {
        { "off",		SPECTRE_V2_CMD_NONE,		  false },
        { "on",			SPECTRE_V2_CMD_FORCE,		  true  },
        { "retpoline",		SPECTRE_V2_CMD_RETPOLINE,	  false },
        { "retpoline,amd",	SPECTRE_V2_CMD_RETPOLINE_LFENCE,  false },
        { "retpoline,lfence",	SPECTRE_V2_CMD_RETPOLINE_LFENCE,  false },
        { "retpoline,generic",	SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
        { "eibrs",		SPECTRE_V2_CMD_EIBRS,		  false },
        { "eibrs,lfence",	SPECTRE_V2_CMD_EIBRS_LFENCE,	  false },
        { "eibrs,retpoline",	SPECTRE_V2_CMD_EIBRS_RETPOLINE,	  false },
        { "auto",		SPECTRE_V2_CMD_AUTO,		  false },
};

static void __init spec_v2_print_cond(const char *reason, bool secure)
{
        if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
                pr_info("%s selected on command line.\n", reason);
}

static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
{
        enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;
        char arg[20];
        int ret, i;

        if (cmdline_find_option_bool(boot_command_line, "nospectre_v2") ||
            cpu_mitigations_off())
                return SPECTRE_V2_CMD_NONE;

        ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
        if (ret < 0)
                return SPECTRE_V2_CMD_AUTO;

        for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
                if (!match_option(arg, ret, mitigation_options[i].option))
                        continue;
                cmd = mitigation_options[i].cmd;
                break;
        }

        if (i >= ARRAY_SIZE(mitigation_options)) {
                pr_err("unknown option (%s). Switching to AUTO select\n", arg);
                return SPECTRE_V2_CMD_AUTO;
        }

        if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
             cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE ||
             cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC ||
             cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
             cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
            !IS_ENABLED(CONFIG_RETPOLINE)) {
                pr_err("%s selected but not compiled in. Switching to AUTO select\n",
                       mitigation_options[i].option);
                return SPECTRE_V2_CMD_AUTO;
        }

        if ((cmd == SPECTRE_V2_CMD_EIBRS ||
             cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
             cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
            !boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
                pr_err("%s selected but CPU doesn't have eIBRS. Switching to AUTO select\n",
                       mitigation_options[i].option);
                return SPECTRE_V2_CMD_AUTO;
        }

        if ((cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE ||
             cmd == SPECTRE_V2_CMD_EIBRS_LFENCE) &&
            !boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
                pr_err("%s selected, but CPU doesn't have a serializing LFENCE. Switching to AUTO select\n",
                       mitigation_options[i].option);
                return SPECTRE_V2_CMD_AUTO;
        }

        spec_v2_print_cond(mitigation_options[i].option,
                           mitigation_options[i].secure);
        return cmd;
}

static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void)
{
        if (!IS_ENABLED(CONFIG_RETPOLINE)) {
                pr_err("Kernel not compiled with retpoline; no mitigation available!");
                return SPECTRE_V2_NONE;
        }

        return SPECTRE_V2_RETPOLINE;
}

static void __init spectre_v2_select_mitigation(void)
{
        enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
        enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;

        /*
         * If the CPU is not affected and the command line mode is NONE or AUTO
         * then nothing to do.
         */
        if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
            (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
                return;

        switch (cmd) {
        case SPECTRE_V2_CMD_NONE:
                return;

        case SPECTRE_V2_CMD_FORCE:
        case SPECTRE_V2_CMD_AUTO:
                if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
                        mode = SPECTRE_V2_EIBRS;
                        break;
                }

                mode = spectre_v2_select_retpoline();
                break;

        case SPECTRE_V2_CMD_RETPOLINE_LFENCE:
                pr_err(SPECTRE_V2_LFENCE_MSG);
                mode = SPECTRE_V2_LFENCE;
                break;

        case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
                mode = SPECTRE_V2_RETPOLINE;
                break;

        case SPECTRE_V2_CMD_RETPOLINE:
                mode = spectre_v2_select_retpoline();
                break;

        case SPECTRE_V2_CMD_EIBRS:
                mode = SPECTRE_V2_EIBRS;
                break;

        case SPECTRE_V2_CMD_EIBRS_LFENCE:
                mode = SPECTRE_V2_EIBRS_LFENCE;
                break;

        case SPECTRE_V2_CMD_EIBRS_RETPOLINE:
                mode = SPECTRE_V2_EIBRS_RETPOLINE;
                break;
        }

        if (mode == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
                pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);

        if (spectre_v2_in_eibrs_mode(mode)) {
                /* Force it so VMEXIT will restore correctly */
                x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
                wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
        }

        switch (mode) {
        case SPECTRE_V2_NONE:
        case SPECTRE_V2_EIBRS:
                break;

        case SPECTRE_V2_LFENCE:
        case SPECTRE_V2_EIBRS_LFENCE:
                setup_force_cpu_cap(X86_FEATURE_RETPOLINE_LFENCE);
                fallthrough;

        case SPECTRE_V2_RETPOLINE:
        case SPECTRE_V2_EIBRS_RETPOLINE:
                setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
                break;
        }

        spectre_v2_enabled = mode;
        pr_info("%s\n", spectre_v2_strings[mode]);

        /*
         * If spectre v2 protection has been enabled, unconditionally fill
         * RSB during a context switch; this protects against two independent
         * issues:
         *
         *	- RSB underflow (and switch to BTB) on Skylake+
         *	- SpectreRSB variant of spectre v2 on X86_BUG_SPECTRE_V2 CPUs
         */
        setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
        pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");

        /*
         * Retpoline means the kernel is safe because it has no indirect
         * branches. Enhanced IBRS protects firmware too, so, enable restricted
         * speculation around firmware calls only when Enhanced IBRS isn't
         * supported.
         *
         * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because
         * the user might select retpoline on the kernel command line and if
         * the CPU supports Enhanced IBRS, kernel might un-intentionally not
         * enable IBRS around firmware calls.
         */
        if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_eibrs_mode(mode)) {
                setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
                pr_info("Enabling Restricted Speculation for firmware calls\n");
        }

        /* Set up IBPB and STIBP depending on the general spectre V2 command */
        spectre_v2_user_select_mitigation(cmd);
}

static void update_stibp_msr(void * __unused)
{
        wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
}

/* Update x86_spec_ctrl_base in case SMT state changed. */
static void update_stibp_strict(void)
{
        u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;

        if (sched_smt_active())
                mask |= SPEC_CTRL_STIBP;

        if (mask == x86_spec_ctrl_base)
                return;

        pr_info("Update user space SMT mitigation: STIBP %s\n",
                mask & SPEC_CTRL_STIBP ? "always-on" : "off");
        x86_spec_ctrl_base = mask;
        on_each_cpu(update_stibp_msr, NULL, 1);
}
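
/*
 * Worked example (values for illustration only): with STIBP in strict mode,
 * x86_spec_ctrl_base might be SPEC_CTRL_STIBP (0x2) while SMT is on; once SMT
 * is disabled, mask is computed as 0x0, the pr_info() above reports "off",
 * and on_each_cpu() rewrites MSR_IA32_SPEC_CTRL on every CPU via
 * update_stibp_msr().
 */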

/* Update the static key controlling the evaluation of TIF_SPEC_IB */
static void update_indir_branch_cond(void)
{
        if (sched_smt_active())
                static_branch_enable(&switch_to_cond_stibp);
        else
                static_branch_disable(&switch_to_cond_stibp);
}

#undef pr_fmt
#define pr_fmt(fmt) fmt

/* Update the static key controlling the MDS CPU buffer clear in idle */
static void update_mds_branch_idle(void)
{
        u64 ia32_cap = x86_read_arch_cap_msr();

        /*
         * Enable the idle clearing if SMT is active on CPUs which are
         * affected only by MSBDS and not any other MDS variant.
         *
         * The other variants cannot be mitigated when SMT is enabled, so
         * clearing the buffers on idle just to prevent the Store Buffer
         * repartitioning leak would be a window dressing exercise.
         */
        if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY))
                return;

        if (sched_smt_active()) {
                static_branch_enable(&mds_idle_clear);
        } else if (mmio_mitigation == MMIO_MITIGATION_OFF ||
                   (ia32_cap & ARCH_CAP_FBSDP_NO)) {
                static_branch_disable(&mds_idle_clear);
        }
}

#define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n"
#define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n"

void cpu_bugs_smt_update(void)
{
        mutex_lock(&spec_ctrl_mutex);

        if (sched_smt_active() && unprivileged_ebpf_enabled() &&
            spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
                pr_warn_once(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);

        switch (spectre_v2_user_stibp) {
        case SPECTRE_V2_USER_NONE:
                break;
        case SPECTRE_V2_USER_STRICT:
        case SPECTRE_V2_USER_STRICT_PREFERRED:
                update_stibp_strict();
                break;
        case SPECTRE_V2_USER_PRCTL:
        case SPECTRE_V2_USER_SECCOMP:
                update_indir_branch_cond();
                break;
        }

        switch (mds_mitigation) {
        case MDS_MITIGATION_FULL:
        case MDS_MITIGATION_VMWERV:
                if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY))
                        pr_warn_once(MDS_MSG_SMT);
                update_mds_branch_idle();
                break;
        case MDS_MITIGATION_OFF:
                break;
        }

        switch (taa_mitigation) {
        case TAA_MITIGATION_VERW:
        case TAA_MITIGATION_UCODE_NEEDED:
                if (sched_smt_active())
                        pr_warn_once(TAA_MSG_SMT);
                break;
        case TAA_MITIGATION_TSX_DISABLED:
        case TAA_MITIGATION_OFF:
                break;
        }

        mutex_unlock(&spec_ctrl_mutex);
}

#undef pr_fmt
#define pr_fmt(fmt)	"Speculative Store Bypass: " fmt

static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE;

/* The kernel command line selection */
enum ssb_mitigation_cmd {
        SPEC_STORE_BYPASS_CMD_NONE,
        SPEC_STORE_BYPASS_CMD_AUTO,
        SPEC_STORE_BYPASS_CMD_ON,
        SPEC_STORE_BYPASS_CMD_PRCTL,
        SPEC_STORE_BYPASS_CMD_SECCOMP,
};

static const char * const ssb_strings[] = {
        [SPEC_STORE_BYPASS_NONE]	= "Vulnerable",
        [SPEC_STORE_BYPASS_DISABLE]	= "Mitigation: Speculative Store Bypass disabled",
        [SPEC_STORE_BYPASS_PRCTL]	= "Mitigation: Speculative Store Bypass disabled via prctl",
        [SPEC_STORE_BYPASS_SECCOMP]	= "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
};

static const struct {
        const char *option;
        enum ssb_mitigation_cmd cmd;
} ssb_mitigation_options[] __initconst = {
        { "auto",	SPEC_STORE_BYPASS_CMD_AUTO },    /* Platform decides */
        { "on",		SPEC_STORE_BYPASS_CMD_ON },      /* Disable Speculative Store Bypass */
        { "off",	SPEC_STORE_BYPASS_CMD_NONE },    /* Don't touch Speculative Store Bypass */
        { "prctl",	SPEC_STORE_BYPASS_CMD_PRCTL },   /* Disable Speculative Store Bypass via prctl */
        { "seccomp",	SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */
};

static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
{
        enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
        char arg[20];
        int ret, i;

        if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable") ||
            cpu_mitigations_off()) {
                return SPEC_STORE_BYPASS_CMD_NONE;
        } else {
                ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
                                          arg, sizeof(arg));
                if (ret < 0)
                        return SPEC_STORE_BYPASS_CMD_AUTO;

                for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
                        if (!match_option(arg, ret, ssb_mitigation_options[i].option))
                                continue;

                        cmd = ssb_mitigation_options[i].cmd;
                        break;
                }

                if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
                        pr_err("unknown option (%s). Switching to AUTO select\n", arg);
                        return SPEC_STORE_BYPASS_CMD_AUTO;
                }
        }

        return cmd;
}

static enum ssb_mitigation __init __ssb_select_mitigation(void)
{
        enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
        enum ssb_mitigation_cmd cmd;

        if (!boot_cpu_has(X86_FEATURE_SSBD))
                return mode;

        cmd = ssb_parse_cmdline();
        if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
            (cmd == SPEC_STORE_BYPASS_CMD_NONE ||
             cmd == SPEC_STORE_BYPASS_CMD_AUTO))
                return mode;

        switch (cmd) {
        case SPEC_STORE_BYPASS_CMD_AUTO:
        case SPEC_STORE_BYPASS_CMD_SECCOMP:
                /*
                 * Choose prctl+seccomp as the default mode if seccomp is
                 * enabled.
                 */
                if (IS_ENABLED(CONFIG_SECCOMP))
                        mode = SPEC_STORE_BYPASS_SECCOMP;
                else
                        mode = SPEC_STORE_BYPASS_PRCTL;
                break;
        case SPEC_STORE_BYPASS_CMD_ON:
                mode = SPEC_STORE_BYPASS_DISABLE;
                break;
        case SPEC_STORE_BYPASS_CMD_PRCTL:
                mode = SPEC_STORE_BYPASS_PRCTL;
                break;
        case SPEC_STORE_BYPASS_CMD_NONE:
                break;
        }

        /*
         * If SSBD is controlled by the SPEC_CTRL MSR, then set the proper
         * bit in the mask to allow guests to use the mitigation even in the
         * case where the host does not enable it.
         */
        if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
            static_cpu_has(X86_FEATURE_AMD_SSBD)) {
                x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
        }

        /*
         * We have three CPU feature flags that are in play here:
         *  - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
         *  - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
         *  - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
         */
        if (mode == SPEC_STORE_BYPASS_DISABLE) {
                setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
                /*
                 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may
                 * use a completely different MSR and bit dependent on family.
                 */
                if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
                    !static_cpu_has(X86_FEATURE_AMD_SSBD)) {
                        x86_amd_ssb_disable();
                } else {
                        x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
                        wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
                }
        }

        return mode;
}

static void ssb_select_mitigation(void)
{
        ssb_mode = __ssb_select_mitigation();

        if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
                pr_info("%s\n", ssb_strings[ssb_mode]);
}

#undef pr_fmt
#define pr_fmt(fmt)     "Speculation prctl: " fmt

static void task_update_spec_tif(struct task_struct *tsk)
{
        /* Force the update of the real TIF bits */
        set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE);

        /*
         * Immediately update the speculation control MSRs for the current
         * task, but for a non-current task delay setting the CPU
         * mitigation until it is scheduled next.
         *
         * This can only happen for SECCOMP mitigation. For PRCTL it's
         * always the current task.
         */
        if (tsk == current)
                speculation_ctrl_update_current();
}

static int l1d_flush_prctl_set(struct task_struct *task, unsigned long ctrl)
{
        if (!static_branch_unlikely(&switch_mm_cond_l1d_flush))
                return -EPERM;

        switch (ctrl) {
        case PR_SPEC_ENABLE:
                set_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH);
                return 0;
        case PR_SPEC_DISABLE:
                clear_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH);
                return 0;
        default:
                return -ERANGE;
        }
}

static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
{
        if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
            ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
                return -ENXIO;

        switch (ctrl) {
        case PR_SPEC_ENABLE:
                /* If speculation is force disabled, enable is not allowed */
                if (task_spec_ssb_force_disable(task))
                        return -EPERM;
                task_clear_spec_ssb_disable(task);
                task_clear_spec_ssb_noexec(task);
                task_update_spec_tif(task);
                break;
        case PR_SPEC_DISABLE:
                task_set_spec_ssb_disable(task);
                task_clear_spec_ssb_noexec(task);
                task_update_spec_tif(task);
                break;
        case PR_SPEC_FORCE_DISABLE:
                task_set_spec_ssb_disable(task);
                task_set_spec_ssb_force_disable(task);
                task_clear_spec_ssb_noexec(task);
                task_update_spec_tif(task);
                break;
        case PR_SPEC_DISABLE_NOEXEC:
                if (task_spec_ssb_force_disable(task))
                        return -EPERM;
                task_set_spec_ssb_disable(task);
                task_set_spec_ssb_noexec(task);
                task_update_spec_tif(task);
                break;
        default:
                return -ERANGE;
        }
        return 0;
}

static bool is_spec_ib_user_controlled(void)
{
        return spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL ||
                spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
                spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
                spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP;
}

static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
{
        switch (ctrl) {
        case PR_SPEC_ENABLE:
                if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
                    spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
                        return 0;

                /*
                 * With strict mode for both IBPB and STIBP, the instruction
                 * code paths avoid checking this task flag and instead,
                 * unconditionally run the instruction. However, STIBP and IBPB
                 * are independent and either can be set to conditionally
                 * enabled regardless of the mode of the other.
                 *
                 * If either is set to conditional, allow the task flag to be
                 * updated, unless it was force-disabled by a previous prctl
                 * call. Currently, this is possible on an AMD CPU which has the
                 * feature X86_FEATURE_AMD_STIBP_ALWAYS_ON. In this case, if the
                 * kernel is booted with 'spectre_v2_user=seccomp', then
                 * spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP and
                 * spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED.
                 */
                if (!is_spec_ib_user_controlled() ||
                    task_spec_ib_force_disable(task))
                        return -EPERM;

                task_clear_spec_ib_disable(task);
                task_update_spec_tif(task);
                break;
        case PR_SPEC_DISABLE:
        case PR_SPEC_FORCE_DISABLE:
                /*
                 * Indirect branch speculation is always allowed when
                 * mitigation is force disabled.
                 */
                if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
                    spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
                        return -EPERM;

                if (!is_spec_ib_user_controlled())
                        return 0;

                task_set_spec_ib_disable(task);
                if (ctrl == PR_SPEC_FORCE_DISABLE)
                        task_set_spec_ib_force_disable(task);
                task_update_spec_tif(task);
                break;
        default:
                return -ERANGE;
        }
        return 0;
}

int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
                             unsigned long ctrl)
{
        switch (which) {
        case PR_SPEC_STORE_BYPASS:
                return ssb_prctl_set(task, ctrl);
        case PR_SPEC_INDIRECT_BRANCH:
                return ib_prctl_set(task, ctrl);
        case PR_SPEC_L1D_FLUSH:
                return l1d_flush_prctl_set(task, ctrl);
        default:
                return -ENODEV;
        }
}
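
/*
 * User-space usage sketch (illustration only, assumes the uapi constants from
 * <linux/prctl.h> are visible via <sys/prctl.h>):
 *
 *	// Turn off speculative store bypass for the calling task only:
 *	prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_DISABLE, 0, 0);
 *
 *	// Query the resulting state; returns PR_SPEC_PRCTL | PR_SPEC_DISABLE
 *	// when the ssb_mode selected above is prctl or seccomp based:
 *	prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);
 */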

#ifdef CONFIG_SECCOMP
void arch_seccomp_spec_mitigate(struct task_struct *task)
{
        if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
                ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
        if (spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
            spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP)
                ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);
}
#endif

static int l1d_flush_prctl_get(struct task_struct *task)
{
        if (!static_branch_unlikely(&switch_mm_cond_l1d_flush))
                return PR_SPEC_FORCE_DISABLE;

        if (test_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH))
                return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
        else
                return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
}

static int ssb_prctl_get(struct task_struct *task)
{
        switch (ssb_mode) {
        case SPEC_STORE_BYPASS_DISABLE:
                return PR_SPEC_DISABLE;
        case SPEC_STORE_BYPASS_SECCOMP:
        case SPEC_STORE_BYPASS_PRCTL:
                if (task_spec_ssb_force_disable(task))
                        return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
                if (task_spec_ssb_noexec(task))
                        return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC;
                if (task_spec_ssb_disable(task))
                        return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
                return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
        default:
                if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
                        return PR_SPEC_ENABLE;
                return PR_SPEC_NOT_AFFECTED;
        }
}

static int ib_prctl_get(struct task_struct *task)
{
        if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
                return PR_SPEC_NOT_AFFECTED;

        if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
            spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
                return PR_SPEC_ENABLE;
        else if (is_spec_ib_user_controlled()) {
                if (task_spec_ib_force_disable(task))
                        return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
                if (task_spec_ib_disable(task))
                        return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
                return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
        } else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
            spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
            spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
                return PR_SPEC_DISABLE;
        else
                return PR_SPEC_NOT_AFFECTED;
}

int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
{
        switch (which) {
        case PR_SPEC_STORE_BYPASS:
                return ssb_prctl_get(task);
        case PR_SPEC_INDIRECT_BRANCH:
                return ib_prctl_get(task);
        case PR_SPEC_L1D_FLUSH:
                return l1d_flush_prctl_get(task);
        default:
                return -ENODEV;
        }
}

void x86_spec_ctrl_setup_ap(void)
{
        if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
                wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

        if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
                x86_amd_ssb_disable();
}

bool itlb_multihit_kvm_mitigation;
EXPORT_SYMBOL_GPL(itlb_multihit_kvm_mitigation);

#undef pr_fmt
#define pr_fmt(fmt)	"L1TF: " fmt

/* Default mitigation for L1TF-affected CPUs */
enum l1tf_mitigations l1tf_mitigation __ro_after_init = L1TF_MITIGATION_FLUSH;
#if IS_ENABLED(CONFIG_KVM_INTEL)
EXPORT_SYMBOL_GPL(l1tf_mitigation);
#endif
enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);

/*
 * These CPUs all support 44bits physical address space internally in the
 * cache but CPUID can report a smaller number of physical address bits.
 *
 * The L1TF mitigation uses the top most address bit for the inversion of
 * non present PTEs. When the installed memory reaches into the top most
 * address bit due to memory holes, which has been observed on machines
 * which report 36bits physical address bits and have 32G RAM installed,
 * then the mitigation range check in l1tf_select_mitigation() triggers.
 * This is a false positive because the mitigation is still possible due to
 * the fact that the cache uses 44bit internally. Use the cache bits
 * instead of the reported physical bits and adjust them on the affected
 * machines to 44bit if the reported bits are less than 44.
 */
static void override_cache_bits(struct cpuinfo_x86 *c)
{
        if (c->x86 != 6)
                return;

        switch (c->x86_model) {
        case INTEL_FAM6_NEHALEM:
        case INTEL_FAM6_WESTMERE:
        case INTEL_FAM6_SANDYBRIDGE:
        case INTEL_FAM6_IVYBRIDGE:
        case INTEL_FAM6_HASWELL:
        case INTEL_FAM6_HASWELL_L:
        case INTEL_FAM6_HASWELL_G:
        case INTEL_FAM6_BROADWELL:
        case INTEL_FAM6_BROADWELL_G:
        case INTEL_FAM6_SKYLAKE_L:
        case INTEL_FAM6_SKYLAKE:
        case INTEL_FAM6_KABYLAKE_L:
        case INTEL_FAM6_KABYLAKE:
                if (c->x86_cache_bits < 44)
                        c->x86_cache_bits = 44;
                break;
        }
}

static void __init l1tf_select_mitigation(void)
{
        u64 half_pa;

        if (!boot_cpu_has_bug(X86_BUG_L1TF))
                return;

        if (cpu_mitigations_off())
                l1tf_mitigation = L1TF_MITIGATION_OFF;
        else if (cpu_mitigations_auto_nosmt())
                l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;

        override_cache_bits(&boot_cpu_data);

        switch (l1tf_mitigation) {
        case L1TF_MITIGATION_OFF:
        case L1TF_MITIGATION_FLUSH_NOWARN:
        case L1TF_MITIGATION_FLUSH:
                break;
        case L1TF_MITIGATION_FLUSH_NOSMT:
        case L1TF_MITIGATION_FULL:
                cpu_smt_disable(false);
                break;
        case L1TF_MITIGATION_FULL_FORCE:
                cpu_smt_disable(true);
                break;
        }

#if CONFIG_PGTABLE_LEVELS == 2
        pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
        return;
#endif

        half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
        if (l1tf_mitigation != L1TF_MITIGATION_OFF &&
            e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
                pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
                pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
                                half_pa);
                pr_info("However, doing so will make a part of your RAM unusable.\n");
                pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html might help you decide.\n");
                return;
        }

        setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV);
}
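
/*
 * Worked numbers (illustration only, assuming l1tf_pfn_limit() evaluates to
 * BIT_ULL(x86_cache_bits - 1 - PAGE_SHIFT)): with CPUID reporting 36 physical
 * address bits, half_pa would be 1ULL << 35 = 32GB, so a 32GB machine with
 * memory holes can trip the e820 check above; after override_cache_bits()
 * bumps the affected models to 44 bits, half_pa is 1ULL << 43 = 8TB and the
 * warning fires only on genuinely oversized configurations.
 */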

static int __init l1tf_cmdline(char *str)
{
        if (!boot_cpu_has_bug(X86_BUG_L1TF))
                return 0;

        if (!strcmp(str, "off"))
                l1tf_mitigation = L1TF_MITIGATION_OFF;
        else if (!strcmp(str, "flush,nowarn"))
                l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN;
        else if (!strcmp(str, "flush"))
                l1tf_mitigation = L1TF_MITIGATION_FLUSH;
        else if (!strcmp(str, "flush,nosmt"))
                l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
        else if (!strcmp(str, "full"))
                l1tf_mitigation = L1TF_MITIGATION_FULL;
        else if (!strcmp(str, "full,force"))
                l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE;

        return 0;
}
early_param("l1tf", l1tf_cmdline);

#undef pr_fmt
#define pr_fmt(fmt) fmt

#define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"

#if IS_ENABLED(CONFIG_KVM_INTEL)
static const char * const l1tf_vmx_states[] = {
        [VMENTER_L1D_FLUSH_AUTO]		= "auto",
        [VMENTER_L1D_FLUSH_NEVER]		= "vulnerable",
        [VMENTER_L1D_FLUSH_COND]		= "conditional cache flushes",
        [VMENTER_L1D_FLUSH_ALWAYS]		= "cache flushes",
        [VMENTER_L1D_FLUSH_EPT_DISABLED]	= "EPT disabled",
        [VMENTER_L1D_FLUSH_NOT_REQUIRED]	= "flush not necessary"
};

static ssize_t l1tf_show_state(char *buf)
{
        if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO)
                return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);

        if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||
            (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER &&
             sched_smt_active())) {
                return sprintf(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
                               l1tf_vmx_states[l1tf_vmx_mitigation]);
        }

        return sprintf(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
                       l1tf_vmx_states[l1tf_vmx_mitigation],
                       sched_smt_active() ? "vulnerable" : "disabled");
}

static ssize_t itlb_multihit_show_state(char *buf)
{
        if (!boot_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) ||
            !boot_cpu_has(X86_FEATURE_VMX))
                return sprintf(buf, "KVM: Mitigation: VMX unsupported\n");
        else if (!(cr4_read_shadow() & X86_CR4_VMXE))
                return sprintf(buf, "KVM: Mitigation: VMX disabled\n");
        else if (itlb_multihit_kvm_mitigation)
                return sprintf(buf, "KVM: Mitigation: Split huge pages\n");
        else
                return sprintf(buf, "KVM: Vulnerable\n");
}
#else
static ssize_t l1tf_show_state(char *buf)
{
        return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
}

static ssize_t itlb_multihit_show_state(char *buf)
{
        return sprintf(buf, "Processor vulnerable\n");
}
#endif

static ssize_t mds_show_state(char *buf)
{
        if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
                return sprintf(buf, "%s; SMT Host state unknown\n",
                               mds_strings[mds_mitigation]);
        }

        if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) {
                return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
                               (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" :
                                sched_smt_active() ? "mitigated" : "disabled"));
        }

        return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
                       sched_smt_active() ? "vulnerable" : "disabled");
}

static ssize_t tsx_async_abort_show_state(char *buf)
{
        if ((taa_mitigation == TAA_MITIGATION_TSX_DISABLED) ||
            (taa_mitigation == TAA_MITIGATION_OFF))
                return sprintf(buf, "%s\n", taa_strings[taa_mitigation]);

        if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
                return sprintf(buf, "%s; SMT Host state unknown\n",
                               taa_strings[taa_mitigation]);
        }

        return sprintf(buf, "%s; SMT %s\n", taa_strings[taa_mitigation],
                       sched_smt_active() ? "vulnerable" : "disabled");
}

static ssize_t mmio_stale_data_show_state(char *buf)
{
        if (mmio_mitigation == MMIO_MITIGATION_OFF)
                return sysfs_emit(buf, "%s\n", mmio_strings[mmio_mitigation]);

        if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
                return sysfs_emit(buf, "%s; SMT Host state unknown\n",
                                  mmio_strings[mmio_mitigation]);
        }

        return sysfs_emit(buf, "%s; SMT %s\n", mmio_strings[mmio_mitigation],
                          sched_smt_active() ? "vulnerable" : "disabled");
}

static char *stibp_state(void)
{
        if (spectre_v2_in_eibrs_mode(spectre_v2_enabled))
                return "";

        switch (spectre_v2_user_stibp) {
        case SPECTRE_V2_USER_NONE:
                return ", STIBP: disabled";
        case SPECTRE_V2_USER_STRICT:
                return ", STIBP: forced";
        case SPECTRE_V2_USER_STRICT_PREFERRED:
                return ", STIBP: always-on";
        case SPECTRE_V2_USER_PRCTL:
        case SPECTRE_V2_USER_SECCOMP:
                if (static_key_enabled(&switch_to_cond_stibp))
                        return ", STIBP: conditional";
        }
        return "";
}

static char *ibpb_state(void)
{
        if (boot_cpu_has(X86_FEATURE_IBPB)) {
                if (static_key_enabled(&switch_mm_always_ibpb))
                        return ", IBPB: always-on";
                if (static_key_enabled(&switch_mm_cond_ibpb))
                        return ", IBPB: conditional";
                return ", IBPB: disabled";
        }
        return "";
}

static ssize_t spectre_v2_show_state(char *buf)
{
        if (spectre_v2_enabled == SPECTRE_V2_LFENCE)
                return sprintf(buf, "Vulnerable: LFENCE\n");

        if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
                return sprintf(buf, "Vulnerable: eIBRS with unprivileged eBPF\n");

        if (sched_smt_active() && unprivileged_ebpf_enabled() &&
            spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
                return sprintf(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n");

        return sprintf(buf, "%s%s%s%s%s%s\n",
                       spectre_v2_strings[spectre_v2_enabled],
                       ibpb_state(),
                       boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
                       stibp_state(),
                       boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
                       spectre_v2_module_string());
}

static ssize_t srbds_show_state(char *buf)
{
        return sprintf(buf, "%s\n", srbds_strings[srbds_mitigation]);
}

static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
                               char *buf, unsigned int bug)
{
        if (!boot_cpu_has_bug(bug))
                return sprintf(buf, "Not affected\n");

        switch (bug) {
        case X86_BUG_CPU_MELTDOWN:
                if (boot_cpu_has(X86_FEATURE_PTI))
                        return sprintf(buf, "Mitigation: PTI\n");

                if (hypervisor_is_type(X86_HYPER_XEN_PV))
                        return sprintf(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n");

                break;

        case X86_BUG_SPECTRE_V1:
                return sprintf(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]);

        case X86_BUG_SPECTRE_V2:
                return spectre_v2_show_state(buf);

        case X86_BUG_SPEC_STORE_BYPASS:
                return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);

        case X86_BUG_L1TF:
                if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
                        return l1tf_show_state(buf);
                break;

        case X86_BUG_MDS:
                return mds_show_state(buf);

        case X86_BUG_TAA:
                return tsx_async_abort_show_state(buf);

        case X86_BUG_ITLB_MULTIHIT:
                return itlb_multihit_show_state(buf);

        case X86_BUG_SRBDS:
                return srbds_show_state(buf);

        case X86_BUG_MMIO_STALE_DATA:
                return mmio_stale_data_show_state(buf);

        default:
                break;
        }

        return sprintf(buf, "Vulnerable\n");
}

ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
}

ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
}

ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
}

ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
}

ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_MDS);
}

ssize_t cpu_show_tsx_async_abort(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_TAA);
}

ssize_t cpu_show_itlb_multihit(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_ITLB_MULTIHIT);
}

ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_SRBDS);
}

ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA);
}
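
/*
 * Usage sketch (illustration only): the cpu_show_*() handlers above back the
 * files under /sys/devices/system/cpu/vulnerabilities/, so the selected
 * mitigations can be inspected from user space with e.g.:
 *
 *	$ grep . /sys/devices/system/cpu/vulnerabilities/*
 *	.../mds:Mitigation: Clear CPU buffers; SMT vulnerable
 *	.../spectre_v2:Mitigation: Retpolines, IBPB: conditional, ...
 *
 * (sample output; the exact strings are the ones assembled in this file).
 */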